@@ -764,6 +764,85 @@ void efx_remove_channels(struct efx_nic *efx)
 	kfree(efx->xdp_tx_queues);
 }
 
+static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+				struct efx_tx_queue *tx_queue)
+{
+	if (xdp_queue_number >= efx->xdp_tx_queue_count)
+		return -EINVAL;
+
+	netif_dbg(efx, drv, efx->net_dev,
+		  "Channel %u TXQ %u is XDP %u, HW %u\n",
+		  tx_queue->channel->channel, tx_queue->label,
+		  xdp_queue_number, tx_queue->queue);
+	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+	return 0;
+}
+
+static void efx_set_xdp_channels(struct efx_nic *efx)
+{
+	struct efx_tx_queue *tx_queue;
+	struct efx_channel *channel;
+	unsigned int next_queue = 0;
+	int xdp_queue_number = 0;
+	int rc;
+
+	/* We need to mark which channels really have RX and TX
+	 * queues, and adjust the TX queue numbers if we have separate
+	 * RX-only and TX-only channels.
+	 */
+	efx_for_each_channel(channel, efx) {
+		if (channel->channel < efx->tx_channel_offset)
+			continue;
+
+		if (efx_channel_is_xdp_tx(channel)) {
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				tx_queue->queue = next_queue++;
+				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+							  tx_queue);
+				if (rc == 0)
+					xdp_queue_number++;
+			}
+		} else {
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				tx_queue->queue = next_queue++;
+				netif_dbg(efx, drv, efx->net_dev,
+					  "Channel %u TXQ %u is HW %u\n",
+					  channel->channel, tx_queue->label,
+					  tx_queue->queue);
+			}
+
+			/* If XDP is borrowing queues from net stack, it must
+			 * use the queue with no csum offload, which is the
+			 * first one of the channel
+			 * (note: tx_queue_by_type is not initialized yet)
+			 */
+			if (efx->xdp_txq_queues_mode ==
+			    EFX_XDP_TX_QUEUES_BORROWED) {
+				tx_queue = &channel->tx_queue[0];
+				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+							  tx_queue);
+				if (rc == 0)
+					xdp_queue_number++;
+			}
+		}
+	}
+	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+		xdp_queue_number != efx->xdp_tx_queue_count);
+	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+		xdp_queue_number > efx->xdp_tx_queue_count);
+
+	/* If we have more CPUs than assigned XDP TX queues, assign the already
+	 * existing queues to the exceeding CPUs
+	 */
+	next_queue = 0;
+	while (xdp_queue_number < efx->xdp_tx_queue_count) {
+		tx_queue = efx->xdp_tx_queues[next_queue++];
+		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+		if (rc == 0)
+			xdp_queue_number++;
+	}
+}
+
 int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 {
 	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
@@ -835,6 +914,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 		efx_init_napi_channel(efx->channel[i]);
 	}
 
+	efx_set_xdp_channels(efx);
 out:
 	/* Destroy unused channel structures */
 	for (i = 0; i < efx->n_channels; i++) {
@@ -867,26 +947,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 	goto out;
 }
 
-static inline int
-efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
-		     struct efx_tx_queue *tx_queue)
-{
-	if (xdp_queue_number >= efx->xdp_tx_queue_count)
-		return -EINVAL;
-
-	netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
-		  tx_queue->channel->channel, tx_queue->label,
-		  xdp_queue_number, tx_queue->queue);
-	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
-	return 0;
-}
-
 int efx_set_channels(struct efx_nic *efx)
 {
-	struct efx_tx_queue *tx_queue;
 	struct efx_channel *channel;
-	unsigned int next_queue = 0;
-	int xdp_queue_number;
 	int rc;
 
 	efx->tx_channel_offset =
@@ -904,61 +967,14 @@ int efx_set_channels(struct efx_nic *efx)
 		return -ENOMEM;
 	}
 
-	/* We need to mark which channels really have RX and TX
-	 * queues, and adjust the TX queue numbers if we have separate
-	 * RX-only and TX-only channels.
-	 */
-	xdp_queue_number = 0;
 	efx_for_each_channel(channel, efx) {
 		if (channel->channel < efx->n_rx_channels)
 			channel->rx_queue.core_index = channel->channel;
 		else
 			channel->rx_queue.core_index = -1;
-
-		if (channel->channel >= efx->tx_channel_offset) {
-			if (efx_channel_is_xdp_tx(channel)) {
-				efx_for_each_channel_tx_queue(tx_queue, channel) {
-					tx_queue->queue = next_queue++;
-					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-					if (rc == 0)
-						xdp_queue_number++;
-				}
-			} else {
-				efx_for_each_channel_tx_queue(tx_queue, channel) {
-					tx_queue->queue = next_queue++;
-					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
-						  channel->channel, tx_queue->label,
-						  tx_queue->queue);
-				}
-
-				/* If XDP is borrowing queues from net stack, it must use the queue
-				 * with no csum offload, which is the first one of the channel
-				 * (note: channel->tx_queue_by_type is not initialized yet)
-				 */
-				if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
-					tx_queue = &channel->tx_queue[0];
-					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-					if (rc == 0)
-						xdp_queue_number++;
-				}
-			}
-		}
 	}
-	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
-		xdp_queue_number != efx->xdp_tx_queue_count);
-	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
-		xdp_queue_number > efx->xdp_tx_queue_count);
 
-	/* If we have more CPUs than assigned XDP TX queues, assign the already
-	 * existing queues to the exceeding CPUs
-	 */
-	next_queue = 0;
-	while (xdp_queue_number < efx->xdp_tx_queue_count) {
-		tx_queue = efx->xdp_tx_queues[next_queue++];
-		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-		if (rc == 0)
-			xdp_queue_number++;
-	}
+	efx_set_xdp_channels(efx);
 
 	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
 	if (rc)
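Note on the refactor: efx_set_xdp_channels() is a straight extraction of the XDP TX queue assignment that was previously inlined in efx_set_channels(), and the new call in efx_realloc_channels() re-points efx->xdp_tx_queues[] at the freshly reallocated channel structures. The only non-obvious step is the wrap-around at the end of the helper, which reuses already-assigned queues when xdp_tx_queue_count exceeds the number of real XDP TX queues. Below is a minimal standalone sketch of that wrap-around, not driver code; REAL_QUEUES and WANTED_SLOTS are hypothetical illustrative values.

#include <stdio.h>

/* Illustrative values, not from the driver: 4 real XDP TX queues,
 * 6 slots wanted (one per CPU, like xdp_tx_queue_count).
 */
#define REAL_QUEUES  4
#define WANTED_SLOTS 6

int main(void)
{
	int slot[WANTED_SLOTS];
	int assigned, next_queue;

	/* First pass: each real queue fills one slot, as in the
	 * efx_for_each_channel() walk of the patch.
	 */
	for (assigned = 0; assigned < REAL_QUEUES; assigned++)
		slot[assigned] = assigned;

	/* Wrap-around pass, mirroring the final while loop of
	 * efx_set_xdp_channels(): remaining slots reuse the queues
	 * assigned above, so those queues end up shared.
	 */
	next_queue = 0;
	while (assigned < WANTED_SLOTS)
		slot[assigned++] = slot[next_queue++];

	for (int i = 0; i < WANTED_SLOTS; i++)
		printf("slot %d -> XDP TXQ %d\n", i, slot[i]);
	return 0;
}

With 4 real queues and 6 slots, slots 4 and 5 reuse queues 0 and 1, so some CPUs share a TX queue; this matches the non-dedicated modes, where the second WARN_ON in the patch only requires xdp_queue_number not to exceed xdp_tx_queue_count.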