@@ -1003,6 +1003,8 @@ iavf_run_xdp_zc(struct iavf_ring *rx_ring, struct xdp_buff *xdp,
 	}
 
 	switch (xdp_act) {
+	case XDP_PASS:
+		break;
 	case XDP_TX:
 		err = iavf_xmit_xdp_buff_zc(xdp, xdp_ring);
 		if (unlikely(err))
@@ -1028,6 +1030,42 @@ iavf_run_xdp_zc(struct iavf_ring *rx_ring, struct xdp_buff *xdp,
 	return xdp_act;
 }
 
+/**
+ * iavf_construct_skb_zc - Create an sk_buff from zero-copy buffer
+ * @rx_ring: Rx ring
+ * @xdp: Pointer to XDP buffer
+ *
+ * This function allocates a new skb from a zero-copy Rx buffer.
+ *
+ * Returns the skb on success, NULL on failure.
+ */
+static struct sk_buff *
+iavf_construct_skb_zc(struct iavf_ring *rx_ring, struct xdp_buff *xdp)
+{
+	unsigned int totalsize = xdp->data_end - xdp->data_meta;
+	unsigned int metasize = xdp->data - xdp->data_meta;
+	struct sk_buff *skb;
+
+	net_prefetch(xdp->data_meta);
+
+	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
+			       GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!skb))
+		return NULL;
+
+	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+	       ALIGN(totalsize, sizeof(long)));
+
+	if (metasize) {
+		skb_metadata_set(skb, metasize);
+		__skb_pull(skb, metasize);
+	}
+
+	xsk_buff_free(xdp);
+
+	return skb;
+}
+
 /**
  * iavf_clean_rx_irq_zc - consumes packets from the hardware ring
  * @rx_ring: AF_XDP Rx ring
@@ -1053,6 +1091,8 @@ int iavf_clean_rx_irq_zc(struct iavf_ring *rx_ring, int budget)
 	while (likely(cleaned_count < budget)) {
 		union iavf_rx_desc *rx_desc;
 		struct xdp_buff *xdp;
+		unsigned int xdp_act;
+		struct sk_buff *skb;
 		unsigned int size;
 		u64 qword;
@@ -1087,8 +1127,10 @@ int iavf_clean_rx_irq_zc(struct iavf_ring *rx_ring, int budget)
 		xsk_buff_set_size(xdp, size);
 		xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
 
-		iavf_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring,
-				&rxq_xdp_act);
+		xdp_act = iavf_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring,
+					  &rxq_xdp_act);
+		if (xdp_act == XDP_PASS)
+			goto construct_skb;
 
 		if (unlikely(rxq_xdp_act & IAVF_RXQ_XDP_ACT_STOP_NOW)) {
 			failure = true;
@@ -1102,6 +1144,35 @@ int iavf_clean_rx_irq_zc(struct iavf_ring *rx_ring, int budget)
 		cleaned_count++;
 		if (unlikely(++ntc == ring_size))
 			ntc = 0;
+
+		continue;
+
+construct_skb:
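+		/* XDP_PASS: copy the frame from the XSK buffer into an skb */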
+		skb = iavf_construct_skb_zc(rx_ring, xdp);
+		if (!skb) {
+			rx_ring->rx_stats.alloc_buff_failed++;
+			break;
+		}
+
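+		/* the XSK buffer is already freed, advance past its descriptor */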
+		cleaned_count++;
+		if (unlikely(++ntc == ring_size))
+			ntc = 0;
+
+		prefetch(rx_desc);
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* populate checksum, VLAN, and protocol */
+		iavf_process_skb_fields(rx_ring, rx_desc, skb, qword);
+
+		iavf_trace(clean_rx_irq_zc_rx, rx_ring, rx_desc, skb);
+		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+		napi_gro_receive(&rx_ring->q_vector->napi, skb);
+
+		total_rx_packets++;
 	}
 
 	rx_ring->next_to_clean = ntc;