@@ -574,7 +574,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	dma_addr_t dma_addr;
 	unsigned char *dst;
 	int shift = 0;
-	int index;
+	int bufidx;
 	int i;
 
 	if (!pool->active)
@@ -590,14 +590,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	 * be 0.
 	 */
 	for (i = ind_bufp->index; i < count; ++i) {
-		index = pool->free_map[pool->next_free];
+		bufidx = pool->free_map[pool->next_free];
 
 		/* We maybe reusing the skb from earlier resets. Allocate
 		 * only if necessary. But since the LTB may have changed
 		 * during reset (see init_rx_pools()), update LTB below
 		 * even if reusing skb.
 		 */
-		skb = pool->rx_buff[index].skb;
+		skb = pool->rx_buff[bufidx].skb;
 		if (!skb) {
 			skb = netdev_alloc_skb(adapter->netdev,
 					       pool->buff_size);
@@ -612,24 +612,24 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 		pool->next_free = (pool->next_free + 1) % pool->size;
 
 		/* Copy the skb to the long term mapped DMA buffer */
-		offset = index * pool->buff_size;
+		offset = bufidx * pool->buff_size;
 		dst = pool->long_term_buff.buff + offset;
 		memset(dst, 0, pool->buff_size);
 		dma_addr = pool->long_term_buff.addr + offset;
 
 		/* add the skb to an rx_buff in the pool */
-		pool->rx_buff[index].data = dst;
-		pool->rx_buff[index].dma = dma_addr;
-		pool->rx_buff[index].skb = skb;
-		pool->rx_buff[index].pool_index = pool->index;
-		pool->rx_buff[index].size = pool->buff_size;
+		pool->rx_buff[bufidx].data = dst;
+		pool->rx_buff[bufidx].dma = dma_addr;
+		pool->rx_buff[bufidx].skb = skb;
+		pool->rx_buff[bufidx].pool_index = pool->index;
+		pool->rx_buff[bufidx].size = pool->buff_size;
 
 		/* queue the rx_buff for the next send_subcrq_indirect */
 		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
 		memset(sub_crq, 0, sizeof(*sub_crq));
 		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
 		sub_crq->rx_add.correlator =
-			cpu_to_be64((u64)&pool->rx_buff[index]);
+			cpu_to_be64((u64)&pool->rx_buff[bufidx]);
 		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
 		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
 
@@ -671,10 +671,10 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 		sub_crq = &ind_bufp->indir_arr[i];
 		rx_buff = (struct ibmvnic_rx_buff *)
 				be64_to_cpu(sub_crq->rx_add.correlator);
-		index = (int)(rx_buff - pool->rx_buff);
-		pool->free_map[pool->next_free] = index;
-		dev_kfree_skb_any(pool->rx_buff[index].skb);
-		pool->rx_buff[index].skb = NULL;
+		bufidx = (int)(rx_buff - pool->rx_buff);
+		pool->free_map[pool->next_free] = bufidx;
+		dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
+		pool->rx_buff[bufidx].skb = NULL;
 	}
 	adapter->replenish_add_buff_failure += ind_bufp->index;
 	atomic_add(buffers_added, &pool->available);
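
The rx hunks above also show the correlator round trip that the renamed variable feeds: when a buffer is queued, the address of its pool entry is stored as a 64-bit correlator, and the failure path recovers the buffer index from it by pointer subtraction. A minimal standalone sketch of that pattern (plain C, no kernel headers; pool, cookie, entry and recovered are illustrative names, not from ibmvnic.c):

/* Illustration of the correlator round trip seen above: stash the
 * address of a pool entry as a 64-bit cookie, then recover the index
 * with pointer arithmetic, as bufidx = (int)(rx_buff - pool->rx_buff)
 * does in the driver.  Not driver code; names are made up.
 */
#include <stdint.h>
#include <stdio.h>

struct rx_buff { void *skb; };

int main(void)
{
	struct rx_buff pool[8];
	int bufidx = 5;

	/* queue side: like cpu_to_be64((u64)&pool->rx_buff[bufidx]) */
	uint64_t cookie = (uint64_t)(uintptr_t)&pool[bufidx];

	/* error path: cast the cookie back and recover the index */
	struct rx_buff *entry = (struct rx_buff *)(uintptr_t)cookie;
	int recovered = (int)(entry - pool);

	printf("recovered bufidx = %d\n", recovered);	/* prints 5 */
	return 0;
}
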
@@ -2205,7 +2205,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int offset;
 	int num_entries = 1;
 	unsigned char *dst;
-	int index = 0;
+	int bufidx = 0;
 	u8 proto = 0;
 
 	/* If a reset is in progress, drop the packet since
@@ -2239,9 +2239,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	else
 		tx_pool = &adapter->tx_pool[queue_num];
 
-	index = tx_pool->free_map[tx_pool->consumer_index];
+	bufidx = tx_pool->free_map[tx_pool->consumer_index];
 
-	if (index == IBMVNIC_INVALID_MAP) {
+	if (bufidx == IBMVNIC_INVALID_MAP) {
 		dev_kfree_skb_any(skb);
 		tx_send_failed++;
 		tx_dropped++;
@@ -2252,7 +2252,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
 
-	offset = index * tx_pool->buf_size;
+	offset = bufidx * tx_pool->buf_size;
 	dst = tx_pool->long_term_buff.buff + offset;
 	memset(dst, 0, tx_pool->buf_size);
 	data_dma_addr = tx_pool->long_term_buff.addr + offset;
@@ -2282,9 +2282,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tx_pool->consumer_index =
 		(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
 
-	tx_buff = &tx_pool->tx_buff[index];
+	tx_buff = &tx_pool->tx_buff[bufidx];
 	tx_buff->skb = skb;
-	tx_buff->index = index;
+	tx_buff->index = bufidx;
 	tx_buff->pool_index = queue_num;
 
 	memset(&tx_crq, 0, sizeof(tx_crq));
@@ -2296,9 +2296,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if (skb_is_gso(skb))
 		tx_crq.v1.correlator =
-			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
+			cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
 	else
-		tx_crq.v1.correlator = cpu_to_be32(index);
+		tx_crq.v1.correlator = cpu_to_be32(bufidx);
 	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
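
For context on the xmit-side hunks: bufidx is read from the pool's free_map at consumer_index, the slot is poisoned with IBMVNIC_INVALID_MAP, and the cursor later advances modulo the pool size. A rough userspace sketch of that claim step, loosely modeled on the hunks above (toy_pool, claim_buf, POOL_SIZE and INVALID_MAP's value here are made-up, not part of the driver):

/* Toy model of the free-map claim done in ibmvnic_xmit() above:
 * read the next free buffer index, mark the slot invalid, advance
 * the consumer cursor.  Illustration only, not driver code.
 */
#include <stdio.h>

#define POOL_SIZE 4
#define INVALID_MAP (-1)

struct toy_pool {
	int free_map[POOL_SIZE];	/* indices of free buffers */
	int consumer_index;		/* next slot to claim from */
};

/* Return a free buffer index, or INVALID_MAP if the slot is empty. */
static int claim_buf(struct toy_pool *p)
{
	int bufidx = p->free_map[p->consumer_index];

	if (bufidx == INVALID_MAP)
		return INVALID_MAP;
	p->free_map[p->consumer_index] = INVALID_MAP;
	p->consumer_index = (p->consumer_index + 1) % POOL_SIZE;
	return bufidx;
}

int main(void)
{
	struct toy_pool p = { { 0, 1, 2, 3 }, 0 };

	printf("claimed bufidx %d\n", claim_buf(&p));	/* prints 0 */
	printf("claimed bufidx %d\n", claim_buf(&p));	/* prints 1 */
	return 0;
}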