drivers/net/ethernet/brocade/bna/bnad.c  (cascardo/linux.git, commit 696bbae36c889382d9f1aa8ef1b09f5e92602089)
1 /*
2  * Linux network driver for QLogic BR-series Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15  * Copyright (c) 2014-2015 QLogic Corporation
16  * All rights reserved
17  * www.qlogic.com
18  */
19 #include <linux/bitops.h>
20 #include <linux/netdevice.h>
21 #include <linux/skbuff.h>
22 #include <linux/etherdevice.h>
23 #include <linux/in.h>
24 #include <linux/ethtool.h>
25 #include <linux/if_vlan.h>
26 #include <linux/if_ether.h>
27 #include <linux/ip.h>
28 #include <linux/prefetch.h>
29 #include <linux/module.h>
30
31 #include "bnad.h"
32 #include "bna.h"
33 #include "cna.h"
34
35 static DEFINE_MUTEX(bnad_fwimg_mutex);
36
37 /*
38  * Module params
39  */
40 static uint bnad_msix_disable;
41 module_param(bnad_msix_disable, uint, 0444);
42 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
43
44 static uint bnad_ioc_auto_recover = 1;
45 module_param(bnad_ioc_auto_recover, uint, 0444);
46 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
47
48 static uint bna_debugfs_enable = 1;
49 module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
50 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
51                  " Range[false:0|true:1]");
52
53 /*
54  * Global variables
55  */
56 static u32 bnad_rxqs_per_cq = 2;
57 static u32 bna_id;
58 static struct mutex bnad_list_mutex;
59 static const u8 bnad_bcast_addr[] __aligned(2) =
60         { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
61
62 /*
63  * Local MACROS
64  */
65 #define BNAD_GET_MBOX_IRQ(_bnad)                                \
66         (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?                  \
67          ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
68          ((_bnad)->pcidev->irq))
69
70 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)        \
71 do {                                                            \
72         (_res_info)->res_type = BNA_RES_T_MEM;                  \
73         (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
74         (_res_info)->res_u.mem_info.num = (_num);               \
75         (_res_info)->res_u.mem_info.len = (_size);              \
76 } while (0)
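/*
 * BNAD_FILL_UNMAPQ_MEM_REQ() fills one bna_res_info slot with a request
 * for kernel-virtual (KVA) memory: 'num' chunks of 'size' bytes each,
 * used for the Tx/Rx unmap queues.  A hypothetical call (slot index and
 * depth names shown only for illustration) would look like:
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[unmapq_slot],
 *				 bnad->num_txq_per_tx,
 *				 txq_depth * sizeof(struct bnad_tx_unmap));
 */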
77
78 static void
79 bnad_add_to_list(struct bnad *bnad)
80 {
81         mutex_lock(&bnad_list_mutex);
82         bnad->id = bna_id++;
83         mutex_unlock(&bnad_list_mutex);
84 }
85
86 static void
87 bnad_remove_from_list(struct bnad *bnad)
88 {
89         mutex_lock(&bnad_list_mutex);
90         mutex_unlock(&bnad_list_mutex);
91 }
92
93 /*
94  * Reinitialize completions in CQ, once Rx is taken down
95  */
96 static void
97 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
98 {
99         struct bna_cq_entry *cmpl;
100         int i;
101
102         for (i = 0; i < ccb->q_depth; i++) {
103                 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
104                 cmpl->valid = 0;
105         }
106 }
107
108 /* Tx Datapath functions */
109
110
111 /* Caller should ensure that the entry at unmap_q[index] is valid */
112 static u32
113 bnad_tx_buff_unmap(struct bnad *bnad,
114                               struct bnad_tx_unmap *unmap_q,
115                               u32 q_depth, u32 index)
116 {
117         struct bnad_tx_unmap *unmap;
118         struct sk_buff *skb;
119         int vector, nvecs;
120
121         unmap = &unmap_q[index];
122         nvecs = unmap->nvecs;
123
124         skb = unmap->skb;
125         unmap->skb = NULL;
126         unmap->nvecs = 0;
127         dma_unmap_single(&bnad->pcidev->dev,
128                 dma_unmap_addr(&unmap->vectors[0], dma_addr),
129                 skb_headlen(skb), DMA_TO_DEVICE);
130         dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
131         nvecs--;
132
133         vector = 0;
134         while (nvecs) {
135                 vector++;
136                 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
137                         vector = 0;
138                         BNA_QE_INDX_INC(index, q_depth);
139                         unmap = &unmap_q[index];
140                 }
141
142                 dma_unmap_page(&bnad->pcidev->dev,
143                         dma_unmap_addr(&unmap->vectors[vector], dma_addr),
144                         dma_unmap_len(&unmap->vectors[vector], dma_len),
145                         DMA_TO_DEVICE);
146                 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
147                 nvecs--;
148         }
149
150         BNA_QE_INDX_INC(index, q_depth);
151
152         return index;
153 }
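/*
 * Note on bnad_tx_buff_unmap(): an skb whose fragment count exceeds
 * BFI_TX_MAX_VECTORS_PER_WI spans several consecutive unmap_q entries,
 * so the vector counter wraps to 0 and 'index' advances to the next
 * work item.  The returned index points just past the last entry used
 * by this skb, which is what the completion path feeds back in as the
 * new consumer index.
 */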
154
155 /*
156  * Frees all pending Tx Bufs
157  * At this point no activity is expected on the Q,
158  * so DMA unmap & freeing is fine.
159  */
160 static void
161 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
162 {
163         struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
164         struct sk_buff *skb;
165         int i;
166
167         for (i = 0; i < tcb->q_depth; i++) {
168                 skb = unmap_q[i].skb;
169                 if (!skb)
170                         continue;
171                 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
172
173                 dev_kfree_skb_any(skb);
174         }
175 }
176
177 /*
178  * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
179  * Can be called in a) Interrupt context
180  *                  b) Sending context
181  */
182 static u32
183 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
184 {
185         u32 sent_packets = 0, sent_bytes = 0;
186         u32 wis, unmap_wis, hw_cons, cons, q_depth;
187         struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
188         struct bnad_tx_unmap *unmap;
189         struct sk_buff *skb;
190
191         /* Just return if TX is stopped */
192         if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
193                 return 0;
194
195         hw_cons = *(tcb->hw_consumer_index);
196         cons = tcb->consumer_index;
197         q_depth = tcb->q_depth;
198
199         wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
200         BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
201
202         while (wis) {
203                 unmap = &unmap_q[cons];
204
205                 skb = unmap->skb;
206
207                 sent_packets++;
208                 sent_bytes += skb->len;
209
210                 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
211                 wis -= unmap_wis;
212
213                 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
214                 dev_kfree_skb_any(skb);
215         }
216
217         /* Update consumer pointers. */
218         tcb->consumer_index = hw_cons;
219
220         tcb->txq->tx_packets += sent_packets;
221         tcb->txq->tx_bytes += sent_bytes;
222
223         return sent_packets;
224 }
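/*
 * The work-item count 'wis' is the distance between the driver's
 * consumer_index and the hw_consumer_index that the adapter updates in
 * host memory.  Each transmitted skb accounts for
 * BNA_TXQ_WI_NEEDED(nvecs) work items, so 'wis' is decremented per skb
 * rather than per queue entry.
 */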
225
226 static u32
227 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
228 {
229         struct net_device *netdev = bnad->netdev;
230         u32 sent = 0;
231
232         if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
233                 return 0;
234
235         sent = bnad_txcmpl_process(bnad, tcb);
236         if (sent) {
237                 if (netif_queue_stopped(netdev) &&
238                     netif_carrier_ok(netdev) &&
239                     BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
240                                     BNAD_NETIF_WAKE_THRESHOLD) {
241                         if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
242                                 netif_wake_queue(netdev);
243                                 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
244                         }
245                 }
246         }
247
248         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
249                 bna_ib_ack(tcb->i_dbell, sent);
250
251         smp_mb__before_atomic();
252         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
253
254         return sent;
255 }
256
257 /* MSIX Tx Completion Handler */
258 static irqreturn_t
259 bnad_msix_tx(int irq, void *data)
260 {
261         struct bna_tcb *tcb = (struct bna_tcb *)data;
262         struct bnad *bnad = tcb->bnad;
263
264         bnad_tx_complete(bnad, tcb);
265
266         return IRQ_HANDLED;
267 }
268
269 static inline void
270 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
271 {
272         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
273
274         unmap_q->reuse_pi = -1;
275         unmap_q->alloc_order = -1;
276         unmap_q->map_size = 0;
277         unmap_q->type = BNAD_RXBUF_NONE;
278 }
279
280 /* Default is page-based allocation. Multi-buffer support - TBD */
281 static int
282 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
283 {
284         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
285         int order;
286
287         bnad_rxq_alloc_uninit(bnad, rcb);
288
289         order = get_order(rcb->rxq->buffer_size);
290
291         unmap_q->type = BNAD_RXBUF_PAGE;
292
293         if (bna_is_small_rxq(rcb->id)) {
294                 unmap_q->alloc_order = 0;
295                 unmap_q->map_size = rcb->rxq->buffer_size;
296         } else {
297                 if (rcb->rxq->multi_buffer) {
298                         unmap_q->alloc_order = 0;
299                         unmap_q->map_size = rcb->rxq->buffer_size;
300                         unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
301                 } else {
302                         unmap_q->alloc_order = order;
303                         unmap_q->map_size =
304                                 (rcb->rxq->buffer_size > 2048) ?
305                                 PAGE_SIZE << order : 2048;
306                 }
307         }
308
309         BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
310
311         return 0;
312 }
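/*
 * Sizing example, assuming 4 KiB pages: with buffer_size <= 2048 on the
 * large RxQ (and no multi-buffer), get_order() returns 0 and map_size
 * is clamped to 2048, so two receive buffers share one page via the
 * reuse_pi logic in the refill path below.  With a jumbo buffer_size of
 * about 9000 bytes, get_order() returns 2 and map_size becomes
 * PAGE_SIZE << 2 = 16 KiB, i.e. one buffer per allocation.  The BUG_ON()
 * asserts that map_size always divides the allocation evenly.
 */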
313
314 static inline void
315 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
316 {
317         if (!unmap->page)
318                 return;
319
320         dma_unmap_page(&bnad->pcidev->dev,
321                         dma_unmap_addr(&unmap->vector, dma_addr),
322                         unmap->vector.len, DMA_FROM_DEVICE);
323         put_page(unmap->page);
324         unmap->page = NULL;
325         dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
326         unmap->vector.len = 0;
327 }
328
329 static inline void
330 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
331 {
332         if (!unmap->skb)
333                 return;
334
335         dma_unmap_single(&bnad->pcidev->dev,
336                         dma_unmap_addr(&unmap->vector, dma_addr),
337                         unmap->vector.len, DMA_FROM_DEVICE);
338         dev_kfree_skb_any(unmap->skb);
339         unmap->skb = NULL;
340         dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
341         unmap->vector.len = 0;
342 }
343
344 static void
345 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
346 {
347         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
348         int i;
349
350         for (i = 0; i < rcb->q_depth; i++) {
351                 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
352
353                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
354                         bnad_rxq_cleanup_skb(bnad, unmap);
355                 else
356                         bnad_rxq_cleanup_page(bnad, unmap);
357         }
358         bnad_rxq_alloc_uninit(bnad, rcb);
359 }
360
361 static u32
362 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
363 {
364         u32 alloced, prod, q_depth;
365         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
366         struct bnad_rx_unmap *unmap, *prev;
367         struct bna_rxq_entry *rxent;
368         struct page *page;
369         u32 page_offset, alloc_size;
370         dma_addr_t dma_addr;
371
372         prod = rcb->producer_index;
373         q_depth = rcb->q_depth;
374
375         alloc_size = PAGE_SIZE << unmap_q->alloc_order;
376         alloced = 0;
377
378         while (nalloc--) {
379                 unmap = &unmap_q->unmap[prod];
380
381                 if (unmap_q->reuse_pi < 0) {
382                         page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
383                                         unmap_q->alloc_order);
384                         page_offset = 0;
385                 } else {
386                         prev = &unmap_q->unmap[unmap_q->reuse_pi];
387                         page = prev->page;
388                         page_offset = prev->page_offset + unmap_q->map_size;
389                         get_page(page);
390                 }
391
392                 if (unlikely(!page)) {
393                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
394                         rcb->rxq->rxbuf_alloc_failed++;
395                         goto finishing;
396                 }
397
398                 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
399                                         unmap_q->map_size, DMA_FROM_DEVICE);
400                 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
401                         put_page(page);
402                         BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
403                         rcb->rxq->rxbuf_map_failed++;
404                         goto finishing;
405                 }
406
407                 unmap->page = page;
408                 unmap->page_offset = page_offset;
409                 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
410                 unmap->vector.len = unmap_q->map_size;
411                 page_offset += unmap_q->map_size;
412
413                 if (page_offset < alloc_size)
414                         unmap_q->reuse_pi = prod;
415                 else
416                         unmap_q->reuse_pi = -1;
417
418                 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
419                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
420                 BNA_QE_INDX_INC(prod, q_depth);
421                 alloced++;
422         }
423
424 finishing:
425         if (likely(alloced)) {
426                 rcb->producer_index = prod;
427                 smp_mb();
428                 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
429                         bna_rxq_prod_indx_doorbell(rcb);
430         }
431
432         return alloced;
433 }
434
435 static u32
436 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
437 {
438         u32 alloced, prod, q_depth, buff_sz;
439         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
440         struct bnad_rx_unmap *unmap;
441         struct bna_rxq_entry *rxent;
442         struct sk_buff *skb;
443         dma_addr_t dma_addr;
444
445         buff_sz = rcb->rxq->buffer_size;
446         prod = rcb->producer_index;
447         q_depth = rcb->q_depth;
448
449         alloced = 0;
450         while (nalloc--) {
451                 unmap = &unmap_q->unmap[prod];
452
453                 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
454
455                 if (unlikely(!skb)) {
456                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
457                         rcb->rxq->rxbuf_alloc_failed++;
458                         goto finishing;
459                 }
460
461                 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
462                                           buff_sz, DMA_FROM_DEVICE);
463                 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
464                         dev_kfree_skb_any(skb);
465                         BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
466                         rcb->rxq->rxbuf_map_failed++;
467                         goto finishing;
468                 }
469
470                 unmap->skb = skb;
471                 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
472                 unmap->vector.len = buff_sz;
473
474                 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
475                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
476                 BNA_QE_INDX_INC(prod, q_depth);
477                 alloced++;
478         }
479
480 finishing:
481         if (likely(alloced)) {
482                 rcb->producer_index = prod;
483                 smp_mb();
484                 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
485                         bna_rxq_prod_indx_doorbell(rcb);
486         }
487
488         return alloced;
489 }
490
491 static inline void
492 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
493 {
494         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
495         u32 to_alloc;
496
497         to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
498         if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
499                 return;
500
501         if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
502                 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
503         else
504                 bnad_rxq_refill_page(bnad, rcb, to_alloc);
505 }
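/*
 * Refill is batched on purpose: bnad_rxq_post() returns early unless at
 * least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT queue entries are free, so the
 * producer doorbell is not rung for every single received buffer.
 */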
506
507 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
508                                         BNA_CQ_EF_IPV6 | \
509                                         BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
510                                         BNA_CQ_EF_L4_CKSUM_OK)
511
512 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
513                                 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
514 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
515                                 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
516 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
517                                 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
518 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
519                                 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
520
521 static void
522 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
523                     u32 sop_ci, u32 nvecs)
524 {
525         struct bnad_rx_unmap_q *unmap_q;
526         struct bnad_rx_unmap *unmap;
527         u32 ci, vec;
528
529         unmap_q = rcb->unmap_q;
530         for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
531                 unmap = &unmap_q->unmap[ci];
532                 BNA_QE_INDX_INC(ci, rcb->q_depth);
533
534                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
535                         bnad_rxq_cleanup_skb(bnad, unmap);
536                 else
537                         bnad_rxq_cleanup_page(bnad, unmap);
538         }
539 }
540
541 static void
542 bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
543 {
544         struct bna_rcb *rcb;
545         struct bnad *bnad;
546         struct bnad_rx_unmap_q *unmap_q;
547         struct bna_cq_entry *cq, *cmpl;
548         u32 ci, pi, totlen = 0;
549
550         cq = ccb->sw_q;
551         pi = ccb->producer_index;
552         cmpl = &cq[pi];
553
554         rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
555         unmap_q = rcb->unmap_q;
556         bnad = rcb->bnad;
557         ci = rcb->consumer_index;
558
559         /* prefetch header */
560         prefetch(page_address(unmap_q->unmap[ci].page) +
561                  unmap_q->unmap[ci].page_offset);
562
563         while (nvecs--) {
564                 struct bnad_rx_unmap *unmap;
565                 u32 len;
566
567                 unmap = &unmap_q->unmap[ci];
568                 BNA_QE_INDX_INC(ci, rcb->q_depth);
569
570                 dma_unmap_page(&bnad->pcidev->dev,
571                                dma_unmap_addr(&unmap->vector, dma_addr),
572                                unmap->vector.len, DMA_FROM_DEVICE);
573
574                 len = ntohs(cmpl->length);
575                 skb->truesize += unmap->vector.len;
576                 totlen += len;
577
578                 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
579                                    unmap->page, unmap->page_offset, len);
580
581                 unmap->page = NULL;
582                 unmap->vector.len = 0;
583
584                 BNA_QE_INDX_INC(pi, ccb->q_depth);
585                 cmpl = &cq[pi];
586         }
587
588         skb->len += totlen;
589         skb->data_len += totlen;
590 }
591
592 static inline void
593 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
594                   struct bnad_rx_unmap *unmap, u32 len)
595 {
596         prefetch(skb->data);
597
598         dma_unmap_single(&bnad->pcidev->dev,
599                         dma_unmap_addr(&unmap->vector, dma_addr),
600                         unmap->vector.len, DMA_FROM_DEVICE);
601
602         skb_put(skb, len);
603         skb->protocol = eth_type_trans(skb, bnad->netdev);
604
605         unmap->skb = NULL;
606         unmap->vector.len = 0;
607 }
608
609 static u32
610 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
611 {
612         struct bna_cq_entry *cq, *cmpl, *next_cmpl;
613         struct bna_rcb *rcb = NULL;
614         struct bnad_rx_unmap_q *unmap_q;
615         struct bnad_rx_unmap *unmap = NULL;
616         struct sk_buff *skb = NULL;
617         struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
618         struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
619         u32 packets = 0, len = 0, totlen = 0;
620         u32 pi, vec, sop_ci = 0, nvecs = 0;
621         u32 flags, masked_flags;
622
623         prefetch(bnad->netdev);
624
625         cq = ccb->sw_q;
626
627         while (packets < budget) {
628                 cmpl = &cq[ccb->producer_index];
629                 if (!cmpl->valid)
630                         break;
631                 /* The 'valid' field is set by the adapter, only after writing
632                  * the other fields of completion entry. Hence, do not load
633                  * other fields of completion entry *before* the 'valid' is
634                  * loaded. Adding the rmb() here prevents the compiler and/or
635                  * CPU from reordering the reads which would potentially result
636                  * in reading stale values in completion entry.
637                  */
638                 rmb();
639
640                 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
641
642                 if (bna_is_small_rxq(cmpl->rxq_id))
643                         rcb = ccb->rcb[1];
644                 else
645                         rcb = ccb->rcb[0];
646
647                 unmap_q = rcb->unmap_q;
648
649                 /* start of packet ci */
650                 sop_ci = rcb->consumer_index;
651
652                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
653                         unmap = &unmap_q->unmap[sop_ci];
654                         skb = unmap->skb;
655                 } else {
656                         skb = napi_get_frags(&rx_ctrl->napi);
657                         if (unlikely(!skb))
658                                 break;
659                 }
660                 prefetch(skb);
661
662                 flags = ntohl(cmpl->flags);
663                 len = ntohs(cmpl->length);
664                 totlen = len;
665                 nvecs = 1;
666
667                 /* Gather all the completions for this frame. If EOP
668                  * has not arrived yet, don't busy-wait - break here.
669                  */
670                 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
671                     (flags & BNA_CQ_EF_EOP) == 0) {
672                         pi = ccb->producer_index;
673                         do {
674                                 BNA_QE_INDX_INC(pi, ccb->q_depth);
675                                 next_cmpl = &cq[pi];
676
677                                 if (!next_cmpl->valid)
678                                         break;
679                                 /* The 'valid' field is set by the adapter, only
680                                  * after writing the other fields of completion
681                                  * entry. Hence, do not load other fields of
682                                  * completion entry *before* the 'valid' is
683                                  * loaded. Adding the rmb() here prevents the
684                                  * compiler and/or CPU from reordering the reads
685                                  * which would potentially result in reading
686                                  * stale values in completion entry.
687                                  */
688                                 rmb();
689
690                                 len = ntohs(next_cmpl->length);
691                                 flags = ntohl(next_cmpl->flags);
692
693                                 nvecs++;
694                                 totlen += len;
695                         } while ((flags & BNA_CQ_EF_EOP) == 0);
696
697                         if (!next_cmpl->valid)
698                                 break;
699                 }
700                 packets++;
701
702                 /* TODO: BNA_CQ_EF_LOCAL ? */
703                 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
704                                                 BNA_CQ_EF_FCS_ERROR |
705                                                 BNA_CQ_EF_TOO_LONG))) {
706                         bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
707                         rcb->rxq->rx_packets_with_error++;
708
709                         goto next;
710                 }
711
712                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
713                         bnad_cq_setup_skb(bnad, skb, unmap, len);
714                 else
715                         bnad_cq_setup_skb_frags(ccb, skb, nvecs);
716
717                 rcb->rxq->rx_packets++;
718                 rcb->rxq->rx_bytes += totlen;
719                 ccb->bytes_per_intr += totlen;
720
721                 masked_flags = flags & flags_cksum_prot_mask;
722
723                 if (likely
724                     ((bnad->netdev->features & NETIF_F_RXCSUM) &&
725                      ((masked_flags == flags_tcp4) ||
726                       (masked_flags == flags_udp4) ||
727                       (masked_flags == flags_tcp6) ||
728                       (masked_flags == flags_udp6))))
729                         skb->ip_summed = CHECKSUM_UNNECESSARY;
730                 else
731                         skb_checksum_none_assert(skb);
732
733                 if ((flags & BNA_CQ_EF_VLAN) &&
734                     (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
735                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
736
737                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
738                         netif_receive_skb(skb);
739                 else
740                         napi_gro_frags(&rx_ctrl->napi);
741
742 next:
743                 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
744                 for (vec = 0; vec < nvecs; vec++) {
745                         cmpl = &cq[ccb->producer_index];
746                         cmpl->valid = 0;
747                         BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
748                 }
749         }
750
751         napi_gro_flush(&rx_ctrl->napi, false);
752         if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
753                 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
754
755         bnad_rxq_post(bnad, ccb->rcb[0]);
756         if (ccb->rcb[1])
757                 bnad_rxq_post(bnad, ccb->rcb[1]);
758
759         return packets;
760 }
761
762 static void
763 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
764 {
765         struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
766         struct napi_struct *napi = &rx_ctrl->napi;
767
768         if (likely(napi_schedule_prep(napi))) {
769                 __napi_schedule(napi);
770                 rx_ctrl->rx_schedule++;
771         }
772 }
773
774 /* MSIX Rx Path Handler */
775 static irqreturn_t
776 bnad_msix_rx(int irq, void *data)
777 {
778         struct bna_ccb *ccb = (struct bna_ccb *)data;
779
780         if (ccb) {
781                 ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
782                 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
783         }
784
785         return IRQ_HANDLED;
786 }
787
788 /* Interrupt handlers */
789
790 /* Mbox Interrupt Handlers */
791 static irqreturn_t
792 bnad_msix_mbox_handler(int irq, void *data)
793 {
794         u32 intr_status;
795         unsigned long flags;
796         struct bnad *bnad = (struct bnad *)data;
797
798         spin_lock_irqsave(&bnad->bna_lock, flags);
799         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
800                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
801                 return IRQ_HANDLED;
802         }
803
804         bna_intr_status_get(&bnad->bna, intr_status);
805
806         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
807                 bna_mbox_handler(&bnad->bna, intr_status);
808
809         spin_unlock_irqrestore(&bnad->bna_lock, flags);
810
811         return IRQ_HANDLED;
812 }
813
814 static irqreturn_t
815 bnad_isr(int irq, void *data)
816 {
817         int i, j;
818         u32 intr_status;
819         unsigned long flags;
820         struct bnad *bnad = (struct bnad *)data;
821         struct bnad_rx_info *rx_info;
822         struct bnad_rx_ctrl *rx_ctrl;
823         struct bna_tcb *tcb = NULL;
824
825         spin_lock_irqsave(&bnad->bna_lock, flags);
826         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
827                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
828                 return IRQ_NONE;
829         }
830
831         bna_intr_status_get(&bnad->bna, intr_status);
832
833         if (unlikely(!intr_status)) {
834                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
835                 return IRQ_NONE;
836         }
837
838         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
839                 bna_mbox_handler(&bnad->bna, intr_status);
840
841         spin_unlock_irqrestore(&bnad->bna_lock, flags);
842
843         if (!BNA_IS_INTX_DATA_INTR(intr_status))
844                 return IRQ_HANDLED;
845
846         /* Process data interrupts */
847         /* Tx processing */
848         for (i = 0; i < bnad->num_tx; i++) {
849                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
850                         tcb = bnad->tx_info[i].tcb[j];
851                         if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
852                                 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
853                 }
854         }
855         /* Rx processing */
856         for (i = 0; i < bnad->num_rx; i++) {
857                 rx_info = &bnad->rx_info[i];
858                 if (!rx_info->rx)
859                         continue;
860                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
861                         rx_ctrl = &rx_info->rx_ctrl[j];
862                         if (rx_ctrl->ccb)
863                                 bnad_netif_rx_schedule_poll(bnad,
864                                                             rx_ctrl->ccb);
865                 }
866         }
867         return IRQ_HANDLED;
868 }
869
870 /*
871  * Called in interrupt / callback context
872  * with bna_lock held, so cfg_flags access is OK
873  */
874 static void
875 bnad_enable_mbox_irq(struct bnad *bnad)
876 {
877         clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
878
879         BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
880 }
881
882 /*
883  * Called with bnad->bna_lock held because of
884  * bnad->cfg_flags access.
885  */
886 static void
887 bnad_disable_mbox_irq(struct bnad *bnad)
888 {
889         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
890
891         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
892 }
893
894 static void
895 bnad_set_netdev_perm_addr(struct bnad *bnad)
896 {
897         struct net_device *netdev = bnad->netdev;
898
899         ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
900         if (is_zero_ether_addr(netdev->dev_addr))
901                 ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
902 }
903
904 /* Control Path Handlers */
905
906 /* Callbacks */
907 void
908 bnad_cb_mbox_intr_enable(struct bnad *bnad)
909 {
910         bnad_enable_mbox_irq(bnad);
911 }
912
913 void
914 bnad_cb_mbox_intr_disable(struct bnad *bnad)
915 {
916         bnad_disable_mbox_irq(bnad);
917 }
918
919 void
920 bnad_cb_ioceth_ready(struct bnad *bnad)
921 {
922         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
923         complete(&bnad->bnad_completions.ioc_comp);
924 }
925
926 void
927 bnad_cb_ioceth_failed(struct bnad *bnad)
928 {
929         bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
930         complete(&bnad->bnad_completions.ioc_comp);
931 }
932
933 void
934 bnad_cb_ioceth_disabled(struct bnad *bnad)
935 {
936         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
937         complete(&bnad->bnad_completions.ioc_comp);
938 }
939
940 static void
941 bnad_cb_enet_disabled(void *arg)
942 {
943         struct bnad *bnad = (struct bnad *)arg;
944
945         netif_carrier_off(bnad->netdev);
946         complete(&bnad->bnad_completions.enet_comp);
947 }
948
949 void
950 bnad_cb_ethport_link_status(struct bnad *bnad,
951                         enum bna_link_status link_status)
952 {
953         bool link_up = false;
954
955         link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
956
957         if (link_status == BNA_CEE_UP) {
958                 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
959                         BNAD_UPDATE_CTR(bnad, cee_toggle);
960                 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
961         } else {
962                 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
963                         BNAD_UPDATE_CTR(bnad, cee_toggle);
964                 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
965         }
966
967         if (link_up) {
968                 if (!netif_carrier_ok(bnad->netdev)) {
969                         uint tx_id, tcb_id;
970                         netdev_info(bnad->netdev, "link up\n");
971                         netif_carrier_on(bnad->netdev);
972                         BNAD_UPDATE_CTR(bnad, link_toggle);
973                         for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
974                                 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
975                                       tcb_id++) {
976                                         struct bna_tcb *tcb =
977                                         bnad->tx_info[tx_id].tcb[tcb_id];
978                                         u32 txq_id;
979                                         if (!tcb)
980                                                 continue;
981
982                                         txq_id = tcb->id;
983
984                                         if (test_bit(BNAD_TXQ_TX_STARTED,
985                                                      &tcb->flags)) {
986                                                 /*
987                                                  * Force an immediate
988                                                  * Transmit Schedule */
989                                                 netif_wake_subqueue(
990                                                                 bnad->netdev,
991                                                                 txq_id);
992                                                 BNAD_UPDATE_CTR(bnad,
993                                                         netif_queue_wakeup);
994                                         } else {
995                                                 netif_stop_subqueue(
996                                                                 bnad->netdev,
997                                                                 txq_id);
998                                                 BNAD_UPDATE_CTR(bnad,
999                                                         netif_queue_stop);
1000                                         }
1001                                 }
1002                         }
1003                 }
1004         } else {
1005                 if (netif_carrier_ok(bnad->netdev)) {
1006                         netdev_info(bnad->netdev, "link down\n");
1007                         netif_carrier_off(bnad->netdev);
1008                         BNAD_UPDATE_CTR(bnad, link_toggle);
1009                 }
1010         }
1011 }
1012
1013 static void
1014 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
1015 {
1016         struct bnad *bnad = (struct bnad *)arg;
1017
1018         complete(&bnad->bnad_completions.tx_comp);
1019 }
1020
1021 static void
1022 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1023 {
1024         struct bnad_tx_info *tx_info =
1025                         (struct bnad_tx_info *)tcb->txq->tx->priv;
1026
1027         tcb->priv = tcb;
1028         tx_info->tcb[tcb->id] = tcb;
1029 }
1030
1031 static void
1032 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1033 {
1034         struct bnad_tx_info *tx_info =
1035                         (struct bnad_tx_info *)tcb->txq->tx->priv;
1036
1037         tx_info->tcb[tcb->id] = NULL;
1038         tcb->priv = NULL;
1039 }
1040
1041 static void
1042 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1043 {
1044         struct bnad_rx_info *rx_info =
1045                         (struct bnad_rx_info *)ccb->cq->rx->priv;
1046
1047         rx_info->rx_ctrl[ccb->id].ccb = ccb;
1048         ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1049 }
1050
1051 static void
1052 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1053 {
1054         struct bnad_rx_info *rx_info =
1055                         (struct bnad_rx_info *)ccb->cq->rx->priv;
1056
1057         rx_info->rx_ctrl[ccb->id].ccb = NULL;
1058 }
1059
1060 static void
1061 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1062 {
1063         struct bnad_tx_info *tx_info =
1064                         (struct bnad_tx_info *)tx->priv;
1065         struct bna_tcb *tcb;
1066         u32 txq_id;
1067         int i;
1068
1069         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1070                 tcb = tx_info->tcb[i];
1071                 if (!tcb)
1072                         continue;
1073                 txq_id = tcb->id;
1074                 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1075                 netif_stop_subqueue(bnad->netdev, txq_id);
1076         }
1077 }
1078
1079 static void
1080 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1081 {
1082         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1083         struct bna_tcb *tcb;
1084         u32 txq_id;
1085         int i;
1086
1087         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1088                 tcb = tx_info->tcb[i];
1089                 if (!tcb)
1090                         continue;
1091                 txq_id = tcb->id;
1092
1093                 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1094                 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1095                 BUG_ON(*(tcb->hw_consumer_index) != 0);
1096
1097                 if (netif_carrier_ok(bnad->netdev)) {
1098                         netif_wake_subqueue(bnad->netdev, txq_id);
1099                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1100                 }
1101         }
1102
1103         /*
1104          * Workaround for the first ioceth enable failure, where we
1105          * get a zero MAC address. We try to get the MAC address
1106          * again here.
1107          */
1108         if (is_zero_ether_addr(bnad->perm_addr)) {
1109                 bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1110                 bnad_set_netdev_perm_addr(bnad);
1111         }
1112 }
1113
1114 /*
1115  * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1116  */
1117 static void
1118 bnad_tx_cleanup(struct delayed_work *work)
1119 {
1120         struct bnad_tx_info *tx_info =
1121                 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1122         struct bnad *bnad = NULL;
1123         struct bna_tcb *tcb;
1124         unsigned long flags;
1125         u32 i, pending = 0;
1126
1127         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1128                 tcb = tx_info->tcb[i];
1129                 if (!tcb)
1130                         continue;
1131
1132                 bnad = tcb->bnad;
1133
1134                 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1135                         pending++;
1136                         continue;
1137                 }
1138
1139                 bnad_txq_cleanup(bnad, tcb);
1140
1141                 smp_mb__before_atomic();
1142                 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1143         }
1144
1145         if (pending) {
1146                 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1147                         msecs_to_jiffies(1));
1148                 return;
1149         }
1150
1151         spin_lock_irqsave(&bnad->bna_lock, flags);
1152         bna_tx_cleanup_complete(tx_info->tx);
1153         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1154 }
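/*
 * If a concurrent Tx completion still holds BNAD_TXQ_FREE_SENT for any
 * TxQ, the cleanup work above reschedules itself after roughly one
 * millisecond; bna_tx_cleanup_complete() is only signalled once every
 * TxQ has actually been drained.
 */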
1155
1156 static void
1157 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1158 {
1159         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1160         struct bna_tcb *tcb;
1161         int i;
1162
1163         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1164                 tcb = tx_info->tcb[i];
1165                 if (!tcb)
1166                         continue;
1167         }
1168
1169         queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1170 }
1171
1172 static void
1173 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1174 {
1175         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1176         struct bna_ccb *ccb;
1177         struct bnad_rx_ctrl *rx_ctrl;
1178         int i;
1179
1180         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1181                 rx_ctrl = &rx_info->rx_ctrl[i];
1182                 ccb = rx_ctrl->ccb;
1183                 if (!ccb)
1184                         continue;
1185
1186                 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1187
1188                 if (ccb->rcb[1])
1189                         clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1190         }
1191 }
1192
1193 /*
1194  * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1195  */
1196 static void
1197 bnad_rx_cleanup(void *work)
1198 {
1199         struct bnad_rx_info *rx_info =
1200                 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1201         struct bnad_rx_ctrl *rx_ctrl;
1202         struct bnad *bnad = NULL;
1203         unsigned long flags;
1204         u32 i;
1205
1206         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1207                 rx_ctrl = &rx_info->rx_ctrl[i];
1208
1209                 if (!rx_ctrl->ccb)
1210                         continue;
1211
1212                 bnad = rx_ctrl->ccb->bnad;
1213
1214                 /*
1215                  * Wait till the poll handler has exited
1216                  * and nothing can be scheduled anymore
1217                  */
1218                 napi_disable(&rx_ctrl->napi);
1219
1220                 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1221                 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1222                 if (rx_ctrl->ccb->rcb[1])
1223                         bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1224         }
1225
1226         spin_lock_irqsave(&bnad->bna_lock, flags);
1227         bna_rx_cleanup_complete(rx_info->rx);
1228         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1229 }
1230
1231 static void
1232 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1233 {
1234         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1235         struct bna_ccb *ccb;
1236         struct bnad_rx_ctrl *rx_ctrl;
1237         int i;
1238
1239         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1240                 rx_ctrl = &rx_info->rx_ctrl[i];
1241                 ccb = rx_ctrl->ccb;
1242                 if (!ccb)
1243                         continue;
1244
1245                 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1246
1247                 if (ccb->rcb[1])
1248                         clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1249         }
1250
1251         queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1252 }
1253
1254 static void
1255 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1256 {
1257         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1258         struct bna_ccb *ccb;
1259         struct bna_rcb *rcb;
1260         struct bnad_rx_ctrl *rx_ctrl;
1261         int i, j;
1262
1263         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1264                 rx_ctrl = &rx_info->rx_ctrl[i];
1265                 ccb = rx_ctrl->ccb;
1266                 if (!ccb)
1267                         continue;
1268
1269                 napi_enable(&rx_ctrl->napi);
1270
1271                 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1272                         rcb = ccb->rcb[j];
1273                         if (!rcb)
1274                                 continue;
1275
1276                         bnad_rxq_alloc_init(bnad, rcb);
1277                         set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1278                         set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1279                         bnad_rxq_post(bnad, rcb);
1280                 }
1281         }
1282 }
1283
1284 static void
1285 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1286 {
1287         struct bnad *bnad = (struct bnad *)arg;
1288
1289         complete(&bnad->bnad_completions.rx_comp);
1290 }
1291
1292 static void
1293 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1294 {
1295         bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1296         complete(&bnad->bnad_completions.mcast_comp);
1297 }
1298
1299 void
1300 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1301                        struct bna_stats *stats)
1302 {
1303         if (status == BNA_CB_SUCCESS)
1304                 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1305
1306         if (!netif_running(bnad->netdev) ||
1307                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1308                 return;
1309
1310         mod_timer(&bnad->stats_timer,
1311                   jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1312 }
1313
1314 static void
1315 bnad_cb_enet_mtu_set(struct bnad *bnad)
1316 {
1317         bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1318         complete(&bnad->bnad_completions.mtu_comp);
1319 }
1320
1321 void
1322 bnad_cb_completion(void *arg, enum bfa_status status)
1323 {
1324         struct bnad_iocmd_comp *iocmd_comp =
1325                         (struct bnad_iocmd_comp *)arg;
1326
1327         iocmd_comp->comp_status = (u32) status;
1328         complete(&iocmd_comp->comp);
1329 }
1330
1331 /* Resource allocation, free functions */
1332
1333 static void
1334 bnad_mem_free(struct bnad *bnad,
1335               struct bna_mem_info *mem_info)
1336 {
1337         int i;
1338         dma_addr_t dma_pa;
1339
1340         if (mem_info->mdl == NULL)
1341                 return;
1342
1343         for (i = 0; i < mem_info->num; i++) {
1344                 if (mem_info->mdl[i].kva != NULL) {
1345                         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1346                                 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1347                                                 dma_pa);
1348                                 dma_free_coherent(&bnad->pcidev->dev,
1349                                                   mem_info->mdl[i].len,
1350                                                   mem_info->mdl[i].kva, dma_pa);
1351                         } else
1352                                 kfree(mem_info->mdl[i].kva);
1353                 }
1354         }
1355         kfree(mem_info->mdl);
1356         mem_info->mdl = NULL;
1357 }
1358
1359 static int
1360 bnad_mem_alloc(struct bnad *bnad,
1361                struct bna_mem_info *mem_info)
1362 {
1363         int i;
1364         dma_addr_t dma_pa;
1365
1366         if ((mem_info->num == 0) || (mem_info->len == 0)) {
1367                 mem_info->mdl = NULL;
1368                 return 0;
1369         }
1370
1371         mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1372                                 GFP_KERNEL);
1373         if (mem_info->mdl == NULL)
1374                 return -ENOMEM;
1375
1376         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1377                 for (i = 0; i < mem_info->num; i++) {
1378                         mem_info->mdl[i].len = mem_info->len;
1379                         mem_info->mdl[i].kva =
1380                                 dma_alloc_coherent(&bnad->pcidev->dev,
1381                                                    mem_info->len, &dma_pa,
1382                                                    GFP_KERNEL);
1383                         if (mem_info->mdl[i].kva == NULL)
1384                                 goto err_return;
1385
1386                         BNA_SET_DMA_ADDR(dma_pa,
1387                                          &(mem_info->mdl[i].dma));
1388                 }
1389         } else {
1390                 for (i = 0; i < mem_info->num; i++) {
1391                         mem_info->mdl[i].len = mem_info->len;
1392                         mem_info->mdl[i].kva = kzalloc(mem_info->len,
1393                                                         GFP_KERNEL);
1394                         if (mem_info->mdl[i].kva == NULL)
1395                                 goto err_return;
1396                 }
1397         }
1398
1399         return 0;
1400
1401 err_return:
1402         bnad_mem_free(bnad, mem_info);
1403         return -ENOMEM;
1404 }
1405
1406 /* Free IRQ for Mailbox */
1407 static void
1408 bnad_mbox_irq_free(struct bnad *bnad)
1409 {
1410         int irq;
1411         unsigned long flags;
1412
1413         spin_lock_irqsave(&bnad->bna_lock, flags);
1414         bnad_disable_mbox_irq(bnad);
1415         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1416
1417         irq = BNAD_GET_MBOX_IRQ(bnad);
1418         free_irq(irq, bnad);
1419 }
1420
1421 /*
1422  * Allocates the IRQ for the mailbox, but keeps it disabled.
1423  * It will be enabled once we get the mbox enable callback
1424  * from bna.
1425  */
1426 static int
1427 bnad_mbox_irq_alloc(struct bnad *bnad)
1428 {
1429         int             err = 0;
1430         unsigned long   irq_flags, flags;
1431         u32     irq;
1432         irq_handler_t   irq_handler;
1433
1434         spin_lock_irqsave(&bnad->bna_lock, flags);
1435         if (bnad->cfg_flags & BNAD_CF_MSIX) {
1436                 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1437                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1438                 irq_flags = 0;
1439         } else {
1440                 irq_handler = (irq_handler_t)bnad_isr;
1441                 irq = bnad->pcidev->irq;
1442                 irq_flags = IRQF_SHARED;
1443         }
1444
1445         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1446         sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1447
1448         /*
1449          * Set the Mbox IRQ disable flag, so that the IRQ handler
1450          * called from request_irq() for SHARED IRQs does not execute
1451          */
1452         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1453
1454         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1455
1456         err = request_irq(irq, irq_handler, irq_flags,
1457                           bnad->mbox_irq_name, bnad);
1458
1459         return err;
1460 }
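/*
 * In MSIX mode the mailbox gets its own dedicated vector serviced by
 * bnad_msix_mbox_handler(); in INTx mode the single shared PCI line is
 * requested with IRQF_SHARED and serviced by bnad_isr(), which also
 * dispatches Tx completions and Rx NAPI polling for data interrupts.
 */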
1461
1462 static void
1463 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1464 {
1465         kfree(intr_info->idl);
1466         intr_info->idl = NULL;
1467 }
1468
1469 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1470 static int
1471 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1472                     u32 txrx_id, struct bna_intr_info *intr_info)
1473 {
1474         int i, vector_start = 0;
1475         u32 cfg_flags;
1476         unsigned long flags;
1477
1478         spin_lock_irqsave(&bnad->bna_lock, flags);
1479         cfg_flags = bnad->cfg_flags;
1480         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1481
1482         if (cfg_flags & BNAD_CF_MSIX) {
1483                 intr_info->intr_type = BNA_INTR_T_MSIX;
1484                 intr_info->idl = kcalloc(intr_info->num,
1485                                         sizeof(struct bna_intr_descr),
1486                                         GFP_KERNEL);
1487                 if (!intr_info->idl)
1488                         return -ENOMEM;
1489
1490                 switch (src) {
1491                 case BNAD_INTR_TX:
1492                         vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1493                         break;
1494
1495                 case BNAD_INTR_RX:
1496                         vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1497                                         (bnad->num_tx * bnad->num_txq_per_tx) +
1498                                         txrx_id;
1499                         break;
1500
1501                 default:
1502                         BUG();
1503                 }
1504
1505                 for (i = 0; i < intr_info->num; i++)
1506                         intr_info->idl[i].vector = vector_start + i;
1507         } else {
1508                 intr_info->intr_type = BNA_INTR_T_INTX;
1509                 intr_info->num = 1;
1510                 intr_info->idl = kcalloc(intr_info->num,
1511                                         sizeof(struct bna_intr_descr),
1512                                         GFP_KERNEL);
1513                 if (!intr_info->idl)
1514                         return -ENOMEM;
1515
1516                 switch (src) {
1517                 case BNAD_INTR_TX:
1518                         intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1519                         break;
1520
1521                 case BNAD_INTR_RX:
1522                         intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1523                         break;
1524                 }
1525         }
1526         return 0;
1527 }
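/*
 * MSIX vector layout implied by the offsets above: the mailbox
 * vector(s) come first, followed by one vector per TxQ and then one per
 * RxP.  That is why the Rx vector_start is offset by
 * BNAD_MAILBOX_MSIX_VECTORS plus (num_tx * num_txq_per_tx).
 */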
1528
1529 /* NOTE: Should be called for MSIX only
1530  * Unregisters Tx MSIX vector(s) from the kernel
1531  */
1532 static void
1533 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1534                         int num_txqs)
1535 {
1536         int i;
1537         int vector_num;
1538
1539         for (i = 0; i < num_txqs; i++) {
1540                 if (tx_info->tcb[i] == NULL)
1541                         continue;
1542
1543                 vector_num = tx_info->tcb[i]->intr_vector;
1544                 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1545         }
1546 }
1547
1548 /* NOTE: Should be called for MSIX only
1549  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1550  */
1551 static int
1552 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1553                         u32 tx_id, int num_txqs)
1554 {
1555         int i;
1556         int err;
1557         int vector_num;
1558
1559         for (i = 0; i < num_txqs; i++) {
1560                 vector_num = tx_info->tcb[i]->intr_vector;
1561                 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1562                                 tx_id + tx_info->tcb[i]->id);
1563                 err = request_irq(bnad->msix_table[vector_num].vector,
1564                                   (irq_handler_t)bnad_msix_tx, 0,
1565                                   tx_info->tcb[i]->name,
1566                                   tx_info->tcb[i]);
1567                 if (err)
1568                         goto err_return;
1569         }
1570
1571         return 0;
1572
1573 err_return:
1574         if (i > 0)
1575                 bnad_tx_msix_unregister(bnad, tx_info, i);
1576         return -1;
1577 }
1578
1579 /* NOTE: Should be called for MSIX only
1580  * Unregisters Rx MSIX vector(s) from the kernel
1581  */
1582 static void
1583 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1584                         int num_rxps)
1585 {
1586         int i;
1587         int vector_num;
1588
1589         for (i = 0; i < num_rxps; i++) {
1590                 if (rx_info->rx_ctrl[i].ccb == NULL)
1591                         continue;
1592
1593                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1594                 free_irq(bnad->msix_table[vector_num].vector,
1595                          rx_info->rx_ctrl[i].ccb);
1596         }
1597 }
1598
1599 /* NOTE: Should be called for MSIX only
1600  * Registers Rx MSIX vector(s), ISR(s) and cookie with the kernel
1601  */
1602 static int
1603 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1604                         u32 rx_id, int num_rxps)
1605 {
1606         int i;
1607         int err;
1608         int vector_num;
1609
1610         for (i = 0; i < num_rxps; i++) {
1611                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1612                 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1613                         bnad->netdev->name,
1614                         rx_id + rx_info->rx_ctrl[i].ccb->id);
1615                 err = request_irq(bnad->msix_table[vector_num].vector,
1616                                   (irq_handler_t)bnad_msix_rx, 0,
1617                                   rx_info->rx_ctrl[i].ccb->name,
1618                                   rx_info->rx_ctrl[i].ccb);
1619                 if (err)
1620                         goto err_return;
1621         }
1622
1623         return 0;
1624
1625 err_return:
1626         if (i > 0)
1627                 bnad_rx_msix_unregister(bnad, rx_info, i);
1628         return -1;
1629 }
1630
1631 /* Free Tx object Resources */
1632 static void
1633 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1634 {
1635         int i;
1636
1637         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1638                 if (res_info[i].res_type == BNA_RES_T_MEM)
1639                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1640                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1641                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1642         }
1643 }
1644
1645 /* Allocates memory and interrupt resources for Tx object */
1646 static int
1647 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1648                   u32 tx_id)
1649 {
1650         int i, err = 0;
1651
1652         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1653                 if (res_info[i].res_type == BNA_RES_T_MEM)
1654                         err = bnad_mem_alloc(bnad,
1655                                         &res_info[i].res_u.mem_info);
1656                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1657                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1658                                         &res_info[i].res_u.intr_info);
1659                 if (err)
1660                         goto err_return;
1661         }
1662         return 0;
1663
1664 err_return:
1665         bnad_tx_res_free(bnad, res_info);
1666         return err;
1667 }
1668
1669 /* Free Rx object Resources */
1670 static void
1671 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1672 {
1673         int i;
1674
1675         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1676                 if (res_info[i].res_type == BNA_RES_T_MEM)
1677                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1678                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1679                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1680         }
1681 }
1682
1683 /* Allocates memory and interrupt resources for Rx object */
1684 static int
1685 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1686                   uint rx_id)
1687 {
1688         int i, err = 0;
1689
1690         /* All memory needs to be allocated before setup_ccbs */
1691         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1692                 if (res_info[i].res_type == BNA_RES_T_MEM)
1693                         err = bnad_mem_alloc(bnad,
1694                                         &res_info[i].res_u.mem_info);
1695                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1696                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1697                                         &res_info[i].res_u.intr_info);
1698                 if (err)
1699                         goto err_return;
1700         }
1701         return 0;
1702
1703 err_return:
1704         bnad_rx_res_free(bnad, res_info);
1705         return err;
1706 }
1707
1708 /* Timer callbacks */
1709 /* a) IOC timer */
1710 static void
1711 bnad_ioc_timeout(unsigned long data)
1712 {
1713         struct bnad *bnad = (struct bnad *)data;
1714         unsigned long flags;
1715
1716         spin_lock_irqsave(&bnad->bna_lock, flags);
1717         bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1718         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1719 }
1720
1721 static void
1722 bnad_ioc_hb_check(unsigned long data)
1723 {
1724         struct bnad *bnad = (struct bnad *)data;
1725         unsigned long flags;
1726
1727         spin_lock_irqsave(&bnad->bna_lock, flags);
1728         bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1729         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1730 }
1731
1732 static void
1733 bnad_iocpf_timeout(unsigned long data)
1734 {
1735         struct bnad *bnad = (struct bnad *)data;
1736         unsigned long flags;
1737
1738         spin_lock_irqsave(&bnad->bna_lock, flags);
1739         bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1740         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1741 }
1742
1743 static void
1744 bnad_iocpf_sem_timeout(unsigned long data)
1745 {
1746         struct bnad *bnad = (struct bnad *)data;
1747         unsigned long flags;
1748
1749         spin_lock_irqsave(&bnad->bna_lock, flags);
1750         bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1751         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1752 }
1753
1754 /*
1755  * All timer routines use bnad->bna_lock to protect against
1756  * the following race, which may occur in case of no locking:
1757  *      Time    CPU m   CPU n
1758  *      0       1 = test_bit
1759  *      1                       clear_bit
1760  *      2                       del_timer_sync
1761  *      3       mod_timer
1762  */
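/*
 * Concretely: the BNAD_RF_*_TIMER_RUNNING bits are set and cleared with
 * bna_lock held, and del_timer_sync() is only called after the
 * corresponding bit has been cleared (see bnad_stats_timer_stop() and
 * bnad_destroy_rx() below).
 */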
1763
1764 /* b) Dynamic Interrupt Moderation Timer */
1765 static void
1766 bnad_dim_timeout(unsigned long data)
1767 {
1768         struct bnad *bnad = (struct bnad *)data;
1769         struct bnad_rx_info *rx_info;
1770         struct bnad_rx_ctrl *rx_ctrl;
1771         int i, j;
1772         unsigned long flags;
1773
1774         if (!netif_carrier_ok(bnad->netdev))
1775                 return;
1776
1777         spin_lock_irqsave(&bnad->bna_lock, flags);
1778         for (i = 0; i < bnad->num_rx; i++) {
1779                 rx_info = &bnad->rx_info[i];
1780                 if (!rx_info->rx)
1781                         continue;
1782                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1783                         rx_ctrl = &rx_info->rx_ctrl[j];
1784                         if (!rx_ctrl->ccb)
1785                                 continue;
1786                         bna_rx_dim_update(rx_ctrl->ccb);
1787                 }
1788         }
1789
1790         /* Check BNAD_RF_DIM_TIMER_RUNNING; this check alone does not eliminate the race */
1791         if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1792                 mod_timer(&bnad->dim_timer,
1793                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1794         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1795 }
1796
1797 /* c)  Statistics Timer */
1798 static void
1799 bnad_stats_timeout(unsigned long data)
1800 {
1801         struct bnad *bnad = (struct bnad *)data;
1802         unsigned long flags;
1803
1804         if (!netif_running(bnad->netdev) ||
1805                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1806                 return;
1807
1808         spin_lock_irqsave(&bnad->bna_lock, flags);
1809         bna_hw_stats_get(&bnad->bna);
1810         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1811 }
1812
1813 /*
1814  * Set up timer for DIM
1815  * Called with bnad->bna_lock held
1816  */
1817 void
1818 bnad_dim_timer_start(struct bnad *bnad)
1819 {
1820         if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1821             !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1822                 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1823                             (unsigned long)bnad);
1824                 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1825                 mod_timer(&bnad->dim_timer,
1826                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1827         }
1828 }
1829
1830 /*
1831  * Set up timer for statistics
1832  * Called with mutex_lock(&bnad->conf_mutex) held
1833  */
1834 static void
1835 bnad_stats_timer_start(struct bnad *bnad)
1836 {
1837         unsigned long flags;
1838
1839         spin_lock_irqsave(&bnad->bna_lock, flags);
1840         if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1841                 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1842                             (unsigned long)bnad);
1843                 mod_timer(&bnad->stats_timer,
1844                           jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1845         }
1846         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1847 }
1848
1849 /*
1850  * Stops the stats timer
1851  * Called with mutex_lock(&bnad->conf_mutex) held
1852  */
1853 static void
1854 bnad_stats_timer_stop(struct bnad *bnad)
1855 {
1856         int to_del = 0;
1857         unsigned long flags;
1858
1859         spin_lock_irqsave(&bnad->bna_lock, flags);
1860         if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1861                 to_del = 1;
1862         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1863         if (to_del)
1864                 del_timer_sync(&bnad->stats_timer);
1865 }
1866
1867 /* Utilities */
1868
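/*
 * Copy the netdev multicast list into mc_list starting at index 1;
 * slot 0 is reserved for the broadcast address. The caller is assumed
 * to have sized mc_list for netdev_mc_count() + 1 addresses.
 */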
1869 static void
1870 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1871 {
1872         int i = 1; /* Index 0 has broadcast address */
1873         struct netdev_hw_addr *mc_addr;
1874
1875         netdev_for_each_mc_addr(mc_addr, netdev) {
1876                 ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1877                 i++;
1878         }
1879 }
1880
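/*
 * NAPI poll handler: process up to 'budget' completions on this CQ.
 * If the budget is exhausted, stay scheduled; otherwise complete NAPI
 * and re-enable the CQ's interrupt.
 */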
1881 static int
1882 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1883 {
1884         struct bnad_rx_ctrl *rx_ctrl =
1885                 container_of(napi, struct bnad_rx_ctrl, napi);
1886         struct bnad *bnad = rx_ctrl->bnad;
1887         int rcvd = 0;
1888
1889         rx_ctrl->rx_poll_ctr++;
1890
1891         if (!netif_carrier_ok(bnad->netdev))
1892                 goto poll_exit;
1893
1894         rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1895         if (rcvd >= budget)
1896                 return rcvd;
1897
1898 poll_exit:
1899         napi_complete(napi);
1900
1901         rx_ctrl->rx_complete++;
1902
1903         if (rx_ctrl->ccb)
1904                 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1905
1906         return rcvd;
1907 }
1908
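/* NAPI weight (per-poll budget) passed to netif_napi_add() below */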
1909 #define BNAD_NAPI_POLL_QUOTA            64
1910 static void
1911 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1912 {
1913         struct bnad_rx_ctrl *rx_ctrl;
1914         int i;
1915
1916         /* Initialize & enable NAPI */
1917         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1918                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1919                 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1920                                bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1921         }
1922 }
1923
1924 static void
1925 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1926 {
1927         int i;
1928
1929         /* First disable and then clean up */
1930         for (i = 0; i < bnad->num_rxp_per_rx; i++)
1931                 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1932 }
1933
1934 /* Should be called with conf_lock held */
1935 void
1936 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1937 {
1938         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1939         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1940         unsigned long flags;
1941
1942         if (!tx_info->tx)
1943                 return;
1944
1945         init_completion(&bnad->bnad_completions.tx_comp);
1946         spin_lock_irqsave(&bnad->bna_lock, flags);
1947         bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1948         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1949         wait_for_completion(&bnad->bnad_completions.tx_comp);
1950
1951         if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1952                 bnad_tx_msix_unregister(bnad, tx_info,
1953                         bnad->num_txq_per_tx);
1954
1955         spin_lock_irqsave(&bnad->bna_lock, flags);
1956         bna_tx_destroy(tx_info->tx);
1957         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1958
1959         tx_info->tx = NULL;
1960         tx_info->tx_id = 0;
1961
1962         bnad_tx_res_free(bnad, res_info);
1963 }
1964
1965 /* Should be called with conf_lock held */
1966 int
1967 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1968 {
1969         int err;
1970         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1971         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1972         struct bna_intr_info *intr_info =
1973                         &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1974         struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1975         static const struct bna_tx_event_cbfn tx_cbfn = {
1976                 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1977                 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1978                 .tx_stall_cbfn = bnad_cb_tx_stall,
1979                 .tx_resume_cbfn = bnad_cb_tx_resume,
1980                 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1981         };
1982
1983         struct bna_tx *tx;
1984         unsigned long flags;
1985
1986         tx_info->tx_id = tx_id;
1987
1988         /* Initialize the Tx object configuration */
1989         tx_config->num_txq = bnad->num_txq_per_tx;
1990         tx_config->txq_depth = bnad->txq_depth;
1991         tx_config->tx_type = BNA_TX_T_REGULAR;
1992         tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1993
1994         /* Get BNA's resource requirement for one tx object */
1995         spin_lock_irqsave(&bnad->bna_lock, flags);
1996         bna_tx_res_req(bnad->num_txq_per_tx,
1997                 bnad->txq_depth, res_info);
1998         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1999
2000         /* Fill Unmap Q memory requirements */
2001         BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
2002                         bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
2003                         bnad->txq_depth));
2004
2005         /* Allocate resources */
2006         err = bnad_tx_res_alloc(bnad, res_info, tx_id);
2007         if (err)
2008                 return err;
2009
2010         /* Ask BNA to create one Tx object, supplying required resources */
2011         spin_lock_irqsave(&bnad->bna_lock, flags);
2012         tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
2013                         tx_info);
2014         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2015         if (!tx) {
2016                 err = -ENOMEM;
2017                 goto err_return;
2018         }
2019         tx_info->tx = tx;
2020
2021         INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2022                         (work_func_t)bnad_tx_cleanup);
2023
2024         /* Register ISR for the Tx object */
2025         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2026                 err = bnad_tx_msix_register(bnad, tx_info,
2027                         tx_id, bnad->num_txq_per_tx);
2028                 if (err)
2029                         goto cleanup_tx;
2030         }
2031
2032         spin_lock_irqsave(&bnad->bna_lock, flags);
2033         bna_tx_enable(tx);
2034         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2035
2036         return 0;
2037
2038 cleanup_tx:
2039         spin_lock_irqsave(&bnad->bna_lock, flags);
2040         bna_tx_destroy(tx_info->tx);
2041         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2042         tx_info->tx = NULL;
2043         tx_info->tx_id = 0;
2044 err_return:
2045         bnad_tx_res_free(bnad, res_info);
2046         return err;
2047 }
2048
2049 /* Setup the rx config for bna_rx_create */
2050 /* bnad decides the configuration */
2051 static void
2052 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2053 {
2054         memset(rx_config, 0, sizeof(*rx_config));
2055         rx_config->rx_type = BNA_RX_T_REGULAR;
2056         rx_config->num_paths = bnad->num_rxp_per_rx;
2057         rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2058
2059         if (bnad->num_rxp_per_rx > 1) {
2060                 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2061                 rx_config->rss_config.hash_type =
2062                                 (BFI_ENET_RSS_IPV6 |
2063                                  BFI_ENET_RSS_IPV6_TCP |
2064                                  BFI_ENET_RSS_IPV4 |
2065                                  BFI_ENET_RSS_IPV4_TCP);
2066                 rx_config->rss_config.hash_mask =
2067                                 bnad->num_rxp_per_rx - 1;
2068                 netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2069                         sizeof(rx_config->rss_config.toeplitz_hash_key));
2070         } else {
2071                 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2072                 memset(&rx_config->rss_config, 0,
2073                        sizeof(rx_config->rss_config));
2074         }
2075
2076         rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2077         rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2078
2079         /* BNA_RXP_SINGLE - one data-buffer queue
2080          * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2081          * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2082          */
2083         /* TODO: configurable param for queue type */
2084         rx_config->rxp_type = BNA_RXP_SLR;
2085
2086         if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2087             rx_config->frame_size > 4096) {
2088                 /* Though size_routing_enable is set in SLR mode,
2089                  * small packets may still get routed to the same rxq.
2090                  * Set buf_size to 2048 instead of PAGE_SIZE.
2091                  */
2092                 rx_config->q0_buf_size = 2048;
2093                 /* this should be a multiple of 2 */
2094                 rx_config->q0_num_vecs = 4;
2095                 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2096                 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2097         } else {
2098                 rx_config->q0_buf_size = rx_config->frame_size;
2099                 rx_config->q0_num_vecs = 1;
2100                 rx_config->q0_depth = bnad->rxq_depth;
2101         }
2102
2103         /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2104         if (rx_config->rxp_type == BNA_RXP_SLR) {
2105                 rx_config->q1_depth = bnad->rxq_depth;
2106                 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2107         }
2108
2109         rx_config->vlan_strip_status =
2110                 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2111                 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2112 }
2113
2114 static void
2115 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2116 {
2117         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2118         int i;
2119
2120         for (i = 0; i < bnad->num_rxp_per_rx; i++)
2121                 rx_info->rx_ctrl[i].bnad = bnad;
2122 }
2123
2124 /* Called with mutex_lock(&bnad->conf_mutex) held */
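/*
 * Tear down and re-create every Rx object with the current netdev MTU,
 * then restore the MAC address, VLANs, default broadcast and Rx mode on
 * the default Rx. Returns the number of Rx objects it attempted to set up.
 */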
2125 static u32
2126 bnad_reinit_rx(struct bnad *bnad)
2127 {
2128         struct net_device *netdev = bnad->netdev;
2129         u32 err = 0, current_err = 0;
2130         u32 rx_id = 0, count = 0;
2131         unsigned long flags;
2132
2133         /* destroy and create new rx objects */
2134         for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2135                 if (!bnad->rx_info[rx_id].rx)
2136                         continue;
2137                 bnad_destroy_rx(bnad, rx_id);
2138         }
2139
2140         spin_lock_irqsave(&bnad->bna_lock, flags);
2141         bna_enet_mtu_set(&bnad->bna.enet,
2142                          BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2143         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2144
2145         for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2146                 count++;
2147                 current_err = bnad_setup_rx(bnad, rx_id);
2148                 if (current_err && !err) {
2149                         err = current_err;
2150                         netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2151                 }
2152         }
2153
2154         /* restore rx configuration */
2155         if (bnad->rx_info[0].rx && !err) {
2156                 bnad_restore_vlans(bnad, 0);
2157                 bnad_enable_default_bcast(bnad);
2158                 spin_lock_irqsave(&bnad->bna_lock, flags);
2159                 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2160                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2161                 bnad_set_rx_mode(netdev);
2162         }
2163
2164         return count;
2165 }
2166
2167 /* Called with bnad_conf_lock() held */
2168 void
2169 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2170 {
2171         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2172         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2173         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2174         unsigned long flags;
2175         int to_del = 0;
2176
2177         if (!rx_info->rx)
2178                 return;
2179
2180         if (rx_id == 0) {
2181                 spin_lock_irqsave(&bnad->bna_lock, flags);
2182                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2183                     test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2184                         clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2185                         to_del = 1;
2186                 }
2187                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2188                 if (to_del)
2189                         del_timer_sync(&bnad->dim_timer);
2190         }
2191
2192         init_completion(&bnad->bnad_completions.rx_comp);
2193         spin_lock_irqsave(&bnad->bna_lock, flags);
2194         bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2195         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2196         wait_for_completion(&bnad->bnad_completions.rx_comp);
2197
2198         if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2199                 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2200
2201         bnad_napi_delete(bnad, rx_id);
2202
2203         spin_lock_irqsave(&bnad->bna_lock, flags);
2204         bna_rx_destroy(rx_info->rx);
2205
2206         rx_info->rx = NULL;
2207         rx_info->rx_id = 0;
2208         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2209
2210         bnad_rx_res_free(bnad, res_info);
2211 }
2212
2213 /* Called with mutex_lock(&bnad->conf_mutex) held */
2214 int
2215 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2216 {
2217         int err;
2218         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2219         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2220         struct bna_intr_info *intr_info =
2221                         &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2222         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2223         static const struct bna_rx_event_cbfn rx_cbfn = {
2224                 .rcb_setup_cbfn = NULL,
2225                 .rcb_destroy_cbfn = NULL,
2226                 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2227                 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2228                 .rx_stall_cbfn = bnad_cb_rx_stall,
2229                 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2230                 .rx_post_cbfn = bnad_cb_rx_post,
2231         };
2232         struct bna_rx *rx;
2233         unsigned long flags;
2234
2235         rx_info->rx_id = rx_id;
2236
2237         /* Initialize the Rx object configuration */
2238         bnad_init_rx_config(bnad, rx_config);
2239
2240         /* Get BNA's resource requirement for one Rx object */
2241         spin_lock_irqsave(&bnad->bna_lock, flags);
2242         bna_rx_res_req(rx_config, res_info);
2243         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2244
2245         /* Fill Unmap Q memory requirements */
2246         BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2247                                  rx_config->num_paths,
2248                         (rx_config->q0_depth *
2249                          sizeof(struct bnad_rx_unmap)) +
2250                          sizeof(struct bnad_rx_unmap_q));
2251
2252         if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2253                 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2254                                          rx_config->num_paths,
2255                                 (rx_config->q1_depth *
2256                                  sizeof(struct bnad_rx_unmap) +
2257                                  sizeof(struct bnad_rx_unmap_q)));
2258         }
2259         /* Allocate resource */
2260         err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2261         if (err)
2262                 return err;
2263
2264         bnad_rx_ctrl_init(bnad, rx_id);
2265
2266         /* Ask BNA to create one Rx object, supplying required resources */
2267         spin_lock_irqsave(&bnad->bna_lock, flags);
2268         rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2269                         rx_info);
2270         if (!rx) {
2271                 err = -ENOMEM;
2272                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2273                 goto err_return;
2274         }
2275         rx_info->rx = rx;
2276         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2277
2278         INIT_WORK(&rx_info->rx_cleanup_work,
2279                         (work_func_t)(bnad_rx_cleanup));
2280
2281         /*
2282          * Init NAPI: this leaves the state set to NAPI_STATE_SCHED,
2283          * so the IRQ handler cannot schedule NAPI at this point.
2284          */
2285         bnad_napi_add(bnad, rx_id);
2286
2287         /* Register ISR for the Rx object */
2288         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2289                 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2290                                                 rx_config->num_paths);
2291                 if (err)
2292                         goto err_return;
2293         }
2294
2295         spin_lock_irqsave(&bnad->bna_lock, flags);
2296         if (rx_id == 0) {
2297                 /* Set up Dynamic Interrupt Moderation Vector */
2298                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2299                         bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2300
2301                 /* Enable VLAN filtering only on the default Rx */
2302                 bna_rx_vlanfilter_enable(rx);
2303
2304                 /* Start the DIM timer */
2305                 bnad_dim_timer_start(bnad);
2306         }
2307
2308         bna_rx_enable(rx);
2309         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2310
2311         return 0;
2312
2313 err_return:
2314         bnad_destroy_rx(bnad, rx_id);
2315         return err;
2316 }
2317
2318 /* Called with conf_lock & bnad->bna_lock held */
2319 void
2320 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2321 {
2322         struct bnad_tx_info *tx_info;
2323
2324         tx_info = &bnad->tx_info[0];
2325         if (!tx_info->tx)
2326                 return;
2327
2328         bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2329 }
2330
2331 /* Called with conf_lock & bnad->bna_lock held */
2332 void
2333 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2334 {
2335         struct bnad_rx_info *rx_info;
2336         int     i;
2337
2338         for (i = 0; i < bnad->num_rx; i++) {
2339                 rx_info = &bnad->rx_info[i];
2340                 if (!rx_info->rx)
2341                         continue;
2342                 bna_rx_coalescing_timeo_set(rx_info->rx,
2343                                 bnad->rx_coalescing_timeo);
2344         }
2345 }
2346
2347 /*
2348  * Called with bnad->bna_lock held
2349  */
2350 int
2351 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2352 {
2353         int ret;
2354
2355         if (!is_valid_ether_addr(mac_addr))
2356                 return -EADDRNOTAVAIL;
2357
2358         /* If datapath is down, pretend everything went through */
2359         if (!bnad->rx_info[0].rx)
2360                 return 0;
2361
2362         ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2363         if (ret != BNA_CB_SUCCESS)
2364                 return -EADDRNOTAVAIL;
2365
2366         return 0;
2367 }
2368
2369 /* Should be called with conf_lock held */
2370 int
2371 bnad_enable_default_bcast(struct bnad *bnad)
2372 {
2373         struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2374         int ret;
2375         unsigned long flags;
2376
2377         init_completion(&bnad->bnad_completions.mcast_comp);
2378
2379         spin_lock_irqsave(&bnad->bna_lock, flags);
2380         ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2381                                bnad_cb_rx_mcast_add);
2382         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2383
2384         if (ret == BNA_CB_SUCCESS)
2385                 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2386         else
2387                 return -ENODEV;
2388
2389         if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2390                 return -ENODEV;
2391
2392         return 0;
2393 }
2394
2395 /* Called with mutex_lock(&bnad->conf_mutex) held */
2396 void
2397 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2398 {
2399         u16 vid;
2400         unsigned long flags;
2401
2402         for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2403                 spin_lock_irqsave(&bnad->bna_lock, flags);
2404                 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2405                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2406         }
2407 }
2408
2409 /* Statistics utilities */
2410 void
2411 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2412 {
2413         int i, j;
2414
2415         for (i = 0; i < bnad->num_rx; i++) {
2416                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2417                         if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2418                                 stats->rx_packets += bnad->rx_info[i].
2419                                 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2420                                 stats->rx_bytes += bnad->rx_info[i].
2421                                         rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2422                                 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2423                                         bnad->rx_info[i].rx_ctrl[j].ccb->
2424                                         rcb[1]->rxq) {
2425                                         stats->rx_packets +=
2426                                                 bnad->rx_info[i].rx_ctrl[j].
2427                                                 ccb->rcb[1]->rxq->rx_packets;
2428                                         stats->rx_bytes +=
2429                                                 bnad->rx_info[i].rx_ctrl[j].
2430                                                 ccb->rcb[1]->rxq->rx_bytes;
2431                                 }
2432                         }
2433                 }
2434         }
2435         for (i = 0; i < bnad->num_tx; i++) {
2436                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2437                         if (bnad->tx_info[i].tcb[j]) {
2438                                 stats->tx_packets +=
2439                                 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2440                                 stats->tx_bytes +=
2441                                         bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2442                         }
2443                 }
2444         }
2445 }
2446
2447 /*
2448  * Must be called with the bna_lock held.
2449  */
2450 void
2451 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2452 {
2453         struct bfi_enet_stats_mac *mac_stats;
2454         u32 bmap;
2455         int i;
2456
2457         mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2458         stats->rx_errors =
2459                 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2460                 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2461                 mac_stats->rx_undersize;
2462         stats->tx_errors = mac_stats->tx_fcs_error +
2463                                         mac_stats->tx_undersize;
2464         stats->rx_dropped = mac_stats->rx_drop;
2465         stats->tx_dropped = mac_stats->tx_drop;
2466         stats->multicast = mac_stats->rx_multicast;
2467         stats->collisions = mac_stats->tx_total_collision;
2468
2469         stats->rx_length_errors = mac_stats->rx_frame_length_error;
2470
2471         /* receive ring buffer overflow  ?? */
2472
2473         stats->rx_crc_errors = mac_stats->rx_fcs_error;
2474         stats->rx_frame_errors = mac_stats->rx_alignment_error;
2475         /* receiver FIFO overrun */
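        /* Note: only the first active RxF found in the rid mask is
         * counted here (the loop breaks after the first hit).
         */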
2476         bmap = bna_rx_rid_mask(&bnad->bna);
2477         for (i = 0; bmap; i++) {
2478                 if (bmap & 1) {
2479                         stats->rx_fifo_errors +=
2480                                 bnad->stats.bna_stats->
2481                                         hw_stats.rxf_stats[i].frame_drops;
2482                         break;
2483                 }
2484                 bmap >>= 1;
2485         }
2486 }
2487
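/* Wait for any in-flight mailbox interrupt handler to complete. */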
2488 static void
2489 bnad_mbox_irq_sync(struct bnad *bnad)
2490 {
2491         u32 irq;
2492         unsigned long flags;
2493
2494         spin_lock_irqsave(&bnad->bna_lock, flags);
2495         if (bnad->cfg_flags & BNAD_CF_MSIX)
2496                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2497         else
2498                 irq = bnad->pcidev->irq;
2499         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2500
2501         synchronize_irq(irq);
2502 }
2503
2504 /* Utility used by bnad_start_xmit, for doing TSO */
2505 static int
2506 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2507 {
2508         int err;
2509
2510         err = skb_cow_head(skb, 0);
2511         if (err < 0) {
2512                 BNAD_UPDATE_CTR(bnad, tso_err);
2513                 return err;
2514         }
2515
2516         /*
2517          * For TSO, the TCP checksum field is seeded with pseudo-header sum
2518          * excluding the length field.
2519          */
2520         if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2521                 struct iphdr *iph = ip_hdr(skb);
2522
2523                 /* Do we really need these? */
2524                 iph->tot_len = 0;
2525                 iph->check = 0;
2526
2527                 tcp_hdr(skb)->check =
2528                         ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2529                                            IPPROTO_TCP, 0);
2530                 BNAD_UPDATE_CTR(bnad, tso4);
2531         } else {
2532                 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2533
2534                 ipv6h->payload_len = 0;
2535                 tcp_hdr(skb)->check =
2536                         ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2537                                          IPPROTO_TCP, 0);
2538                 BNAD_UPDATE_CTR(bnad, tso6);
2539         }
2540
2541         return 0;
2542 }
2543
2544 /*
2545  * Initialize Q numbers depending on Rx Paths
2546  * Called with bnad->bna_lock held, because of cfg_flags
2547  * access.
2548  */
2549 static void
2550 bnad_q_num_init(struct bnad *bnad)
2551 {
2552         int rxps;
2553
2554         rxps = min((uint)num_online_cpus(),
2555                         (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2556
2557         if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2558                 rxps = 1;       /* INTx */
2559
2560         bnad->num_rx = 1;
2561         bnad->num_tx = 1;
2562         bnad->num_rxp_per_rx = rxps;
2563         bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2564 }
2565
2566 /*
2567  * Adjusts the Q numbers, given a number of MSI-X vectors.
2568  * Gives preference to RSS over Tx priority queues; in that case
2569  * just 1 Tx Q is used.
2570  * Called with bnad->bna_lock held because of cfg_flags access.
2571  */
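/*
 * For example (illustrative numbers only): with msix_vectors = 8,
 * num_tx * num_txq_per_tx = 1 and BNAD_MAILBOX_MSIX_VECTORS = 1,
 * num_rxp_per_rx becomes 8 - 1 - 1 = 6.
 */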
2572 static void
2573 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2574 {
2575         bnad->num_txq_per_tx = 1;
2576         if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2577              bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2578             (bnad->cfg_flags & BNAD_CF_MSIX)) {
2579                 bnad->num_rxp_per_rx = msix_vectors -
2580                         (bnad->num_tx * bnad->num_txq_per_tx) -
2581                         BNAD_MAILBOX_MSIX_VECTORS;
2582         } else
2583                 bnad->num_rxp_per_rx = 1;
2584 }
2585
2586 /* Enable / disable ioceth */
2587 static int
2588 bnad_ioceth_disable(struct bnad *bnad)
2589 {
2590         unsigned long flags;
2591         int err = 0;
2592
2593         spin_lock_irqsave(&bnad->bna_lock, flags);
2594         init_completion(&bnad->bnad_completions.ioc_comp);
2595         bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2596         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2597
2598         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2599                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2600
2601         err = bnad->bnad_completions.ioc_comp_status;
2602         return err;
2603 }
2604
2605 static int
2606 bnad_ioceth_enable(struct bnad *bnad)
2607 {
2608         int err = 0;
2609         unsigned long flags;
2610
2611         spin_lock_irqsave(&bnad->bna_lock, flags);
2612         init_completion(&bnad->bnad_completions.ioc_comp);
2613         bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2614         bna_ioceth_enable(&bnad->bna.ioceth);
2615         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2616
2617         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2618                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2619
2620         err = bnad->bnad_completions.ioc_comp_status;
2621
2622         return err;
2623 }
2624
2625 /* Free BNA resources */
2626 static void
2627 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2628                 u32 res_val_max)
2629 {
2630         int i;
2631
2632         for (i = 0; i < res_val_max; i++)
2633                 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2634 }
2635
2636 /* Allocates memory and interrupt resources for BNA */
2637 static int
2638 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2639                 u32 res_val_max)
2640 {
2641         int i, err;
2642
2643         for (i = 0; i < res_val_max; i++) {
2644                 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2645                 if (err)
2646                         goto err_return;
2647         }
2648         return 0;
2649
2650 err_return:
2651         bnad_res_free(bnad, res_info, res_val_max);
2652         return err;
2653 }
2654
2655 /* Interrupt enable / disable */
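/*
 * Request bnad->msix_num MSI-X vectors. If pci_enable_msix_range()
 * grants fewer than requested, shrink the queue counts via
 * bnad_q_num_adjust() so that TxQs + RxPs + mailbox fit into what was
 * granted; if even that does not fit, fall back to INTx mode.
 */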
2656 static void
2657 bnad_enable_msix(struct bnad *bnad)
2658 {
2659         int i, ret;
2660         unsigned long flags;
2661
2662         spin_lock_irqsave(&bnad->bna_lock, flags);
2663         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2664                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2665                 return;
2666         }
2667         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2668
2669         if (bnad->msix_table)
2670                 return;
2671
2672         bnad->msix_table =
2673                 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2674
2675         if (!bnad->msix_table)
2676                 goto intx_mode;
2677
2678         for (i = 0; i < bnad->msix_num; i++)
2679                 bnad->msix_table[i].entry = i;
2680
2681         ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2682                                     1, bnad->msix_num);
2683         if (ret < 0) {
2684                 goto intx_mode;
2685         } else if (ret < bnad->msix_num) {
2686                 dev_warn(&bnad->pcidev->dev,
2687                          "%d MSI-X vectors allocated < %d requested\n",
2688                          ret, bnad->msix_num);
2689
2690                 spin_lock_irqsave(&bnad->bna_lock, flags);
2691                 /* ret = #of vectors that we got */
2692                 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2693                         (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2694                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2695
2696                 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2697                          BNAD_MAILBOX_MSIX_VECTORS;
2698
2699                 if (bnad->msix_num > ret) {
2700                         pci_disable_msix(bnad->pcidev);
2701                         goto intx_mode;
2702                 }
2703         }
2704
2705         pci_intx(bnad->pcidev, 0);
2706
2707         return;
2708
2709 intx_mode:
2710         dev_warn(&bnad->pcidev->dev,
2711                  "MSI-X enable failed - operating in INTx mode\n");
2712
2713         kfree(bnad->msix_table);
2714         bnad->msix_table = NULL;
2715         bnad->msix_num = 0;
2716         spin_lock_irqsave(&bnad->bna_lock, flags);
2717         bnad->cfg_flags &= ~BNAD_CF_MSIX;
2718         bnad_q_num_init(bnad);
2719         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2720 }
2721
2722 static void
2723 bnad_disable_msix(struct bnad *bnad)
2724 {
2725         u32 cfg_flags;
2726         unsigned long flags;
2727
2728         spin_lock_irqsave(&bnad->bna_lock, flags);
2729         cfg_flags = bnad->cfg_flags;
2730         if (bnad->cfg_flags & BNAD_CF_MSIX)
2731                 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2732         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2733
2734         if (cfg_flags & BNAD_CF_MSIX) {
2735                 pci_disable_msix(bnad->pcidev);
2736                 kfree(bnad->msix_table);
2737                 bnad->msix_table = NULL;
2738         }
2739 }
2740
2741 /* Netdev entry points */
2742 static int
2743 bnad_open(struct net_device *netdev)
2744 {
2745         int err;
2746         struct bnad *bnad = netdev_priv(netdev);
2747         struct bna_pause_config pause_config;
2748         unsigned long flags;
2749
2750         mutex_lock(&bnad->conf_mutex);
2751
2752         /* Tx */
2753         err = bnad_setup_tx(bnad, 0);
2754         if (err)
2755                 goto err_return;
2756
2757         /* Rx */
2758         err = bnad_setup_rx(bnad, 0);
2759         if (err)
2760                 goto cleanup_tx;
2761
2762         /* Port */
2763         pause_config.tx_pause = 0;
2764         pause_config.rx_pause = 0;
2765
2766         spin_lock_irqsave(&bnad->bna_lock, flags);
2767         bna_enet_mtu_set(&bnad->bna.enet,
2768                          BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2769         bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2770         bna_enet_enable(&bnad->bna.enet);
2771         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2772
2773         /* Enable broadcast */
2774         bnad_enable_default_bcast(bnad);
2775
2776         /* Restore VLANs, if any */
2777         bnad_restore_vlans(bnad, 0);
2778
2779         /* Set the UCAST address */
2780         spin_lock_irqsave(&bnad->bna_lock, flags);
2781         bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2782         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2783
2784         /* Start the stats timer */
2785         bnad_stats_timer_start(bnad);
2786
2787         mutex_unlock(&bnad->conf_mutex);
2788
2789         return 0;
2790
2791 cleanup_tx:
2792         bnad_destroy_tx(bnad, 0);
2793
2794 err_return:
2795         mutex_unlock(&bnad->conf_mutex);
2796         return err;
2797 }
2798
2799 static int
2800 bnad_stop(struct net_device *netdev)
2801 {
2802         struct bnad *bnad = netdev_priv(netdev);
2803         unsigned long flags;
2804
2805         mutex_lock(&bnad->conf_mutex);
2806
2807         /* Stop the stats timer */
2808         bnad_stats_timer_stop(bnad);
2809
2810         init_completion(&bnad->bnad_completions.enet_comp);
2811
2812         spin_lock_irqsave(&bnad->bna_lock, flags);
2813         bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2814                         bnad_cb_enet_disabled);
2815         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2816
2817         wait_for_completion(&bnad->bnad_completions.enet_comp);
2818
2819         bnad_destroy_tx(bnad, 0);
2820         bnad_destroy_rx(bnad, 0);
2821
2822         /* Synchronize mailbox IRQ */
2823         bnad_mbox_irq_sync(bnad);
2824
2825         mutex_unlock(&bnad->conf_mutex);
2826
2827         return 0;
2828 }
2829
2830 /* TX */
2831 /* Returns 0 for success */
2832 static int
2833 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2834                     struct sk_buff *skb, struct bna_txq_entry *txqent)
2835 {
2836         u16 flags = 0;
2837         u32 gso_size;
2838         u16 vlan_tag = 0;
2839
2840         if (skb_vlan_tag_present(skb)) {
2841                 vlan_tag = (u16)skb_vlan_tag_get(skb);
2842                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2843         }
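        /* In CEE mode the 802.1p priority comes from the Tx object
         * (tcb->priority); only the VID/DEI bits (0x1fff) of the
         * original tag are kept.
         */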
2844         if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2845                 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2846                                 | (vlan_tag & 0x1fff);
2847                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2848         }
2849         txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2850
2851         if (skb_is_gso(skb)) {
2852                 gso_size = skb_shinfo(skb)->gso_size;
2853                 if (unlikely(gso_size > bnad->netdev->mtu)) {
2854                         BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2855                         return -EINVAL;
2856                 }
2857                 if (unlikely((gso_size + skb_transport_offset(skb) +
2858                               tcp_hdrlen(skb)) >= skb->len)) {
2859                         txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2860                         txqent->hdr.wi.lso_mss = 0;
2861                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2862                 } else {
2863                         txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2864                         txqent->hdr.wi.lso_mss = htons(gso_size);
2865                 }
2866
2867                 if (bnad_tso_prepare(bnad, skb)) {
2868                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2869                         return -EINVAL;
2870                 }
2871
2872                 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2873                 txqent->hdr.wi.l4_hdr_size_n_offset =
2874                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2875                         tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2876         } else  {
2877                 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2878                 txqent->hdr.wi.lso_mss = 0;
2879
2880                 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2881                         BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2882                         return -EINVAL;
2883                 }
2884
2885                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2886                         __be16 net_proto = vlan_get_protocol(skb);
2887                         u8 proto = 0;
2888
2889                         if (net_proto == htons(ETH_P_IP))
2890                                 proto = ip_hdr(skb)->protocol;
2891 #ifdef NETIF_F_IPV6_CSUM
2892                         else if (net_proto == htons(ETH_P_IPV6)) {
2893                                 /* nexthdr may not be TCP immediately. */
2894                                 proto = ipv6_hdr(skb)->nexthdr;
2895                         }
2896 #endif
2897                         if (proto == IPPROTO_TCP) {
2898                                 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2899                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2900                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2901                                               (0, skb_transport_offset(skb)));
2902
2903                                 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2904
2905                                 if (unlikely(skb_headlen(skb) <
2906                                             skb_transport_offset(skb) +
2907                                     tcp_hdrlen(skb))) {
2908                                         BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2909                                         return -EINVAL;
2910                                 }
2911                         } else if (proto == IPPROTO_UDP) {
2912                                 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2913                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2914                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2915                                               (0, skb_transport_offset(skb)));
2916
2917                                 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2918                                 if (unlikely(skb_headlen(skb) <
2919                                             skb_transport_offset(skb) +
2920                                     sizeof(struct udphdr))) {
2921                                         BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2922                                         return -EINVAL;
2923                                 }
2924                         } else {
2925
2926                                 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2927                                 return -EINVAL;
2928                         }
2929                 } else
2930                         txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2931         }
2932
2933         txqent->hdr.wi.flags = htons(flags);
2934         txqent->hdr.wi.frame_length = htonl(skb->len);
2935
2936         return 0;
2937 }
2938
2939 /*
2940  * bnad_start_xmit : Netdev entry point for Transmit
2941  *                   Called under lock held by net_device
2942  */
2943 static netdev_tx_t
2944 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2945 {
2946         struct bnad *bnad = netdev_priv(netdev);
2947         u32 txq_id = 0;
2948         struct bna_tcb *tcb = NULL;
2949         struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2950         u32             prod, q_depth, vect_id;
2951         u32             wis, vectors, len;
2952         int             i;
2953         dma_addr_t              dma_addr;
2954         struct bna_txq_entry *txqent;
2955
2956         len = skb_headlen(skb);
2957
2958         /* Sanity checks for the skb */
2959
2960         if (unlikely(skb->len <= ETH_HLEN)) {
2961                 dev_kfree_skb_any(skb);
2962                 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2963                 return NETDEV_TX_OK;
2964         }
2965         if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2966                 dev_kfree_skb_any(skb);
2967                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2968                 return NETDEV_TX_OK;
2969         }
2970         if (unlikely(len == 0)) {
2971                 dev_kfree_skb_any(skb);
2972                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2973                 return NETDEV_TX_OK;
2974         }
2975
2976         tcb = bnad->tx_info[0].tcb[txq_id];
2977
2978         /*
2979          * Takes care of the Tx that is scheduled between clearing the flag
2980          * and the netif_tx_stop_all_queues() call.
2981          */
2982         if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2983                 dev_kfree_skb_any(skb);
2984                 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2985                 return NETDEV_TX_OK;
2986         }
2987
2988         q_depth = tcb->q_depth;
2989         prod = tcb->producer_index;
2990         unmap_q = tcb->unmap_q;
2991
2992         vectors = 1 + skb_shinfo(skb)->nr_frags;
2993         wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
2994
2995         if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2996                 dev_kfree_skb_any(skb);
2997                 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2998                 return NETDEV_TX_OK;
2999         }
3000
3001         /* Check for available TxQ resources */
3002         if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3003                 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
3004                     !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
3005                         u32 sent;
3006                         sent = bnad_txcmpl_process(bnad, tcb);
3007                         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3008                                 bna_ib_ack(tcb->i_dbell, sent);
3009                         smp_mb__before_atomic();
3010                         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
3011                 } else {
3012                         netif_stop_queue(netdev);
3013                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3014                 }
3015
3016                 smp_mb();
3017                 /*
3018                  * Check again to deal with race condition between
3019                  * netif_stop_queue here, and netif_wake_queue in
3020                  * interrupt handler which is not inside netif tx lock.
3021                  */
3022                 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3023                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3024                         return NETDEV_TX_BUSY;
3025                 } else {
3026                         netif_wake_queue(netdev);
3027                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3028                 }
3029         }
3030
3031         txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3032         head_unmap = &unmap_q[prod];
3033
3034         /* Program the opcode, flags, frame_len, num_vectors in WI */
3035         if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3036                 dev_kfree_skb_any(skb);
3037                 return NETDEV_TX_OK;
3038         }
3039         txqent->hdr.wi.reserved = 0;
3040         txqent->hdr.wi.num_vectors = vectors;
3041
3042         head_unmap->skb = skb;
3043         head_unmap->nvecs = 0;
3044
3045         /* Program the vectors */
3046         unmap = head_unmap;
3047         dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3048                                   len, DMA_TO_DEVICE);
3049         if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3050                 dev_kfree_skb_any(skb);
3051                 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3052                 return NETDEV_TX_OK;
3053         }
3054         BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3055         txqent->vector[0].length = htons(len);
3056         dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3057         head_unmap->nvecs++;
3058
3059         for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3060                 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3061                 u32             size = skb_frag_size(frag);
3062
3063                 if (unlikely(size == 0)) {
3064                         /* Undo the changes starting at tcb->producer_index */
3065                         bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3066                                 tcb->producer_index);
3067                         dev_kfree_skb_any(skb);
3068                         BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3069                         return NETDEV_TX_OK;
3070                 }
3071
3072                 len += size;
3073
3074                 vect_id++;
3075                 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3076                         vect_id = 0;
3077                         BNA_QE_INDX_INC(prod, q_depth);
3078                         txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3079                         txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3080                         unmap = &unmap_q[prod];
3081                 }
3082
3083                 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3084                                             0, size, DMA_TO_DEVICE);
3085                 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3086                         /* Undo the changes starting at tcb->producer_index */
3087                         bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3088                                            tcb->producer_index);
3089                         dev_kfree_skb_any(skb);
3090                         BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3091                         return NETDEV_TX_OK;
3092                 }
3093
3094                 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3095                 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3096                 txqent->vector[vect_id].length = htons(size);
3097                 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3098                                    dma_addr);
3099                 head_unmap->nvecs++;
3100         }
3101
3102         if (unlikely(len != skb->len)) {
3103                 /* Undo the changes starting at tcb->producer_index */
3104                 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3105                 dev_kfree_skb_any(skb);
3106                 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3107                 return NETDEV_TX_OK;
3108         }
3109
3110         BNA_QE_INDX_INC(prod, q_depth);
3111         tcb->producer_index = prod;
3112
3113         smp_mb();
3114
3115         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3116                 return NETDEV_TX_OK;
3117
3118         skb_tx_timestamp(skb);
3119
3120         bna_txq_prod_indx_doorbell(tcb);
3121         smp_mb();
3122
3123         return NETDEV_TX_OK;
3124 }
3125
3126 /*
3127  * Use spin_lock to synchronize reading of the stats structures, which
3128  * are written by BNA under the same lock.
3129  */
3130 static struct rtnl_link_stats64 *
3131 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3132 {
3133         struct bnad *bnad = netdev_priv(netdev);
3134         unsigned long flags;
3135
3136         spin_lock_irqsave(&bnad->bna_lock, flags);
3137
3138         bnad_netdev_qstats_fill(bnad, stats);
3139         bnad_netdev_hwstats_fill(bnad, stats);
3140
3141         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3142
3143         return stats;
3144 }
3145
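/*
 * Program the netdev unicast address list into the UCAM. An empty list
 * simply clears the UCAM; if the list exceeds the hardware limit, or if
 * allocation or the list-set call fails, fall back to default mode so
 * unmatched unicast frames are routed to the default function.
 */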
3146 static void
3147 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3148 {
3149         struct net_device *netdev = bnad->netdev;
3150         int uc_count = netdev_uc_count(netdev);
3151         enum bna_cb_status ret;
3152         u8 *mac_list;
3153         struct netdev_hw_addr *ha;
3154         int entry;
3155
3156         if (netdev_uc_empty(bnad->netdev)) {
3157                 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3158                 return;
3159         }
3160
3161         if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3162                 goto mode_default;
3163
3164         mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3165         if (mac_list == NULL)
3166                 goto mode_default;
3167
3168         entry = 0;
3169         netdev_for_each_uc_addr(ha, netdev) {
3170                 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3171                 entry++;
3172         }
3173
3174         ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3175         kfree(mac_list);
3176
3177         if (ret != BNA_CB_SUCCESS)
3178                 goto mode_default;
3179
3180         return;
3181
3182         /* ucast packets not in UCAM are routed to default function */
3183 mode_default:
3184         bnad->cfg_flags |= BNAD_CF_DEFAULT;
3185         bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3186 }
3187
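/*
 * Program the netdev multicast address list, with the broadcast address
 * prepended, into the MCAM. On IFF_ALLMULTI, MCAM overflow or any
 * failure, fall back to all-multicast mode and delete the MCAM entries.
 */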
3188 static void
3189 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3190 {
3191         struct net_device *netdev = bnad->netdev;
3192         int mc_count = netdev_mc_count(netdev);
3193         enum bna_cb_status ret;
3194         u8 *mac_list;
3195
3196         if (netdev->flags & IFF_ALLMULTI)
3197                 goto mode_allmulti;
3198
3199         if (netdev_mc_empty(netdev))
3200                 return;
3201
3202         if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3203                 goto mode_allmulti;
3204
3205         mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3206
3207         if (mac_list == NULL)
3208                 goto mode_allmulti;
3209
3210         ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3211
3212         /* copy rest of the MCAST addresses */
3213         bnad_netdev_mc_list_get(netdev, mac_list);
3214         ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3215         kfree(mac_list);
3216
3217         if (ret != BNA_CB_SUCCESS)
3218                 goto mode_allmulti;
3219
3220         return;
3221
3222 mode_allmulti:
3223         bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3224         bna_rx_mcast_delall(bnad->rx_info[0].rx);
3225 }
3226
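/*
 * Recompute the Rx mode (promiscuous / default / all-multicast) from the
 * netdev flags and the unicast/multicast filter results, then push the
 * combined mode to BNA. The whole update is done under bna_lock.
 */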
3227 void
3228 bnad_set_rx_mode(struct net_device *netdev)
3229 {
3230         struct bnad *bnad = netdev_priv(netdev);
3231         enum bna_rxmode new_mode, mode_mask;
3232         unsigned long flags;
3233
3234         spin_lock_irqsave(&bnad->bna_lock, flags);
3235
3236         if (bnad->rx_info[0].rx == NULL) {
3237                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3238                 return;
3239         }
3240
3241         /* clear bnad flags to update it with new settings */
3242         bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3243                         BNAD_CF_ALLMULTI);
3244
3245         new_mode = 0;
3246         if (netdev->flags & IFF_PROMISC) {
3247                 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3248                 bnad->cfg_flags |= BNAD_CF_PROMISC;
3249         } else {
3250                 bnad_set_rx_mcast_fltr(bnad);
3251
3252                 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3253                         new_mode |= BNA_RXMODE_ALLMULTI;
3254
3255                 bnad_set_rx_ucast_fltr(bnad);
3256
3257                 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3258                         new_mode |= BNA_RXMODE_DEFAULT;
3259         }
3260
3261         mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3262                         BNA_RXMODE_ALLMULTI;
3263         bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3264
3265         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3266 }
3267
3268 /*
3269  * bna_lock is used to sync writes to netdev->addr
3270  * conf_lock cannot be used since this call may be made
3271  * in a non-blocking context.
3272  */
3273 static int
3274 bnad_set_mac_address(struct net_device *netdev, void *addr)
3275 {
3276         int err;
3277         struct bnad *bnad = netdev_priv(netdev);
3278         struct sockaddr *sa = (struct sockaddr *)addr;
3279         unsigned long flags;
3280
3281         spin_lock_irqsave(&bnad->bna_lock, flags);
3282
3283         err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3284         if (!err)
3285                 ether_addr_copy(netdev->dev_addr, sa->sa_data);
3286
3287         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3288
3289         return err;
3290 }
3291
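/*
 * Program the new frame size into the enet block and wait for the
 * MTU-set completion callback; returns the completion status.
 */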
3292 static int
3293 bnad_mtu_set(struct bnad *bnad, int frame_size)
3294 {
3295         unsigned long flags;
3296
3297         init_completion(&bnad->bnad_completions.mtu_comp);
3298
3299         spin_lock_irqsave(&bnad->bna_lock, flags);
3300         bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3301         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3302
3303         wait_for_completion(&bnad->bnad_completions.mtu_comp);
3304
3305         return bnad->bnad_completions.mtu_comp_status;
3306 }
3307
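/*
 * ndo_change_mtu handler. Validates the requested MTU, re-initializes Rx
 * when a CAT2 device crosses the 4K frame-size boundary (to switch
 * multi-buffer Rx on or off) and then programs the new frame size via
 * bnad_mtu_set().
 */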
3308 static int
3309 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3310 {
3311         int err, mtu;
3312         struct bnad *bnad = netdev_priv(netdev);
3313         u32 rx_count = 0, frame, new_frame;
3314
3315         if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3316                 return -EINVAL;
3317
3318         mutex_lock(&bnad->conf_mutex);
3319
3320         mtu = netdev->mtu;
3321         netdev->mtu = new_mtu;
3322
3323         frame = BNAD_FRAME_SIZE(mtu);
3324         new_frame = BNAD_FRAME_SIZE(new_mtu);
3325
3326         /* check if multi-buffer needs to be enabled */
3327         if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3328             netif_running(bnad->netdev)) {
3329                 /* only when the frame size crosses the 4K boundary */
3330                 if ((frame <= 4096 && new_frame > 4096) ||
3331                     (frame > 4096 && new_frame <= 4096))
3332                         rx_count = bnad_reinit_rx(bnad);
3333         }
3334
3335         /* rx_count > 0 - a new rx has been created
3336          *      - err is set to 0 and returned on success
3337          */
3338         err = bnad_mtu_set(bnad, new_frame);
3339         if (err)
3340                 err = -EBUSY;
3341
3342         mutex_unlock(&bnad->conf_mutex);
3343         return err;
3344 }
3345
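/* Add a VLAN id to the hardware filter and track it in active_vlans */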
3346 static int
3347 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3348 {
3349         struct bnad *bnad = netdev_priv(netdev);
3350         unsigned long flags;
3351
3352         if (!bnad->rx_info[0].rx)
3353                 return 0;
3354
3355         mutex_lock(&bnad->conf_mutex);
3356
3357         spin_lock_irqsave(&bnad->bna_lock, flags);
3358         bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3359         set_bit(vid, bnad->active_vlans);
3360         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3361
3362         mutex_unlock(&bnad->conf_mutex);
3363
3364         return 0;
3365 }
3366
3367 static int
3368 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3369 {
3370         struct bnad *bnad = netdev_priv(netdev);
3371         unsigned long flags;
3372
3373         if (!bnad->rx_info[0].rx)
3374                 return 0;
3375
3376         mutex_lock(&bnad->conf_mutex);
3377
3378         spin_lock_irqsave(&bnad->bna_lock, flags);
3379         clear_bit(vid, bnad->active_vlans);
3380         bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3381         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3382
3383         mutex_unlock(&bnad->conf_mutex);
3384
3385         return 0;
3386 }
3387
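/* ndo_set_features handler - toggles hardware VLAN tag stripping on Rx */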
3388 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3389 {
3390         struct bnad *bnad = netdev_priv(dev);
3391         netdev_features_t changed = features ^ dev->features;
3392
3393         if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3394                 unsigned long flags;
3395
3396                 spin_lock_irqsave(&bnad->bna_lock, flags);
3397
3398                 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3399                         bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3400                 else
3401                         bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3402
3403                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3404         }
3405
3406         return 0;
3407 }
3408
3409 #ifdef CONFIG_NET_POLL_CONTROLLER
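/*
 * Netpoll entry point. In INTx mode, mask interrupts and run the ISR
 * directly; in MSI-X mode, schedule NAPI polling on every Rx CCB
 * (Tx completions are handled in the transmit path itself).
 */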
3410 static void
3411 bnad_netpoll(struct net_device *netdev)
3412 {
3413         struct bnad *bnad = netdev_priv(netdev);
3414         struct bnad_rx_info *rx_info;
3415         struct bnad_rx_ctrl *rx_ctrl;
3416         u32 curr_mask;
3417         int i, j;
3418
3419         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3420                 bna_intx_disable(&bnad->bna, curr_mask);
3421                 bnad_isr(bnad->pcidev->irq, netdev);
3422                 bna_intx_enable(&bnad->bna, curr_mask);
3423         } else {
3424                 /*
3425                  * Tx processing may happen in sending context, so no need
3426                  * to explicitly process completions here
3427                  */
3428
3429                 /* Rx processing */
3430                 for (i = 0; i < bnad->num_rx; i++) {
3431                         rx_info = &bnad->rx_info[i];
3432                         if (!rx_info->rx)
3433                                 continue;
3434                         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3435                                 rx_ctrl = &rx_info->rx_ctrl[j];
3436                                 if (rx_ctrl->ccb)
3437                                         bnad_netif_rx_schedule_poll(bnad,
3438                                                             rx_ctrl->ccb);
3439                         }
3440                 }
3441         }
3442 }
3443 #endif
3444
3445 static const struct net_device_ops bnad_netdev_ops = {
3446         .ndo_open               = bnad_open,
3447         .ndo_stop               = bnad_stop,
3448         .ndo_start_xmit         = bnad_start_xmit,
3449         .ndo_get_stats64                = bnad_get_stats64,
3450         .ndo_set_rx_mode        = bnad_set_rx_mode,
3451         .ndo_validate_addr      = eth_validate_addr,
3452         .ndo_set_mac_address    = bnad_set_mac_address,
3453         .ndo_change_mtu         = bnad_change_mtu,
3454         .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3455         .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3456         .ndo_set_features       = bnad_set_features,
3457 #ifdef CONFIG_NET_POLL_CONTROLLER
3458         .ndo_poll_controller    = bnad_netpoll
3459 #endif
3460 };
3461
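/*
 * Set up the netdev feature flags (checksum offload, TSO, VLAN offload),
 * the MMIO region info and the netdev/ethtool ops. HIGHDMA is advertised
 * only when 64-bit DMA is in use.
 */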
3462 static void
3463 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3464 {
3465         struct net_device *netdev = bnad->netdev;
3466
3467         netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3468                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3469                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3470                 NETIF_F_HW_VLAN_CTAG_RX;
3471
3472         netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3473                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3474                 NETIF_F_TSO | NETIF_F_TSO6;
3475
3476         netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3477
3478         if (using_dac)
3479                 netdev->features |= NETIF_F_HIGHDMA;
3480
3481         netdev->mem_start = bnad->mmio_start;
3482         netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3483
3484         netdev->netdev_ops = &bnad_netdev_ops;
3485         bnad_set_ethtool_ops(netdev);
3486 }
3487
3488 /*
3489  * 1. Initialize the bnad structure
3490  * 2. Setup netdev pointer in pci_dev
3491  * 3. Initialize no. of TxQ & CQs & MSIX vectors
3492  * 4. Initialize work queue.
3493  */
3494 static int
3495 bnad_init(struct bnad *bnad,
3496           struct pci_dev *pdev, struct net_device *netdev)
3497 {
3498         unsigned long flags;
3499
3500         SET_NETDEV_DEV(netdev, &pdev->dev);
3501         pci_set_drvdata(pdev, netdev);
3502
3503         bnad->netdev = netdev;
3504         bnad->pcidev = pdev;
3505         bnad->mmio_start = pci_resource_start(pdev, 0);
3506         bnad->mmio_len = pci_resource_len(pdev, 0);
3507         bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3508         if (!bnad->bar0) {
3509                 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3510                 return -ENOMEM;
3511         }
3512         dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3513                  (unsigned long long) bnad->mmio_len);
3514
3515         spin_lock_irqsave(&bnad->bna_lock, flags);
3516         if (!bnad_msix_disable)
3517                 bnad->cfg_flags = BNAD_CF_MSIX;
3518
3519         bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3520
3521         bnad_q_num_init(bnad);
3522         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3523
3524         bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3525                 (bnad->num_rx * bnad->num_rxp_per_rx) +
3526                          BNAD_MAILBOX_MSIX_VECTORS;
3527
3528         bnad->txq_depth = BNAD_TXQ_DEPTH;
3529         bnad->rxq_depth = BNAD_RXQ_DEPTH;
3530
3531         bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3532         bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3533
3534         sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3535         bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3536         if (!bnad->work_q) {
3537                 iounmap(bnad->bar0);
3538                 return -ENOMEM;
3539         }
3540
3541         return 0;
3542 }
3543
3544 /*
3545  * Must be called after bnad_pci_uninit()
3546  * so that iounmap() and pci_set_drvdata(NULL)
3547  * happen only after PCI uninitialization.
3548  */
3549 static void
3550 bnad_uninit(struct bnad *bnad)
3551 {
3552         if (bnad->work_q) {
3553                 flush_workqueue(bnad->work_q);
3554                 destroy_workqueue(bnad->work_q);
3555                 bnad->work_q = NULL;
3556         }
3557
3558         if (bnad->bar0)
3559                 iounmap(bnad->bar0);
3560 }
3561
3562 /*
3563  * Initialize locks
3564  *      a) per-ioceth mutex used for serializing configuration
3565  *         changes from the OS interface
3566  *      b) spin lock used to protect the bna state machine
3567  */
3568 static void
3569 bnad_lock_init(struct bnad *bnad)
3570 {
3571         spin_lock_init(&bnad->bna_lock);
3572         mutex_init(&bnad->conf_mutex);
3573         mutex_init(&bnad_list_mutex);
3574 }
3575
3576 static void
3577 bnad_lock_uninit(struct bnad *bnad)
3578 {
3579         mutex_destroy(&bnad->conf_mutex);
3580         mutex_destroy(&bnad_list_mutex);
3581 }
3582
3583 /* PCI Initialization */
3584 static int
3585 bnad_pci_init(struct bnad *bnad,
3586               struct pci_dev *pdev, bool *using_dac)
3587 {
3588         int err;
3589
3590         err = pci_enable_device(pdev);
3591         if (err)
3592                 return err;
3593         err = pci_request_regions(pdev, BNAD_NAME);
3594         if (err)
3595                 goto disable_device;
3596         if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3597                 *using_dac = true;
3598         } else {
3599                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3600                 if (err)
3601                         goto release_regions;
3602                 *using_dac = false;
3603         }
3604         pci_set_master(pdev);
3605         return 0;
3606
3607 release_regions:
3608         pci_release_regions(pdev);
3609 disable_device:
3610         pci_disable_device(pdev);
3611
3612         return err;
3613 }
3614
3615 static void
3616 bnad_pci_uninit(struct pci_dev *pdev)
3617 {
3618         pci_release_regions(pdev);
3619         pci_disable_device(pdev);
3620 }
3621
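/*
 * PCI probe entry point: load the firmware image, allocate the netdev
 * and bnad private area, bring up PCI, MMIO, MSI-X and the BNA layers,
 * enable the IOC and finally register the net device.
 */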
3622 static int
3623 bnad_pci_probe(struct pci_dev *pdev,
3624                 const struct pci_device_id *pcidev_id)
3625 {
3626         bool    using_dac;
3627         int     err;
3628         struct bnad *bnad;
3629         struct bna *bna;
3630         struct net_device *netdev;
3631         struct bfa_pcidev pcidev_info;
3632         unsigned long flags;
3633
3634         mutex_lock(&bnad_fwimg_mutex);
3635         if (!cna_get_firmware_buf(pdev)) {
3636                 mutex_unlock(&bnad_fwimg_mutex);
3637                 dev_err(&pdev->dev, "failed to load firmware image!\n");
3638                 return -ENODEV;
3639         }
3640         mutex_unlock(&bnad_fwimg_mutex);
3641
3642         /*
3643          * Allocate a net_device with sizeof(struct bnad) of private data;
3644          * bnad = netdev_priv(netdev)
3645          */
3646         netdev = alloc_etherdev(sizeof(struct bnad));
3647         if (!netdev) {
3648                 err = -ENOMEM;
3649                 return err;
3650         }
3651         bnad = netdev_priv(netdev);
3652         bnad_lock_init(bnad);
3653         bnad_add_to_list(bnad);
3654
3655         mutex_lock(&bnad->conf_mutex);
3656         /*
3657          * PCI initialization
3658          *      Output : using_dac = true for 64 bit DMA
3659          *                         = false for 32 bit DMA
3660          */
3661         using_dac = false;
3662         err = bnad_pci_init(bnad, pdev, &using_dac);
3663         if (err)
3664                 goto unlock_mutex;
3665
3666         /*
3667          * Initialize bnad structure
3668          * Setup relation between pci_dev & netdev
3669          */
3670         err = bnad_init(bnad, pdev, netdev);
3671         if (err)
3672                 goto pci_uninit;
3673
3674         /* Initialize netdev structure, set up ethtool ops */
3675         bnad_netdev_init(bnad, using_dac);
3676
3677         /* Set link to down state */
3678         netif_carrier_off(netdev);
3679
3680         /* Set up the debugfs node for this bnad */
3681         if (bna_debugfs_enable)
3682                 bnad_debugfs_init(bnad);
3683
3684         /* Get resource requirement from bna */
3685         spin_lock_irqsave(&bnad->bna_lock, flags);
3686         bna_res_req(&bnad->res_info[0]);
3687         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3688
3689         /* Allocate resources from bna */
3690         err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3691         if (err)
3692                 goto drv_uninit;
3693
3694         bna = &bnad->bna;
3695
3696         /* Setup pcidev_info for bna_init() */
3697         pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3698         pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3699         pcidev_info.device_id = bnad->pcidev->device;
3700         pcidev_info.pci_bar_kva = bnad->bar0;
3701
3702         spin_lock_irqsave(&bnad->bna_lock, flags);
3703         bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3704         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3705
3706         bnad->stats.bna_stats = &bna->stats;
3707
3708         bnad_enable_msix(bnad);
3709         err = bnad_mbox_irq_alloc(bnad);
3710         if (err)
3711                 goto res_free;
3712
3713         /* Set up timers */
3714         setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3715                     (unsigned long)bnad);
3716         setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3717                     (unsigned long)bnad);
3718         setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3719                     (unsigned long)bnad);
3720         setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3721                     (unsigned long)bnad);
3722
3723         /*
3724          * Start the chip
3725          * If the callback comes back with an error, we bail out.
3726          * This is a catastrophic error.
3727          */
3728         err = bnad_ioceth_enable(bnad);
3729         if (err) {
3730                 dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3731                 goto probe_success;
3732         }
3733
3734         spin_lock_irqsave(&bnad->bna_lock, flags);
3735         if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3736                 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3737                 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3738                         bna_attr(bna)->num_rxp - 1);
3739                 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3740                         bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3741                         err = -EIO;
3742         }
3743         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3744         if (err)
3745                 goto disable_ioceth;
3746
3747         spin_lock_irqsave(&bnad->bna_lock, flags);
3748         bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3749         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3750
3751         err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3752         if (err) {
3753                 err = -EIO;
3754                 goto disable_ioceth;
3755         }
3756
3757         spin_lock_irqsave(&bnad->bna_lock, flags);
3758         bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3759         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3760
3761         /* Get the burnt-in mac */
3762         spin_lock_irqsave(&bnad->bna_lock, flags);
3763         bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3764         bnad_set_netdev_perm_addr(bnad);
3765         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3766
3767         mutex_unlock(&bnad->conf_mutex);
3768
3769         /* Finally, register with the net_device layer */
3770         err = register_netdev(netdev);
3771         if (err) {
3772                 dev_err(&pdev->dev, "registering net device failed\n");
3773                 goto probe_uninit;
3774         }
3775         set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3776
3777         return 0;
3778
3779 probe_success:
3780         mutex_unlock(&bnad->conf_mutex);
3781         return 0;
3782
3783 probe_uninit:
3784         mutex_lock(&bnad->conf_mutex);
3785         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3786 disable_ioceth:
3787         bnad_ioceth_disable(bnad);
3788         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3789         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3790         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3791         spin_lock_irqsave(&bnad->bna_lock, flags);
3792         bna_uninit(bna);
3793         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3794         bnad_mbox_irq_free(bnad);
3795         bnad_disable_msix(bnad);
3796 res_free:
3797         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3798 drv_uninit:
3799         /* Remove the debugfs node for this bnad */
3800         kfree(bnad->regdata);
3801         bnad_debugfs_uninit(bnad);
3802         bnad_uninit(bnad);
3803 pci_uninit:
3804         bnad_pci_uninit(pdev);
3805 unlock_mutex:
3806         mutex_unlock(&bnad->conf_mutex);
3807         bnad_remove_from_list(bnad);
3808         bnad_lock_uninit(bnad);
3809         free_netdev(netdev);
3810         return err;
3811 }
3812
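/*
 * PCI remove entry point: unregister the netdev and tear everything down
 * in roughly the reverse order of bnad_pci_probe().
 */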
3813 static void
3814 bnad_pci_remove(struct pci_dev *pdev)
3815 {
3816         struct net_device *netdev = pci_get_drvdata(pdev);
3817         struct bnad *bnad;
3818         struct bna *bna;
3819         unsigned long flags;
3820
3821         if (!netdev)
3822                 return;
3823
3824         bnad = netdev_priv(netdev);
3825         bna = &bnad->bna;
3826
3827         if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3828                 unregister_netdev(netdev);
3829
3830         mutex_lock(&bnad->conf_mutex);
3831         bnad_ioceth_disable(bnad);
3832         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3833         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3834         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3835         spin_lock_irqsave(&bnad->bna_lock, flags);
3836         bna_uninit(bna);
3837         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3838
3839         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3840         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3841         bnad_mbox_irq_free(bnad);
3842         bnad_disable_msix(bnad);
3843         bnad_pci_uninit(pdev);
3844         mutex_unlock(&bnad->conf_mutex);
3845         bnad_remove_from_list(bnad);
3846         bnad_lock_uninit(bnad);
3847         /* Remove the debugfs node for this bnad */
3848         kfree(bnad->regdata);
3849         bnad_debugfs_uninit(bnad);
3850         bnad_uninit(bnad);
3851         free_netdev(netdev);
3852 }
3853
3854 static const struct pci_device_id bnad_pci_id_table[] = {
3855         {
3856                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3857                         PCI_DEVICE_ID_BROCADE_CT),
3858                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3859                 .class_mask =  0xffff00
3860         },
3861         {
3862                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3863                         BFA_PCI_DEVICE_ID_CT2),
3864                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3865                 .class_mask =  0xffff00
3866         },
3867         {0,  },
3868 };
3869
3870 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3871
3872 static struct pci_driver bnad_pci_driver = {
3873         .name = BNAD_NAME,
3874         .id_table = bnad_pci_id_table,
3875         .probe = bnad_pci_probe,
3876         .remove = bnad_pci_remove,
3877 };
3878
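/* Module init: set the IOC auto-recovery policy and register the PCI driver */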
3879 static int __init
3880 bnad_module_init(void)
3881 {
3882         int err;
3883
3884         pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
3885                 BNAD_VERSION);
3886
3887         bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3888
3889         err = pci_register_driver(&bnad_pci_driver);
3890         if (err < 0) {
3891                 pr_err("bna: PCI driver registration failed err=%d\n", err);
3892                 return err;
3893         }
3894
3895         return 0;
3896 }
3897
3898 static void __exit
3899 bnad_module_exit(void)
3900 {
3901         pci_unregister_driver(&bnad_pci_driver);
3902         release_firmware(bfi_fw);
3903 }
3904
3905 module_init(bnad_module_init);
3906 module_exit(bnad_module_exit);
3907
3908 MODULE_AUTHOR("Brocade");
3909 MODULE_LICENSE("GPL");
3910 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3911 MODULE_VERSION(BNAD_VERSION);
3912 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3913 MODULE_FIRMWARE(CNA_FW_FILE_CT2);