/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"
static DEFINE_MUTEX(bnad_fwimg_mutex);

static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

static u32 bnad_rxqs_per_cq = 2;
static atomic_t bna_id;
static struct mutex bnad_list_mutex;
static const u8 bnad_bcast_addr[] __aligned(2) =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
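
/*
 * The mailbox interrupt is either the dedicated MSI-X vector reserved at
 * BNAD_MAILBOX_MSIX_INDEX or, when MSI-X is not in use, the legacy INTx
 * line of the PCI device.
 */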
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len = (_size);		\
} while (0)
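
/*
 * The helper above fills one BNA memory resource request: the unmap queues
 * are plain kernel-virtual (BNA_MEM_T_KVA) allocations, one per TxQ or RxQ,
 * used only for software-side DMA bookkeeping (see bnad_setup_tx() and
 * bnad_setup_rx()).
 */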
static void
bnad_add_to_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	mutex_unlock(&bnad_list_mutex);
}

static void
bnad_remove_from_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	mutex_unlock(&bnad_list_mutex);
}
/*
 * Reinitialize completions in CQ, once Rx is taken down.
 */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}
107 /* Tx Datapath functions */
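
/*
 * bnad_tx_buff_unmap() releases the DMA mappings of one transmitted frame:
 * the linear part of the skb was mapped with dma_map_single() and each
 * fragment with dma_map_page(), so they are unmapped accordingly. A frame
 * may span several work items; once BFI_TX_MAX_VECTORS_PER_WI vectors have
 * been consumed the walk moves on to the next unmap queue entry, and the
 * index of the entry following the frame is returned to the caller.
 */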
110 /* Caller should ensure that the entry at unmap_q[index] is valid */
112 bnad_tx_buff_unmap(struct bnad *bnad,
113 struct bnad_tx_unmap *unmap_q,
114 u32 q_depth, u32 index)
116 struct bnad_tx_unmap *unmap;
120 unmap = &unmap_q[index];
121 nvecs = unmap->nvecs;
126 dma_unmap_single(&bnad->pcidev->dev,
127 dma_unmap_addr(&unmap->vectors[0], dma_addr),
128 skb_headlen(skb), DMA_TO_DEVICE);
129 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
135 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
137 BNA_QE_INDX_INC(index, q_depth);
138 unmap = &unmap_q[index];
141 dma_unmap_page(&bnad->pcidev->dev,
142 dma_unmap_addr(&unmap->vectors[vector], dma_addr),
143 dma_unmap_len(&unmap->vectors[vector], dma_len),
145 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
149 BNA_QE_INDX_INC(index, q_depth);
/*
 * Frees all pending Tx Bufs.
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < tcb->q_depth; i++) {
		skb = unmap_q[i].skb;
		if (!skb)
			continue;

		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
		dev_kfree_skb_any(skb);
	}
}
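
/*
 * Tx completion is accounted in work items (WIs): hardware advances
 * tcb->hw_consumer_index, BNA_Q_INDEX_CHANGE() tells how many WIs completed
 * since the last pass, and each frame consumes BNA_TXQ_WI_NEEDED(nvecs) of
 * them. Completed skbs are unmapped and freed, then the software consumer
 * index is moved up to the hardware one.
 */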
/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 */
static u32
bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u32 wis, unmap_wis, hw_cons, cons, q_depth;
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;

	/* Just return if TX is stopped */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	hw_cons = *(tcb->hw_consumer_index);
	cons = tcb->consumer_index;
	q_depth = tcb->q_depth;

	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	while (wis) {
		unmap = &unmap_q[cons];
		skb = unmap->skb;

		sent_packets++;
		sent_bytes += skb->len;

		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
		wis -= unmap_wis;

		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = hw_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
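
/*
 * bnad_tx_complete() below runs from the Tx interrupt: BNAD_TXQ_FREE_SENT
 * serializes it against the cleanup worker, and the netdev queue is woken
 * only once at least BNAD_NETIF_WAKE_THRESHOLD entries are free and the
 * carrier is up, so the stack does not bounce the queue on every completion.
 */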
226 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
228 struct net_device *netdev = bnad->netdev;
231 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
234 sent = bnad_txcmpl_process(bnad, tcb);
236 if (netif_queue_stopped(netdev) &&
237 netif_carrier_ok(netdev) &&
238 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
239 BNAD_NETIF_WAKE_THRESHOLD) {
240 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
241 netif_wake_queue(netdev);
242 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
247 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
248 bna_ib_ack(tcb->i_dbell, sent);
250 smp_mb__before_atomic();
251 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
256 /* MSIX Tx Completion Handler */
258 bnad_msix_tx(int irq, void *data)
260 struct bna_tcb *tcb = (struct bna_tcb *)data;
261 struct bnad *bnad = tcb->bnad;
263 bnad_tx_complete(bnad, tcb);
269 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
271 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
273 unmap_q->reuse_pi = -1;
274 unmap_q->alloc_order = -1;
275 unmap_q->map_size = 0;
276 unmap_q->type = BNAD_RXBUF_NONE;
279 /* Default is page-based allocation. Multi-buffer support - TBD */
281 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
283 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
286 bnad_rxq_alloc_uninit(bnad, rcb);
288 order = get_order(rcb->rxq->buffer_size);
290 unmap_q->type = BNAD_RXBUF_PAGE;
292 if (bna_is_small_rxq(rcb->id)) {
293 unmap_q->alloc_order = 0;
294 unmap_q->map_size = rcb->rxq->buffer_size;
296 if (rcb->rxq->multi_buffer) {
297 unmap_q->alloc_order = 0;
298 unmap_q->map_size = rcb->rxq->buffer_size;
299 unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
301 unmap_q->alloc_order = order;
303 (rcb->rxq->buffer_size > 2048) ?
304 PAGE_SIZE << order : 2048;
308 BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
314 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
319 dma_unmap_page(&bnad->pcidev->dev,
320 dma_unmap_addr(&unmap->vector, dma_addr),
321 unmap->vector.len, DMA_FROM_DEVICE);
322 put_page(unmap->page);
324 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
325 unmap->vector.len = 0;
329 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
334 dma_unmap_single(&bnad->pcidev->dev,
335 dma_unmap_addr(&unmap->vector, dma_addr),
336 unmap->vector.len, DMA_FROM_DEVICE);
337 dev_kfree_skb_any(unmap->skb);
339 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
340 unmap->vector.len = 0;
344 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
346 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
349 for (i = 0; i < rcb->q_depth; i++) {
350 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
352 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
353 bnad_rxq_cleanup_skb(bnad, unmap);
355 bnad_rxq_cleanup_page(bnad, unmap);
357 bnad_rxq_alloc_uninit(bnad, rcb);
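
/*
 * bnad_rxq_refill_page() posts page-based receive buffers. An allocation of
 * order alloc_order is carved into map_size chunks: unmap_q->reuse_pi
 * remembers the entry whose page still has room, so the next buffer is taken
 * from the remainder of that page rather than from a fresh allocation. Each
 * chunk is DMA-mapped on its own and its address written into the RxQ entry
 * before the producer doorbell is rung.
 */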
361 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
363 u32 alloced, prod, q_depth;
364 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
365 struct bnad_rx_unmap *unmap, *prev;
366 struct bna_rxq_entry *rxent;
368 u32 page_offset, alloc_size;
371 prod = rcb->producer_index;
372 q_depth = rcb->q_depth;
374 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
378 unmap = &unmap_q->unmap[prod];
380 if (unmap_q->reuse_pi < 0) {
381 page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
382 unmap_q->alloc_order);
385 prev = &unmap_q->unmap[unmap_q->reuse_pi];
387 page_offset = prev->page_offset + unmap_q->map_size;
391 if (unlikely(!page)) {
392 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
393 rcb->rxq->rxbuf_alloc_failed++;
397 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
398 unmap_q->map_size, DMA_FROM_DEVICE);
399 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
401 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
402 rcb->rxq->rxbuf_map_failed++;
407 unmap->page_offset = page_offset;
408 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
409 unmap->vector.len = unmap_q->map_size;
410 page_offset += unmap_q->map_size;
412 if (page_offset < alloc_size)
413 unmap_q->reuse_pi = prod;
415 unmap_q->reuse_pi = -1;
417 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
418 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
419 BNA_QE_INDX_INC(prod, q_depth);
424 if (likely(alloced)) {
425 rcb->producer_index = prod;
427 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
428 bna_rxq_prod_indx_doorbell(rcb);
435 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
437 u32 alloced, prod, q_depth, buff_sz;
438 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
439 struct bnad_rx_unmap *unmap;
440 struct bna_rxq_entry *rxent;
444 buff_sz = rcb->rxq->buffer_size;
445 prod = rcb->producer_index;
446 q_depth = rcb->q_depth;
450 unmap = &unmap_q->unmap[prod];
452 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
454 if (unlikely(!skb)) {
455 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
456 rcb->rxq->rxbuf_alloc_failed++;
460 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
461 buff_sz, DMA_FROM_DEVICE);
462 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
463 dev_kfree_skb_any(skb);
464 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
465 rcb->rxq->rxbuf_map_failed++;
470 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
471 unmap->vector.len = buff_sz;
473 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
474 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
475 BNA_QE_INDX_INC(prod, q_depth);
480 if (likely(alloced)) {
481 rcb->producer_index = prod;
483 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
484 bna_rxq_prod_indx_doorbell(rcb);
491 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
493 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
496 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
497 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
500 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
501 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
503 bnad_rxq_refill_page(bnad, rcb, to_alloc);
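
/*
 * Note that bnad_rxq_post() above only refills once at least
 * 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT entries are free, so buffer allocation
 * and the producer doorbell are batched rather than issued per packet.
 */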
506 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
508 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
509 BNA_CQ_EF_L4_CKSUM_OK)
511 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
512 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
513 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
514 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
515 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
516 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
517 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
518 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
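
/*
 * The masks above are matched against the completion flags in
 * bnad_cq_process(): only when the masked flags equal one of the four
 * TCP/UDP over IPv4/IPv6 combinations (i.e. the hardware reported the
 * relevant checksums as OK) is CHECKSUM_UNNECESSARY set on the skb.
 */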
521 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
522 u32 sop_ci, u32 nvecs)
524 struct bnad_rx_unmap_q *unmap_q;
525 struct bnad_rx_unmap *unmap;
528 unmap_q = rcb->unmap_q;
529 for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
530 unmap = &unmap_q->unmap[ci];
531 BNA_QE_INDX_INC(ci, rcb->q_depth);
533 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
534 bnad_rxq_cleanup_skb(bnad, unmap);
536 bnad_rxq_cleanup_page(bnad, unmap);
541 bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
545 struct bnad_rx_unmap_q *unmap_q;
546 struct bna_cq_entry *cq, *cmpl;
547 u32 ci, pi, totlen = 0;
550 pi = ccb->producer_index;
553 rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
554 unmap_q = rcb->unmap_q;
556 ci = rcb->consumer_index;
558 /* prefetch header */
559 prefetch(page_address(unmap_q->unmap[ci].page) +
560 unmap_q->unmap[ci].page_offset);
563 struct bnad_rx_unmap *unmap;
566 unmap = &unmap_q->unmap[ci];
567 BNA_QE_INDX_INC(ci, rcb->q_depth);
569 dma_unmap_page(&bnad->pcidev->dev,
570 dma_unmap_addr(&unmap->vector, dma_addr),
571 unmap->vector.len, DMA_FROM_DEVICE);
573 len = ntohs(cmpl->length);
574 skb->truesize += unmap->vector.len;
577 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
578 unmap->page, unmap->page_offset, len);
581 unmap->vector.len = 0;
583 BNA_QE_INDX_INC(pi, ccb->q_depth);
588 skb->data_len += totlen;
592 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
593 struct bnad_rx_unmap *unmap, u32 len)
597 dma_unmap_single(&bnad->pcidev->dev,
598 dma_unmap_addr(&unmap->vector, dma_addr),
599 unmap->vector.len, DMA_FROM_DEVICE);
602 skb->protocol = eth_type_trans(skb, bnad->netdev);
605 unmap->vector.len = 0;
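
/*
 * bnad_cq_process() is the NAPI Rx loop: it walks completion entries until
 * the budget is exhausted or an entry whose 'valid' bit is still clear is
 * found, builds the skb (linear for SK_BUFF queues, frag-based via
 * napi_get_frags() otherwise), applies checksum and VLAN offload results,
 * hands the packet to the stack, then acks the IB and replenishes the RxQs.
 */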
609 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
611 struct bna_cq_entry *cq, *cmpl, *next_cmpl;
612 struct bna_rcb *rcb = NULL;
613 struct bnad_rx_unmap_q *unmap_q;
614 struct bnad_rx_unmap *unmap = NULL;
615 struct sk_buff *skb = NULL;
616 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
617 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
618 u32 packets = 0, len = 0, totlen = 0;
619 u32 pi, vec, sop_ci = 0, nvecs = 0;
620 u32 flags, masked_flags;
622 prefetch(bnad->netdev);
626 while (packets < budget) {
		cmpl = &cq[ccb->producer_index];
		if (!cmpl->valid)
			break;
		/* The 'valid' field is set by the adapter, only after writing
		 * the other fields of completion entry. Hence, do not load
		 * other fields of completion entry *before* the 'valid' is
		 * loaded. Adding the rmb() here prevents the compiler and/or
		 * CPU from reordering the reads which would potentially result
		 * in reading stale values in completion entry.
		 */
		rmb();
639 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
641 if (bna_is_small_rxq(cmpl->rxq_id))
646 unmap_q = rcb->unmap_q;
648 /* start of packet ci */
649 sop_ci = rcb->consumer_index;
651 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
652 unmap = &unmap_q->unmap[sop_ci];
655 skb = napi_get_frags(&rx_ctrl->napi);
661 flags = ntohl(cmpl->flags);
662 len = ntohs(cmpl->length);
		/* Check all the completions for this frame.
		 * busy-wait doesn't help much, break here.
		 */
669 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
670 (flags & BNA_CQ_EF_EOP) == 0) {
671 pi = ccb->producer_index;
673 BNA_QE_INDX_INC(pi, ccb->q_depth);
				if (!next_cmpl->valid)
					break;
				/* The 'valid' field is set by the adapter, only
				 * after writing the other fields of completion
				 * entry. Hence, do not load other fields of
				 * completion entry *before* the 'valid' is
				 * loaded. Adding the rmb() here prevents the
				 * compiler and/or CPU from reordering the reads
				 * which would potentially result in reading
				 * stale values in completion entry.
				 */
				rmb();
689 len = ntohs(next_cmpl->length);
690 flags = ntohl(next_cmpl->flags);
694 } while ((flags & BNA_CQ_EF_EOP) == 0);
696 if (!next_cmpl->valid)
701 /* TODO: BNA_CQ_EF_LOCAL ? */
702 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
703 BNA_CQ_EF_FCS_ERROR |
704 BNA_CQ_EF_TOO_LONG))) {
705 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
706 rcb->rxq->rx_packets_with_error++;
711 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
712 bnad_cq_setup_skb(bnad, skb, unmap, len);
714 bnad_cq_setup_skb_frags(ccb, skb, nvecs);
716 rcb->rxq->rx_packets++;
717 rcb->rxq->rx_bytes += totlen;
718 ccb->bytes_per_intr += totlen;
720 masked_flags = flags & flags_cksum_prot_mask;
723 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
724 ((masked_flags == flags_tcp4) ||
725 (masked_flags == flags_udp4) ||
726 (masked_flags == flags_tcp6) ||
727 (masked_flags == flags_udp6))))
728 skb->ip_summed = CHECKSUM_UNNECESSARY;
730 skb_checksum_none_assert(skb);
732 if ((flags & BNA_CQ_EF_VLAN) &&
733 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
734 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
736 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
737 netif_receive_skb(skb);
739 napi_gro_frags(&rx_ctrl->napi);
742 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
743 for (vec = 0; vec < nvecs; vec++) {
744 cmpl = &cq[ccb->producer_index];
746 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
750 napi_gro_flush(&rx_ctrl->napi, false);
751 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
752 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
754 bnad_rxq_post(bnad, ccb->rcb[0]);
756 bnad_rxq_post(bnad, ccb->rcb[1]);
762 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
764 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
765 struct napi_struct *napi = &rx_ctrl->napi;
767 if (likely(napi_schedule_prep(napi))) {
768 __napi_schedule(napi);
769 rx_ctrl->rx_schedule++;
773 /* MSIX Rx Path Handler */
775 bnad_msix_rx(int irq, void *data)
777 struct bna_ccb *ccb = (struct bna_ccb *)data;
780 ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
781 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
787 /* Interrupt handlers */
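
/*
 * In MSI-X mode each TxQ and each CQ gets its own vector (bnad_msix_tx() /
 * bnad_msix_rx()) and the mailbox has a dedicated one. In INTx mode the
 * single shared interrupt lands in bnad_isr(), which demultiplexes mailbox
 * and data events from the interrupt status word.
 */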
789 /* Mbox Interrupt Handlers */
791 bnad_msix_mbox_handler(int irq, void *data)
795 struct bnad *bnad = (struct bnad *)data;
797 spin_lock_irqsave(&bnad->bna_lock, flags);
798 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
799 spin_unlock_irqrestore(&bnad->bna_lock, flags);
803 bna_intr_status_get(&bnad->bna, intr_status);
805 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
806 bna_mbox_handler(&bnad->bna, intr_status);
808 spin_unlock_irqrestore(&bnad->bna_lock, flags);
814 bnad_isr(int irq, void *data)
819 struct bnad *bnad = (struct bnad *)data;
820 struct bnad_rx_info *rx_info;
821 struct bnad_rx_ctrl *rx_ctrl;
822 struct bna_tcb *tcb = NULL;
824 spin_lock_irqsave(&bnad->bna_lock, flags);
825 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
826 spin_unlock_irqrestore(&bnad->bna_lock, flags);
830 bna_intr_status_get(&bnad->bna, intr_status);
832 if (unlikely(!intr_status)) {
833 spin_unlock_irqrestore(&bnad->bna_lock, flags);
837 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
838 bna_mbox_handler(&bnad->bna, intr_status);
840 spin_unlock_irqrestore(&bnad->bna_lock, flags);
842 if (!BNA_IS_INTX_DATA_INTR(intr_status))
845 /* Process data interrupts */
847 for (i = 0; i < bnad->num_tx; i++) {
848 for (j = 0; j < bnad->num_txq_per_tx; j++) {
849 tcb = bnad->tx_info[i].tcb[j];
850 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
851 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
855 for (i = 0; i < bnad->num_rx; i++) {
856 rx_info = &bnad->rx_info[i];
859 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
860 rx_ctrl = &rx_info->rx_ctrl[j];
862 bnad_netif_rx_schedule_poll(bnad,
/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
874 bnad_enable_mbox_irq(struct bnad *bnad)
876 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
878 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
886 bnad_disable_mbox_irq(struct bnad *bnad)
888 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
890 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
894 bnad_set_netdev_perm_addr(struct bnad *bnad)
896 struct net_device *netdev = bnad->netdev;
898 ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
899 if (is_zero_ether_addr(netdev->dev_addr))
900 ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
903 /* Control Path Handlers */
907 bnad_cb_mbox_intr_enable(struct bnad *bnad)
909 bnad_enable_mbox_irq(bnad);
913 bnad_cb_mbox_intr_disable(struct bnad *bnad)
915 bnad_disable_mbox_irq(bnad);
919 bnad_cb_ioceth_ready(struct bnad *bnad)
921 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
922 complete(&bnad->bnad_completions.ioc_comp);
926 bnad_cb_ioceth_failed(struct bnad *bnad)
928 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
929 complete(&bnad->bnad_completions.ioc_comp);
933 bnad_cb_ioceth_disabled(struct bnad *bnad)
935 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
936 complete(&bnad->bnad_completions.ioc_comp);
940 bnad_cb_enet_disabled(void *arg)
942 struct bnad *bnad = (struct bnad *)arg;
944 netif_carrier_off(bnad->netdev);
945 complete(&bnad->bnad_completions.enet_comp);
949 bnad_cb_ethport_link_status(struct bnad *bnad,
950 enum bna_link_status link_status)
952 bool link_up = false;
954 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
956 if (link_status == BNA_CEE_UP) {
957 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
958 BNAD_UPDATE_CTR(bnad, cee_toggle);
959 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
961 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
962 BNAD_UPDATE_CTR(bnad, cee_toggle);
963 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
967 if (!netif_carrier_ok(bnad->netdev)) {
969 netdev_info(bnad->netdev, "link up\n");
970 netif_carrier_on(bnad->netdev);
971 BNAD_UPDATE_CTR(bnad, link_toggle);
972 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
973 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
975 struct bna_tcb *tcb =
976 bnad->tx_info[tx_id].tcb[tcb_id];
983 if (test_bit(BNAD_TXQ_TX_STARTED,
987 * Transmit Schedule */
991 BNAD_UPDATE_CTR(bnad,
997 BNAD_UPDATE_CTR(bnad,
1004 if (netif_carrier_ok(bnad->netdev)) {
1005 netdev_info(bnad->netdev, "link down\n");
1006 netif_carrier_off(bnad->netdev);
1007 BNAD_UPDATE_CTR(bnad, link_toggle);
1013 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
1015 struct bnad *bnad = (struct bnad *)arg;
1017 complete(&bnad->bnad_completions.tx_comp);
1021 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1023 struct bnad_tx_info *tx_info =
1024 (struct bnad_tx_info *)tcb->txq->tx->priv;
1027 tx_info->tcb[tcb->id] = tcb;
1031 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1033 struct bnad_tx_info *tx_info =
1034 (struct bnad_tx_info *)tcb->txq->tx->priv;
1036 tx_info->tcb[tcb->id] = NULL;
1041 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1043 struct bnad_rx_info *rx_info =
1044 (struct bnad_rx_info *)ccb->cq->rx->priv;
1046 rx_info->rx_ctrl[ccb->id].ccb = ccb;
1047 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1051 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1053 struct bnad_rx_info *rx_info =
1054 (struct bnad_rx_info *)ccb->cq->rx->priv;
1056 rx_info->rx_ctrl[ccb->id].ccb = NULL;
1060 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1062 struct bnad_tx_info *tx_info =
1063 (struct bnad_tx_info *)tx->priv;
1064 struct bna_tcb *tcb;
1068 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1069 tcb = tx_info->tcb[i];
1073 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1074 netif_stop_subqueue(bnad->netdev, txq_id);
1079 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1081 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1082 struct bna_tcb *tcb;
1086 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1087 tcb = tx_info->tcb[i];
1092 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1093 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1094 BUG_ON(*(tcb->hw_consumer_index) != 0);
1096 if (netif_carrier_ok(bnad->netdev)) {
1097 netif_wake_subqueue(bnad->netdev, txq_id);
1098 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
	/*
	 * Workaround for first ioceth enable failure & we
	 * get a 0 MAC address. We try to get the MAC address
	 * again here.
	 */
1107 if (is_zero_ether_addr(bnad->perm_addr)) {
1108 bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1109 bnad_set_netdev_perm_addr(bnad);
/*
 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
 */
static void
1117 bnad_tx_cleanup(struct delayed_work *work)
1119 struct bnad_tx_info *tx_info =
1120 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1121 struct bnad *bnad = NULL;
1122 struct bna_tcb *tcb;
1123 unsigned long flags;
1126 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1127 tcb = tx_info->tcb[i];
1133 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1138 bnad_txq_cleanup(bnad, tcb);
1140 smp_mb__before_atomic();
1141 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1145 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1146 msecs_to_jiffies(1));
1150 spin_lock_irqsave(&bnad->bna_lock, flags);
1151 bna_tx_cleanup_complete(tx_info->tx);
1152 spin_unlock_irqrestore(&bnad->bna_lock, flags);
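
/*
 * Tx cleanup runs from a workqueue because draining every pending buffer may
 * take a while and must not race the completion path: if a TxQ is still
 * owned by the completion handler (BNAD_TXQ_FREE_SENT already set) the work
 * re-arms itself a millisecond later, and only once every TxQ is drained is
 * bna_tx_cleanup_complete() called so the Tx state machine can move on.
 */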
1156 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1158 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1159 struct bna_tcb *tcb;
1162 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1163 tcb = tx_info->tcb[i];
1168 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1172 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1174 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1175 struct bna_ccb *ccb;
1176 struct bnad_rx_ctrl *rx_ctrl;
1179 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1180 rx_ctrl = &rx_info->rx_ctrl[i];
1185 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1188 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
/*
 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
 */
static void
1196 bnad_rx_cleanup(void *work)
1198 struct bnad_rx_info *rx_info =
1199 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1200 struct bnad_rx_ctrl *rx_ctrl;
1201 struct bnad *bnad = NULL;
1202 unsigned long flags;
1205 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1206 rx_ctrl = &rx_info->rx_ctrl[i];
1211 bnad = rx_ctrl->ccb->bnad;
		/*
		 * Wait till the poll handler has exited
		 * and nothing can be scheduled anymore
		 */
1217 napi_disable(&rx_ctrl->napi);
1219 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1220 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1221 if (rx_ctrl->ccb->rcb[1])
1222 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1225 spin_lock_irqsave(&bnad->bna_lock, flags);
1226 bna_rx_cleanup_complete(rx_info->rx);
1227 spin_unlock_irqrestore(&bnad->bna_lock, flags);
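
/*
 * As on the Tx side, Rx teardown is deferred to a work item: NAPI is
 * disabled first so no poll can run, the CQ and RxQs are drained, and only
 * then does bna_rx_cleanup_complete() let the Rx state machine proceed.
 */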
1231 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1233 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1234 struct bna_ccb *ccb;
1235 struct bnad_rx_ctrl *rx_ctrl;
1238 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1239 rx_ctrl = &rx_info->rx_ctrl[i];
1244 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1247 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1250 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1254 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1256 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1257 struct bna_ccb *ccb;
1258 struct bna_rcb *rcb;
1259 struct bnad_rx_ctrl *rx_ctrl;
1262 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1263 rx_ctrl = &rx_info->rx_ctrl[i];
1268 napi_enable(&rx_ctrl->napi);
1270 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1275 bnad_rxq_alloc_init(bnad, rcb);
1276 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1277 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1278 bnad_rxq_post(bnad, rcb);
1284 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1286 struct bnad *bnad = (struct bnad *)arg;
1288 complete(&bnad->bnad_completions.rx_comp);
1292 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1294 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1295 complete(&bnad->bnad_completions.mcast_comp);
1299 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1300 struct bna_stats *stats)
1302 if (status == BNA_CB_SUCCESS)
1303 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1305 if (!netif_running(bnad->netdev) ||
1306 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1309 mod_timer(&bnad->stats_timer,
1310 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1314 bnad_cb_enet_mtu_set(struct bnad *bnad)
1316 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1317 complete(&bnad->bnad_completions.mtu_comp);
1321 bnad_cb_completion(void *arg, enum bfa_status status)
1323 struct bnad_iocmd_comp *iocmd_comp =
1324 (struct bnad_iocmd_comp *)arg;
1326 iocmd_comp->comp_status = (u32) status;
1327 complete(&iocmd_comp->comp);
1330 /* Resource allocation, free functions */
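
/*
 * The BNA layer expresses its needs as res_info[] arrays: BNA_RES_T_MEM
 * entries are satisfied with either dma_alloc_coherent() (BNA_MEM_T_DMA) or
 * kzalloc() (KVA) blocks, while BNA_RES_T_INTR entries are turned into
 * interrupt descriptor lists by bnad_txrx_irq_alloc().
 */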
1333 bnad_mem_free(struct bnad *bnad,
1334 struct bna_mem_info *mem_info)
1339 if (mem_info->mdl == NULL)
1342 for (i = 0; i < mem_info->num; i++) {
1343 if (mem_info->mdl[i].kva != NULL) {
1344 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1345 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1347 dma_free_coherent(&bnad->pcidev->dev,
1348 mem_info->mdl[i].len,
1349 mem_info->mdl[i].kva, dma_pa);
1351 kfree(mem_info->mdl[i].kva);
1354 kfree(mem_info->mdl);
1355 mem_info->mdl = NULL;
1359 bnad_mem_alloc(struct bnad *bnad,
1360 struct bna_mem_info *mem_info)
1365 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1366 mem_info->mdl = NULL;
1370 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1372 if (mem_info->mdl == NULL)
1375 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1376 for (i = 0; i < mem_info->num; i++) {
1377 mem_info->mdl[i].len = mem_info->len;
1378 mem_info->mdl[i].kva =
1379 dma_alloc_coherent(&bnad->pcidev->dev,
1380 mem_info->len, &dma_pa,
1382 if (mem_info->mdl[i].kva == NULL)
1385 BNA_SET_DMA_ADDR(dma_pa,
1386 &(mem_info->mdl[i].dma));
1389 for (i = 0; i < mem_info->num; i++) {
1390 mem_info->mdl[i].len = mem_info->len;
1391 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1393 if (mem_info->mdl[i].kva == NULL)
1401 bnad_mem_free(bnad, mem_info);
1405 /* Free IRQ for Mailbox */
1407 bnad_mbox_irq_free(struct bnad *bnad)
1410 unsigned long flags;
1412 spin_lock_irqsave(&bnad->bna_lock, flags);
1413 bnad_disable_mbox_irq(bnad);
1414 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1416 irq = BNAD_GET_MBOX_IRQ(bnad);
1417 free_irq(irq, bnad);
/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 */
static int
1426 bnad_mbox_irq_alloc(struct bnad *bnad)
1429 unsigned long irq_flags, flags;
1431 irq_handler_t irq_handler;
1433 spin_lock_irqsave(&bnad->bna_lock, flags);
1434 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1435 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1436 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1439 irq_handler = (irq_handler_t)bnad_isr;
1440 irq = bnad->pcidev->irq;
1441 irq_flags = IRQF_SHARED;
1444 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1445 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute
	 */
1451 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1453 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1455 err = request_irq(irq, irq_handler, irq_flags,
1456 bnad->mbox_irq_name, bnad);
1462 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1464 kfree(intr_info->idl);
1465 intr_info->idl = NULL;
1468 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
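
/*
 * MSI-X vectors are laid out as the mailbox vector(s) first, then one vector
 * per TxQ, then one per RxP; txrx_id selects the starting slot for the Tx or
 * Rx object being configured. In INTx mode a single descriptor carries the
 * Tx or Rx IB bitmask instead of a vector number.
 */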
1470 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1471 u32 txrx_id, struct bna_intr_info *intr_info)
1473 int i, vector_start = 0;
1475 unsigned long flags;
1477 spin_lock_irqsave(&bnad->bna_lock, flags);
1478 cfg_flags = bnad->cfg_flags;
1479 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1481 if (cfg_flags & BNAD_CF_MSIX) {
1482 intr_info->intr_type = BNA_INTR_T_MSIX;
1483 intr_info->idl = kcalloc(intr_info->num,
1484 sizeof(struct bna_intr_descr),
1486 if (!intr_info->idl)
1491 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1495 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1496 (bnad->num_tx * bnad->num_txq_per_tx) +
1504 for (i = 0; i < intr_info->num; i++)
1505 intr_info->idl[i].vector = vector_start + i;
1507 intr_info->intr_type = BNA_INTR_T_INTX;
1509 intr_info->idl = kcalloc(intr_info->num,
1510 sizeof(struct bna_intr_descr),
1512 if (!intr_info->idl)
1517 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1521 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1528 /* NOTE: Should be called for MSIX only
1529 * Unregisters Tx MSIX vector(s) from the kernel
1532 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1538 for (i = 0; i < num_txqs; i++) {
1539 if (tx_info->tcb[i] == NULL)
1542 vector_num = tx_info->tcb[i]->intr_vector;
1543 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1547 /* NOTE: Should be called for MSIX only
1548 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1551 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1552 u32 tx_id, int num_txqs)
1558 for (i = 0; i < num_txqs; i++) {
1559 vector_num = tx_info->tcb[i]->intr_vector;
1560 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1561 tx_id + tx_info->tcb[i]->id);
1562 err = request_irq(bnad->msix_table[vector_num].vector,
1563 (irq_handler_t)bnad_msix_tx, 0,
1564 tx_info->tcb[i]->name,
1574 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1578 /* NOTE: Should be called for MSIX only
1579 * Unregisters Rx MSIX vector(s) from the kernel
1582 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1588 for (i = 0; i < num_rxps; i++) {
1589 if (rx_info->rx_ctrl[i].ccb == NULL)
1592 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1593 free_irq(bnad->msix_table[vector_num].vector,
1594 rx_info->rx_ctrl[i].ccb);
/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
1602 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1603 u32 rx_id, int num_rxps)
1609 for (i = 0; i < num_rxps; i++) {
1610 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1611 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1613 rx_id + rx_info->rx_ctrl[i].ccb->id);
1614 err = request_irq(bnad->msix_table[vector_num].vector,
1615 (irq_handler_t)bnad_msix_rx, 0,
1616 rx_info->rx_ctrl[i].ccb->name,
1617 rx_info->rx_ctrl[i].ccb);
1626 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1630 /* Free Tx object Resources */
1632 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1636 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1637 if (res_info[i].res_type == BNA_RES_T_MEM)
1638 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1639 else if (res_info[i].res_type == BNA_RES_T_INTR)
1640 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1644 /* Allocates memory and interrupt resources for Tx object */
1646 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1651 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1652 if (res_info[i].res_type == BNA_RES_T_MEM)
1653 err = bnad_mem_alloc(bnad,
1654 &res_info[i].res_u.mem_info);
1655 else if (res_info[i].res_type == BNA_RES_T_INTR)
1656 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1657 &res_info[i].res_u.intr_info);
1664 bnad_tx_res_free(bnad, res_info);
1668 /* Free Rx object Resources */
1670 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1674 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1675 if (res_info[i].res_type == BNA_RES_T_MEM)
1676 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1677 else if (res_info[i].res_type == BNA_RES_T_INTR)
1678 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1682 /* Allocates memory and interrupt resources for Rx object */
1684 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1689 /* All memory needs to be allocated before setup_ccbs */
1690 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1691 if (res_info[i].res_type == BNA_RES_T_MEM)
1692 err = bnad_mem_alloc(bnad,
1693 &res_info[i].res_u.mem_info);
1694 else if (res_info[i].res_type == BNA_RES_T_INTR)
1695 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1696 &res_info[i].res_u.intr_info);
1703 bnad_rx_res_free(bnad, res_info);
1707 /* Timer callbacks */
1710 bnad_ioc_timeout(unsigned long data)
1712 struct bnad *bnad = (struct bnad *)data;
1713 unsigned long flags;
1715 spin_lock_irqsave(&bnad->bna_lock, flags);
1716 bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1717 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1721 bnad_ioc_hb_check(unsigned long data)
1723 struct bnad *bnad = (struct bnad *)data;
1724 unsigned long flags;
1726 spin_lock_irqsave(&bnad->bna_lock, flags);
1727 bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1728 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1732 bnad_iocpf_timeout(unsigned long data)
1734 struct bnad *bnad = (struct bnad *)data;
1735 unsigned long flags;
1737 spin_lock_irqsave(&bnad->bna_lock, flags);
1738 bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1739 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1743 bnad_iocpf_sem_timeout(unsigned long data)
1745 struct bnad *bnad = (struct bnad *)data;
1746 unsigned long flags;
1748 spin_lock_irqsave(&bnad->bna_lock, flags);
1749 bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1750 spin_unlock_irqrestore(&bnad->bna_lock, flags);
/*
 * All timer routines use bnad->bna_lock to protect against the following
 * race, which may occur without locking: one CPU tests the timer's RUNNING
 * flag while another CPU clears that flag and calls del_timer_sync(), after
 * which the first CPU re-arms the timer with mod_timer().
 */
1763 /* b) Dynamic Interrupt Moderation Timer */
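
/*
 * The DIM timer fires every BNAD_DIM_TIMER_FREQ ms and lets the BNA adjust
 * each CQ's interrupt coalescing (bna_rx_dim_update) to the traffic observed
 * in the last interval, as long as the link is up and the timer is running.
 */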
1765 bnad_dim_timeout(unsigned long data)
1767 struct bnad *bnad = (struct bnad *)data;
1768 struct bnad_rx_info *rx_info;
1769 struct bnad_rx_ctrl *rx_ctrl;
1771 unsigned long flags;
1773 if (!netif_carrier_ok(bnad->netdev))
1776 spin_lock_irqsave(&bnad->bna_lock, flags);
1777 for (i = 0; i < bnad->num_rx; i++) {
1778 rx_info = &bnad->rx_info[i];
1781 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1782 rx_ctrl = &rx_info->rx_ctrl[j];
1785 bna_rx_dim_update(rx_ctrl->ccb);
	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1790 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1791 mod_timer(&bnad->dim_timer,
1792 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1793 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1796 /* c) Statistics Timer */
1798 bnad_stats_timeout(unsigned long data)
1800 struct bnad *bnad = (struct bnad *)data;
1801 unsigned long flags;
1803 if (!netif_running(bnad->netdev) ||
1804 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1807 spin_lock_irqsave(&bnad->bna_lock, flags);
1808 bna_hw_stats_get(&bnad->bna);
1809 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1813 * Set up timer for DIM
1814 * Called with bnad->bna_lock held
1817 bnad_dim_timer_start(struct bnad *bnad)
1819 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1820 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1821 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1822 (unsigned long)bnad);
1823 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1824 mod_timer(&bnad->dim_timer,
1825 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1830 * Set up timer for statistics
1831 * Called with mutex_lock(&bnad->conf_mutex) held
1834 bnad_stats_timer_start(struct bnad *bnad)
1836 unsigned long flags;
1838 spin_lock_irqsave(&bnad->bna_lock, flags);
1839 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1840 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1841 (unsigned long)bnad);
1842 mod_timer(&bnad->stats_timer,
1843 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1845 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1849 * Stops the stats timer
1850 * Called with mutex_lock(&bnad->conf_mutex) held
1853 bnad_stats_timer_stop(struct bnad *bnad)
1856 unsigned long flags;
1858 spin_lock_irqsave(&bnad->bna_lock, flags);
1859 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1861 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1863 del_timer_sync(&bnad->stats_timer);
1869 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1871 int i = 1; /* Index 0 has broadcast address */
1872 struct netdev_hw_addr *mc_addr;
1874 netdev_for_each_mc_addr(mc_addr, netdev) {
1875 ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1881 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1883 struct bnad_rx_ctrl *rx_ctrl =
1884 container_of(napi, struct bnad_rx_ctrl, napi);
1885 struct bnad *bnad = rx_ctrl->bnad;
1888 rx_ctrl->rx_poll_ctr++;
1890 if (!netif_carrier_ok(bnad->netdev))
1893 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1898 napi_complete(napi);
1900 rx_ctrl->rx_complete++;
1903 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1908 #define BNAD_NAPI_POLL_QUOTA 64
1910 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1912 struct bnad_rx_ctrl *rx_ctrl;
1915 /* Initialize & enable NAPI */
1916 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1917 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1918 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1919 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1924 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1928 /* First disable and then clean up */
1929 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1930 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
/* Should be called with conf_lock held */
1935 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1937 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1938 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1939 unsigned long flags;
1944 init_completion(&bnad->bnad_completions.tx_comp);
1945 spin_lock_irqsave(&bnad->bna_lock, flags);
1946 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1947 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1948 wait_for_completion(&bnad->bnad_completions.tx_comp);
1950 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1951 bnad_tx_msix_unregister(bnad, tx_info,
1952 bnad->num_txq_per_tx);
1954 spin_lock_irqsave(&bnad->bna_lock, flags);
1955 bna_tx_destroy(tx_info->tx);
1956 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1961 bnad_tx_res_free(bnad, res_info);
/* Should be called with conf_lock held */
1966 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1969 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1970 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1971 struct bna_intr_info *intr_info =
1972 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1973 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1974 static const struct bna_tx_event_cbfn tx_cbfn = {
1975 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1976 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1977 .tx_stall_cbfn = bnad_cb_tx_stall,
1978 .tx_resume_cbfn = bnad_cb_tx_resume,
1979 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1983 unsigned long flags;
1985 tx_info->tx_id = tx_id;
1987 /* Initialize the Tx object configuration */
1988 tx_config->num_txq = bnad->num_txq_per_tx;
1989 tx_config->txq_depth = bnad->txq_depth;
1990 tx_config->tx_type = BNA_TX_T_REGULAR;
1991 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1993 /* Get BNA's resource requirement for one tx object */
1994 spin_lock_irqsave(&bnad->bna_lock, flags);
1995 bna_tx_res_req(bnad->num_txq_per_tx,
1996 bnad->txq_depth, res_info);
1997 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1999 /* Fill Unmap Q memory requirements */
2000 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
2001 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
2004 /* Allocate resources */
2005 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
2009 /* Ask BNA to create one Tx object, supplying required resources */
2010 spin_lock_irqsave(&bnad->bna_lock, flags);
2011 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
2013 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2020 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2021 (work_func_t)bnad_tx_cleanup);
2023 /* Register ISR for the Tx object */
2024 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2025 err = bnad_tx_msix_register(bnad, tx_info,
2026 tx_id, bnad->num_txq_per_tx);
2031 spin_lock_irqsave(&bnad->bna_lock, flags);
2033 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2038 spin_lock_irqsave(&bnad->bna_lock, flags);
2039 bna_tx_destroy(tx_info->tx);
2040 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2044 bnad_tx_res_free(bnad, res_info);
2048 /* Setup the rx config for bna_rx_create */
2049 /* bnad decides the configuration */
2051 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2053 memset(rx_config, 0, sizeof(*rx_config));
2054 rx_config->rx_type = BNA_RX_T_REGULAR;
2055 rx_config->num_paths = bnad->num_rxp_per_rx;
2056 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2058 if (bnad->num_rxp_per_rx > 1) {
2059 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2060 rx_config->rss_config.hash_type =
2061 (BFI_ENET_RSS_IPV6 |
2062 BFI_ENET_RSS_IPV6_TCP |
2064 BFI_ENET_RSS_IPV4_TCP);
2065 rx_config->rss_config.hash_mask =
2066 bnad->num_rxp_per_rx - 1;
2067 netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2068 sizeof(rx_config->rss_config.toeplitz_hash_key));
2070 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2071 memset(&rx_config->rss_config, 0,
2072 sizeof(rx_config->rss_config));
2075 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2076 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
	/* BNA_RXP_SINGLE - one data-buffer queue
	 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
	 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
	 */
2082 /* TODO: configurable param for queue type */
2083 rx_config->rxp_type = BNA_RXP_SLR;
2085 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2086 rx_config->frame_size > 4096) {
		/* though size_routing_enable is set in SLR,
		 * small packets may get routed to same rxq.
		 * set buf_size to 2048 instead of PAGE_SIZE.
		 */
2091 rx_config->q0_buf_size = 2048;
2092 /* this should be in multiples of 2 */
2093 rx_config->q0_num_vecs = 4;
2094 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2095 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2097 rx_config->q0_buf_size = rx_config->frame_size;
2098 rx_config->q0_num_vecs = 1;
2099 rx_config->q0_depth = bnad->rxq_depth;
2102 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2103 if (rx_config->rxp_type == BNA_RXP_SLR) {
2104 rx_config->q1_depth = bnad->rxq_depth;
2105 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2108 rx_config->vlan_strip_status =
2109 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2110 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2114 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2116 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2119 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2120 rx_info->rx_ctrl[i].bnad = bnad;
2123 /* Called with mutex_lock(&bnad->conf_mutex) held */
2125 bnad_reinit_rx(struct bnad *bnad)
2127 struct net_device *netdev = bnad->netdev;
2128 u32 err = 0, current_err = 0;
2129 u32 rx_id = 0, count = 0;
2130 unsigned long flags;
2132 /* destroy and create new rx objects */
2133 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2134 if (!bnad->rx_info[rx_id].rx)
2136 bnad_destroy_rx(bnad, rx_id);
2139 spin_lock_irqsave(&bnad->bna_lock, flags);
2140 bna_enet_mtu_set(&bnad->bna.enet,
2141 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2142 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2144 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2146 current_err = bnad_setup_rx(bnad, rx_id);
2147 if (current_err && !err) {
2149 netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2153 /* restore rx configuration */
2154 if (bnad->rx_info[0].rx && !err) {
2155 bnad_restore_vlans(bnad, 0);
2156 bnad_enable_default_bcast(bnad);
2157 spin_lock_irqsave(&bnad->bna_lock, flags);
2158 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2159 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2160 bnad_set_rx_mode(netdev);
2166 /* Called with bnad_conf_lock() held */
2168 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2170 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2171 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2172 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2173 unsigned long flags;
2180 spin_lock_irqsave(&bnad->bna_lock, flags);
2181 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2182 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2183 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2186 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2188 del_timer_sync(&bnad->dim_timer);
2191 init_completion(&bnad->bnad_completions.rx_comp);
2192 spin_lock_irqsave(&bnad->bna_lock, flags);
2193 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2194 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2195 wait_for_completion(&bnad->bnad_completions.rx_comp);
2197 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2198 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2200 bnad_napi_delete(bnad, rx_id);
2202 spin_lock_irqsave(&bnad->bna_lock, flags);
2203 bna_rx_destroy(rx_info->rx);
2207 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2209 bnad_rx_res_free(bnad, res_info);
2212 /* Called with mutex_lock(&bnad->conf_mutex) held */
2214 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2217 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2218 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2219 struct bna_intr_info *intr_info =
2220 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2221 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2222 static const struct bna_rx_event_cbfn rx_cbfn = {
2223 .rcb_setup_cbfn = NULL,
2224 .rcb_destroy_cbfn = NULL,
2225 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2226 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2227 .rx_stall_cbfn = bnad_cb_rx_stall,
2228 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2229 .rx_post_cbfn = bnad_cb_rx_post,
2232 unsigned long flags;
2234 rx_info->rx_id = rx_id;
2236 /* Initialize the Rx object configuration */
2237 bnad_init_rx_config(bnad, rx_config);
2239 /* Get BNA's resource requirement for one Rx object */
2240 spin_lock_irqsave(&bnad->bna_lock, flags);
2241 bna_rx_res_req(rx_config, res_info);
2242 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2244 /* Fill Unmap Q memory requirements */
2245 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2246 rx_config->num_paths,
2247 (rx_config->q0_depth *
2248 sizeof(struct bnad_rx_unmap)) +
2249 sizeof(struct bnad_rx_unmap_q));
2251 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2252 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2253 rx_config->num_paths,
2254 (rx_config->q1_depth *
2255 sizeof(struct bnad_rx_unmap) +
2256 sizeof(struct bnad_rx_unmap_q)));
2258 /* Allocate resource */
2259 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2263 bnad_rx_ctrl_init(bnad, rx_id);
2265 /* Ask BNA to create one Rx object, supplying required resources */
2266 spin_lock_irqsave(&bnad->bna_lock, flags);
2267 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2271 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2275 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2277 INIT_WORK(&rx_info->rx_cleanup_work,
2278 (work_func_t)(bnad_rx_cleanup));
	/*
	 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
	 * so that IRQ handler cannot schedule NAPI at this point.
	 */
2284 bnad_napi_add(bnad, rx_id);
2286 /* Register ISR for the Rx object */
2287 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2288 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2289 rx_config->num_paths);
2294 spin_lock_irqsave(&bnad->bna_lock, flags);
2296 /* Set up Dynamic Interrupt Moderation Vector */
2297 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2298 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2300 /* Enable VLAN filtering only on the default Rx */
2301 bna_rx_vlanfilter_enable(rx);
2303 /* Start the DIM timer */
2304 bnad_dim_timer_start(bnad);
2308 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2313 bnad_destroy_rx(bnad, rx_id);
2317 /* Called with conf_lock & bnad->bna_lock held */
2319 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2321 struct bnad_tx_info *tx_info;
2323 tx_info = &bnad->tx_info[0];
2327 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2330 /* Called with conf_lock & bnad->bna_lock held */
2332 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2334 struct bnad_rx_info *rx_info;
2337 for (i = 0; i < bnad->num_rx; i++) {
2338 rx_info = &bnad->rx_info[i];
2341 bna_rx_coalescing_timeo_set(rx_info->rx,
2342 bnad->rx_coalescing_timeo);
2347 * Called with bnad->bna_lock held
2350 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2354 if (!is_valid_ether_addr(mac_addr))
2355 return -EADDRNOTAVAIL;
2357 /* If datapath is down, pretend everything went through */
2358 if (!bnad->rx_info[0].rx)
2361 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2362 if (ret != BNA_CB_SUCCESS)
2363 return -EADDRNOTAVAIL;
2368 /* Should be called with conf_lock held */
2370 bnad_enable_default_bcast(struct bnad *bnad)
2372 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2374 unsigned long flags;
2376 init_completion(&bnad->bnad_completions.mcast_comp);
2378 spin_lock_irqsave(&bnad->bna_lock, flags);
2379 ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2380 bnad_cb_rx_mcast_add);
2381 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2383 if (ret == BNA_CB_SUCCESS)
2384 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2388 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2394 /* Called with mutex_lock(&bnad->conf_mutex) held */
2396 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2399 unsigned long flags;
2401 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2402 spin_lock_irqsave(&bnad->bna_lock, flags);
2403 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2404 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2408 /* Statistics utilities */
2410 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2414 for (i = 0; i < bnad->num_rx; i++) {
2415 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2416 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2417 stats->rx_packets += bnad->rx_info[i].
2418 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2419 stats->rx_bytes += bnad->rx_info[i].
2420 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2421 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2422 bnad->rx_info[i].rx_ctrl[j].ccb->
2424 stats->rx_packets +=
2425 bnad->rx_info[i].rx_ctrl[j].
2426 ccb->rcb[1]->rxq->rx_packets;
2428 bnad->rx_info[i].rx_ctrl[j].
2429 ccb->rcb[1]->rxq->rx_bytes;
2434 for (i = 0; i < bnad->num_tx; i++) {
2435 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2436 if (bnad->tx_info[i].tcb[j]) {
2437 stats->tx_packets +=
2438 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2440 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2447 * Must be called with the bna_lock held.
2450 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2452 struct bfi_enet_stats_mac *mac_stats;
2456 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2458 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2459 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2460 mac_stats->rx_undersize;
2461 stats->tx_errors = mac_stats->tx_fcs_error +
2462 mac_stats->tx_undersize;
2463 stats->rx_dropped = mac_stats->rx_drop;
2464 stats->tx_dropped = mac_stats->tx_drop;
2465 stats->multicast = mac_stats->rx_multicast;
2466 stats->collisions = mac_stats->tx_total_collision;
2468 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2470 /* receive ring buffer overflow ?? */
2472 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2473 stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* receiver FIFO overrun */
2475 bmap = bna_rx_rid_mask(&bnad->bna);
2476 for (i = 0; bmap; i++) {
2478 stats->rx_fifo_errors +=
2479 bnad->stats.bna_stats->
2480 hw_stats.rxf_stats[i].frame_drops;
2488 bnad_mbox_irq_sync(struct bnad *bnad)
2491 unsigned long flags;
2493 spin_lock_irqsave(&bnad->bna_lock, flags);
2494 if (bnad->cfg_flags & BNAD_CF_MSIX)
2495 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2497 irq = bnad->pcidev->irq;
2498 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2500 synchronize_irq(irq);
2503 /* Utility used by bnad_start_xmit, for doing TSO */
2505 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2509 err = skb_cow_head(skb, 0);
2511 BNAD_UPDATE_CTR(bnad, tso_err);
2516 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2517 * excluding the length field.
2519 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2520 struct iphdr *iph = ip_hdr(skb);
2522 /* Do we really need these? */
2526 tcp_hdr(skb)->check =
2527 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2529 BNAD_UPDATE_CTR(bnad, tso4);
2531 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2533 ipv6h->payload_len = 0;
2534 tcp_hdr(skb)->check =
2535 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2537 BNAD_UPDATE_CTR(bnad, tso6);
2544 * Initialize Q numbers depending on Rx Paths
2545 * Called with bnad->bna_lock held, because of cfg_flags
2549 bnad_q_num_init(struct bnad *bnad)
2553 rxps = min((uint)num_online_cpus(),
2554 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2556 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2557 rxps = 1; /* INTx */
2561 bnad->num_rxp_per_rx = rxps;
2562 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
/*
 * Adjusts the Q numbers, given a number of msix vectors
 * Give preference to RSS as opposed to Tx priority Queues,
 * in such a case, just use 1 Tx Q
 * Called with bnad->bna_lock held because of cfg_flags access
 */
static void
2572 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2574 bnad->num_txq_per_tx = 1;
2575 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2576 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2577 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2578 bnad->num_rxp_per_rx = msix_vectors -
2579 (bnad->num_tx * bnad->num_txq_per_tx) -
2580 BNAD_MAILBOX_MSIX_VECTORS;
2581 else
2582 bnad->num_rxp_per_rx = 1;
2583 }
2585 /* Enable / disable ioceth */
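/*
 * Both helpers below follow the same pattern: arm a completion under
 * bna_lock, ask the bna layer to disable/enable the ioceth, then wait
 * (bounded by BNAD_IOCETH_TIMEOUT) and report ioc_comp_status.
 */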
2587 bnad_ioceth_disable(struct bnad *bnad)
2589 unsigned long flags;
2592 spin_lock_irqsave(&bnad->bna_lock, flags);
2593 init_completion(&bnad->bnad_completions.ioc_comp);
2594 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2595 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2597 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2598 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2600 err = bnad->bnad_completions.ioc_comp_status;
2605 bnad_ioceth_enable(struct bnad *bnad)
2608 unsigned long flags;
2610 spin_lock_irqsave(&bnad->bna_lock, flags);
2611 init_completion(&bnad->bnad_completions.ioc_comp);
2612 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2613 bna_ioceth_enable(&bnad->bna.ioceth);
2614 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2616 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2617 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2619 err = bnad->bnad_completions.ioc_comp_status;
2624 /* Free BNA resources */
2626 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2627 u32 res_val_max)
2628 {
2629 int i;
2631 for (i = 0; i < res_val_max; i++)
2632 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2635 /* Allocates memory and interrupt resources for BNA */
2637 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2638 u32 res_val_max)
2639 {
2640 int i, err;
2642 for (i = 0; i < res_val_max; i++) {
2643 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2644 if (err)
2645 goto err_return;
2646 }
2647 return 0;
2649 err_return:
2650 bnad_res_free(bnad, res_info, res_val_max);
2651 return err;
2652 }
2654 /* Interrupt enable / disable */
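/*
 * bnad_enable_msix() sizes msix_table for all TxQ/RxP vectors plus the
 * mailbox vector. If pci_enable_msix_range() grants fewer vectors than
 * requested, the Tx/Rx queue counts are adjusted to fit; on failure the
 * driver falls back to INTx mode (BNAD_CF_MSIX cleared and the queue
 * numbers re-initialized).
 */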
2656 bnad_enable_msix(struct bnad *bnad)
2659 unsigned long flags;
2661 spin_lock_irqsave(&bnad->bna_lock, flags);
2662 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2663 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2664 return;
2665 }
2666 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2668 if (bnad->msix_table)
2669 return;
2671 bnad->msix_table =
2672 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2674 if (!bnad->msix_table)
2675 goto intx_mode;
2677 for (i = 0; i < bnad->msix_num; i++)
2678 bnad->msix_table[i].entry = i;
2680 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2681 1, bnad->msix_num);
2682 if (ret < 0) {
2683 goto intx_mode;
2684 } else if (ret < bnad->msix_num) {
2685 dev_warn(&bnad->pcidev->dev,
2686 "%d MSI-X vectors allocated < %d requested\n",
2687 ret, bnad->msix_num);
2689 spin_lock_irqsave(&bnad->bna_lock, flags);
2690 /* ret = #of vectors that we got */
2691 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2692 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2693 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2695 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2696 BNAD_MAILBOX_MSIX_VECTORS;
2698 if (bnad->msix_num > ret) {
2699 pci_disable_msix(bnad->pcidev);
2700 goto intx_mode;
2701 }
2702 }
2704 pci_intx(bnad->pcidev, 0);
2706 return;
2708 intx_mode:
2709 dev_warn(&bnad->pcidev->dev,
2710 "MSI-X enable failed - operating in INTx mode\n");
2712 kfree(bnad->msix_table);
2713 bnad->msix_table = NULL;
2715 spin_lock_irqsave(&bnad->bna_lock, flags);
2716 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2717 bnad_q_num_init(bnad);
2718 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2722 bnad_disable_msix(struct bnad *bnad)
2723 {
2724 u32 cfg_flags;
2725 unsigned long flags;
2727 spin_lock_irqsave(&bnad->bna_lock, flags);
2728 cfg_flags = bnad->cfg_flags;
2729 if (bnad->cfg_flags & BNAD_CF_MSIX)
2730 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2731 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2733 if (cfg_flags & BNAD_CF_MSIX) {
2734 pci_disable_msix(bnad->pcidev);
2735 kfree(bnad->msix_table);
2736 bnad->msix_table = NULL;
2740 /* Netdev entry points */
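/*
 * bnad_open() brings the interface up under conf_mutex: set up the Tx and
 * Rx paths, program the MTU and (currently zeroed) pause configuration,
 * enable the enet block, re-enable broadcast, restore VLANs, program the
 * unicast MAC address and start the stats timer.
 */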
2742 bnad_open(struct net_device *netdev)
2743 {
2744 int err;
2745 struct bnad *bnad = netdev_priv(netdev);
2746 struct bna_pause_config pause_config;
2747 unsigned long flags;
2749 mutex_lock(&bnad->conf_mutex);
2752 err = bnad_setup_tx(bnad, 0);
2753 if (err)
2754 goto err_return;
2756 /* Rx */
2757 err = bnad_setup_rx(bnad, 0);
2758 if (err)
2759 goto cleanup_tx;
2761 /* Port */
2762 pause_config.tx_pause = 0;
2763 pause_config.rx_pause = 0;
2765 spin_lock_irqsave(&bnad->bna_lock, flags);
2766 bna_enet_mtu_set(&bnad->bna.enet,
2767 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2768 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2769 bna_enet_enable(&bnad->bna.enet);
2770 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2772 /* Enable broadcast */
2773 bnad_enable_default_bcast(bnad);
2775 /* Restore VLANs, if any */
2776 bnad_restore_vlans(bnad, 0);
2778 /* Set the UCAST address */
2779 spin_lock_irqsave(&bnad->bna_lock, flags);
2780 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2781 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2783 /* Start the stats timer */
2784 bnad_stats_timer_start(bnad);
2786 mutex_unlock(&bnad->conf_mutex);
2788 return 0;
2790 cleanup_tx:
2791 bnad_destroy_tx(bnad, 0);
2793 err_return:
2794 mutex_unlock(&bnad->conf_mutex);
2795 return err;
2796 }
2799 bnad_stop(struct net_device *netdev)
2801 struct bnad *bnad = netdev_priv(netdev);
2802 unsigned long flags;
2804 mutex_lock(&bnad->conf_mutex);
2806 /* Stop the stats timer */
2807 bnad_stats_timer_stop(bnad);
2809 init_completion(&bnad->bnad_completions.enet_comp);
2811 spin_lock_irqsave(&bnad->bna_lock, flags);
2812 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2813 bnad_cb_enet_disabled);
2814 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2816 wait_for_completion(&bnad->bnad_completions.enet_comp);
2818 bnad_destroy_tx(bnad, 0);
2819 bnad_destroy_rx(bnad, 0);
2821 /* Synchronize mailbox IRQ */
2822 bnad_mbox_irq_sync(bnad);
2824 mutex_unlock(&bnad->conf_mutex);
2826 return 0;
2827 }
2829 /* TX */
2830 /* Returns 0 for success */
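/*
 * bnad_txq_wi_prepare() fills the Tx work-item header from the skb:
 * VLAN tag (optionally overridden with the CEE priority), LSO opcode and
 * MSS for GSO frames, or L3/L4 checksum-offload flags otherwise, plus the
 * total frame length. A non-zero return means the skb cannot be
 * offloaded, and the caller drops it.
 */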
2832 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2833 struct sk_buff *skb, struct bna_txq_entry *txqent)
2839 if (skb_vlan_tag_present(skb)) {
2840 vlan_tag = (u16)skb_vlan_tag_get(skb);
2841 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2843 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2844 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2845 | (vlan_tag & 0x1fff);
2846 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2848 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2850 if (skb_is_gso(skb)) {
2851 gso_size = skb_shinfo(skb)->gso_size;
2852 if (unlikely(gso_size > bnad->netdev->mtu)) {
2853 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2854 return -EINVAL;
2855 }
2856 if (unlikely((gso_size + skb_transport_offset(skb) +
2857 tcp_hdrlen(skb)) >= skb->len)) {
2858 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2859 txqent->hdr.wi.lso_mss = 0;
2860 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2861 } else {
2862 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2863 txqent->hdr.wi.lso_mss = htons(gso_size);
2866 if (bnad_tso_prepare(bnad, skb)) {
2867 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2868 return -EINVAL;
2869 }
2871 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2872 txqent->hdr.wi.l4_hdr_size_n_offset =
2873 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2874 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2875 } else {
2876 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2877 txqent->hdr.wi.lso_mss = 0;
2879 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2880 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2881 return -EINVAL;
2882 }
2884 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2885 __be16 net_proto = vlan_get_protocol(skb);
2886 u8 proto = 0;
2888 if (net_proto == htons(ETH_P_IP))
2889 proto = ip_hdr(skb)->protocol;
2890 #ifdef NETIF_F_IPV6_CSUM
2891 else if (net_proto == htons(ETH_P_IPV6)) {
2892 /* nexthdr may not be TCP immediately. */
2893 proto = ipv6_hdr(skb)->nexthdr;
2894 }
2895 #endif
2896 if (proto == IPPROTO_TCP) {
2897 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2898 txqent->hdr.wi.l4_hdr_size_n_offset =
2899 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2900 (0, skb_transport_offset(skb)));
2902 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2904 if (unlikely(skb_headlen(skb) <
2905 skb_transport_offset(skb) +
2906 tcp_hdrlen(skb))) {
2907 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2908 return -EINVAL;
2909 }
2910 } else if (proto == IPPROTO_UDP) {
2911 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2912 txqent->hdr.wi.l4_hdr_size_n_offset =
2913 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2914 (0, skb_transport_offset(skb)));
2916 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2917 if (unlikely(skb_headlen(skb) <
2918 skb_transport_offset(skb) +
2919 sizeof(struct udphdr))) {
2920 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2921 return -EINVAL;
2922 }
2923 } else {
2925 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2926 return -EINVAL;
2927 }
2928 } else
2929 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2932 txqent->hdr.wi.flags = htons(flags);
2933 txqent->hdr.wi.frame_length = htonl(skb->len);
2935 return 0;
2936 }
2938 /*
2939 * bnad_start_xmit : Netdev entry point for Transmit
2940 * Called under lock held by net_device
2941 */
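/*
 * Transmit path in outline: validate the skb (length, vector count),
 * reclaim completed descriptors if the TxQ looks full (stopping and
 * possibly re-waking the netif queue), build the work item via
 * bnad_txq_wi_prepare(), DMA-map the linear part and each fragment into
 * the vector array, and finally ring the producer-index doorbell.
 */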
2943 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2945 struct bnad *bnad = netdev_priv(netdev);
2946 u32 txq_id = 0;
2947 struct bna_tcb *tcb = NULL;
2948 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2949 u32 prod, q_depth, vect_id;
2950 u32 wis, vectors, len;
2951 int i;
2952 dma_addr_t dma_addr;
2953 struct bna_txq_entry *txqent;
2955 len = skb_headlen(skb);
2957 /* Sanity checks for the skb */
2959 if (unlikely(skb->len <= ETH_HLEN)) {
2960 dev_kfree_skb_any(skb);
2961 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2962 return NETDEV_TX_OK;
2964 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2965 dev_kfree_skb_any(skb);
2966 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2967 return NETDEV_TX_OK;
2969 if (unlikely(len == 0)) {
2970 dev_kfree_skb_any(skb);
2971 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2972 return NETDEV_TX_OK;
2975 tcb = bnad->tx_info[0].tcb[txq_id];
2978 * Takes care of the Tx that is scheduled between clearing the flag
2979 * and the netif_tx_stop_all_queues() call.
2981 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2982 dev_kfree_skb_any(skb);
2983 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2984 return NETDEV_TX_OK;
2987 q_depth = tcb->q_depth;
2988 prod = tcb->producer_index;
2989 unmap_q = tcb->unmap_q;
2991 vectors = 1 + skb_shinfo(skb)->nr_frags;
2992 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2994 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2995 dev_kfree_skb_any(skb);
2996 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2997 return NETDEV_TX_OK;
3000 /* Check for available TxQ resources */
3001 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3002 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
3003 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
3004 u32 sent;
3005 sent = bnad_txcmpl_process(bnad, tcb);
3006 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3007 bna_ib_ack(tcb->i_dbell, sent);
3008 smp_mb__before_atomic();
3009 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
3010 } else {
3011 netif_stop_queue(netdev);
3012 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3013 }
3015 smp_mb();
3016 /*
3017 * Check again to deal with race condition between
3018 * netif_stop_queue here, and netif_wake_queue in
3019 * interrupt handler which is not inside netif tx lock.
3021 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3022 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3023 return NETDEV_TX_BUSY;
3024 } else {
3025 netif_wake_queue(netdev);
3026 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3030 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3031 head_unmap = &unmap_q[prod];
3033 /* Program the opcode, flags, frame_len, num_vectors in WI */
3034 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3035 dev_kfree_skb_any(skb);
3036 return NETDEV_TX_OK;
3038 txqent->hdr.wi.reserved = 0;
3039 txqent->hdr.wi.num_vectors = vectors;
3041 head_unmap->skb = skb;
3042 head_unmap->nvecs = 0;
3044 /* Program the vectors */
3045 unmap = head_unmap;
3046 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3047 len, DMA_TO_DEVICE);
3048 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3049 dev_kfree_skb_any(skb);
3050 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3051 return NETDEV_TX_OK;
3053 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3054 txqent->vector[0].length = htons(len);
3055 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3056 head_unmap->nvecs++;
3058 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3059 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3060 u32 size = skb_frag_size(frag);
3062 if (unlikely(size == 0)) {
3063 /* Undo the changes starting at tcb->producer_index */
3064 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3065 tcb->producer_index);
3066 dev_kfree_skb_any(skb);
3067 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3068 return NETDEV_TX_OK;
3069 }
3071 len += size;
3073 vect_id++;
3074 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3075 vect_id = 0;
3076 BNA_QE_INDX_INC(prod, q_depth);
3077 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3078 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3079 unmap = &unmap_q[prod];
3082 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3083 0, size, DMA_TO_DEVICE);
3084 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3085 /* Undo the changes starting at tcb->producer_index */
3086 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3087 tcb->producer_index);
3088 dev_kfree_skb_any(skb);
3089 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3090 return NETDEV_TX_OK;
3093 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3094 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3095 txqent->vector[vect_id].length = htons(size);
3096 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3097 dma_addr);
3098 head_unmap->nvecs++;
3101 if (unlikely(len != skb->len)) {
3102 /* Undo the changes starting at tcb->producer_index */
3103 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3104 dev_kfree_skb_any(skb);
3105 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3106 return NETDEV_TX_OK;
3109 BNA_QE_INDX_INC(prod, q_depth);
3110 tcb->producer_index = prod;
3112 smp_mb();
3114 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3115 return NETDEV_TX_OK;
3117 skb_tx_timestamp(skb);
3119 bna_txq_prod_indx_doorbell(tcb);
3122 return NETDEV_TX_OK;
3126 * Uses spin_lock to synchronize reading of stats structures, which
3127 * are written by BNA under the same lock.
3128 */
3129 static struct rtnl_link_stats64 *
3130 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3132 struct bnad *bnad = netdev_priv(netdev);
3133 unsigned long flags;
3135 spin_lock_irqsave(&bnad->bna_lock, flags);
3137 bnad_netdev_qstats_fill(bnad, stats);
3138 bnad_netdev_hwstats_fill(bnad, stats);
3140 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3142 return stats;
3143 }
3146 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3148 struct net_device *netdev = bnad->netdev;
3149 int uc_count = netdev_uc_count(netdev);
3150 enum bna_cb_status ret;
3151 u8 *mac_list;
3152 struct netdev_hw_addr *ha;
3153 int entry;
3155 if (netdev_uc_empty(bnad->netdev)) {
3156 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3157 return;
3158 }
3160 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3161 goto mode_default;
3163 mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3164 if (mac_list == NULL)
3165 goto mode_default;
3167 entry = 0;
3168 netdev_for_each_uc_addr(ha, netdev) {
3169 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3170 entry++;
3171 }
3173 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3174 kfree(mac_list);
3176 if (ret != BNA_CB_SUCCESS)
3177 goto mode_default;
3179 return;
3181 /* ucast packets not in UCAM are routed to default function */
3182 mode_default:
3183 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3184 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3185 }
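/*
 * bnad_set_rx_mcast_fltr() builds a MAC list with the broadcast address
 * in slot 0 followed by the multicast addresses from the netdev. If the
 * device is in ALLMULTI mode, the list would exceed num_mcmac, the
 * allocation fails, or programming the list fails, it falls back to
 * ALLMULTI and clears the multicast CAM.
 */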
3188 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3190 struct net_device *netdev = bnad->netdev;
3191 int mc_count = netdev_mc_count(netdev);
3192 enum bna_cb_status ret;
3193 u8 *mac_list;
3195 if (netdev->flags & IFF_ALLMULTI)
3196 goto mode_allmulti;
3198 if (netdev_mc_empty(netdev))
3199 return;
3201 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3202 goto mode_allmulti;
3204 mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3206 if (mac_list == NULL)
3207 goto mode_allmulti;
3209 ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3211 /* copy rest of the MCAST addresses */
3212 bnad_netdev_mc_list_get(netdev, mac_list);
3213 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3214 kfree(mac_list);
3216 if (ret != BNA_CB_SUCCESS)
3217 goto mode_allmulti;
3219 return;
3221 mode_allmulti:
3222 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3223 bna_rx_mcast_delall(bnad->rx_info[0].rx);
3224 }
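/*
 * bnad_set_rx_mode() translates the netdev flags and address lists into a
 * bna_rxmode: IFF_PROMISC maps to promiscuous/default mode, otherwise the
 * multicast and unicast filters above are programmed and ALLMULTI/DEFAULT
 * are added as needed; the resulting mode is applied to rx_info[0].rx
 * under bna_lock.
 */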
3227 bnad_set_rx_mode(struct net_device *netdev)
3229 struct bnad *bnad = netdev_priv(netdev);
3230 enum bna_rxmode new_mode, mode_mask;
3231 unsigned long flags;
3233 spin_lock_irqsave(&bnad->bna_lock, flags);
3235 if (bnad->rx_info[0].rx == NULL) {
3236 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3237 return;
3238 }
3240 /* clear bnad flags to update it with new settings */
3241 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3242 BNAD_CF_ALLMULTI);
3244 new_mode = 0;
3245 if (netdev->flags & IFF_PROMISC) {
3246 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3247 bnad->cfg_flags |= BNAD_CF_PROMISC;
3248 } else {
3249 bnad_set_rx_mcast_fltr(bnad);
3251 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3252 new_mode |= BNA_RXMODE_ALLMULTI;
3254 bnad_set_rx_ucast_fltr(bnad);
3256 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3257 new_mode |= BNA_RXMODE_DEFAULT;
3258 }
3260 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3261 BNA_RXMODE_ALLMULTI;
3262 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3264 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3268 * bna_lock is used to sync writes to netdev->addr
3269 * conf_lock cannot be used since this call may be made
3270 * in a non-blocking context.
3273 bnad_set_mac_address(struct net_device *netdev, void *addr)
3276 struct bnad *bnad = netdev_priv(netdev);
3277 struct sockaddr *sa = (struct sockaddr *)addr;
3278 unsigned long flags;
3280 spin_lock_irqsave(&bnad->bna_lock, flags);
3282 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3283 if (!err)
3284 ether_addr_copy(netdev->dev_addr, sa->sa_data);
3286 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3288 return err;
3289 }
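/*
 * bnad_mtu_set() programs the new frame size into the enet block and
 * blocks on mtu_comp until bnad_cb_enet_mtu_set() signals completion,
 * returning the completion status.
 */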
3292 bnad_mtu_set(struct bnad *bnad, int frame_size)
3294 unsigned long flags;
3296 init_completion(&bnad->bnad_completions.mtu_comp);
3298 spin_lock_irqsave(&bnad->bna_lock, flags);
3299 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3300 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3302 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3304 return bnad->bnad_completions.mtu_comp_status;
3305 }
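/*
 * bnad_change_mtu() validates the requested MTU, and on CAT2 devices with
 * the interface running re-creates the Rx path when the frame size
 * crosses the 4K boundary (multi-buffer mode), before programming the new
 * frame size through bnad_mtu_set().
 */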
3308 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3309 {
3310 int err, mtu;
3311 struct bnad *bnad = netdev_priv(netdev);
3312 u32 rx_count = 0, frame, new_frame;
3314 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3315 return -EINVAL;
3317 mutex_lock(&bnad->conf_mutex);
3319 mtu = netdev->mtu;
3320 netdev->mtu = new_mtu;
3322 frame = BNAD_FRAME_SIZE(mtu);
3323 new_frame = BNAD_FRAME_SIZE(new_mtu);
3325 /* check if multi-buffer needs to be enabled */
3326 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3327 netif_running(bnad->netdev)) {
3328 /* only when transition is over 4K */
3329 if ((frame <= 4096 && new_frame > 4096) ||
3330 (frame > 4096 && new_frame <= 4096))
3331 rx_count = bnad_reinit_rx(bnad);
3334 /* rx_count > 0 - a new Rx was created;
3335 * Linux expects err = 0 to be returned in that case
3336 */
3337 err = bnad_mtu_set(bnad, new_frame);
3338 if (err)
3339 err = -EBUSY;
3341 mutex_unlock(&bnad->conf_mutex);
3342 return err;
3343 }
3346 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3348 struct bnad *bnad = netdev_priv(netdev);
3349 unsigned long flags;
3351 if (!bnad->rx_info[0].rx)
3352 return 0;
3354 mutex_lock(&bnad->conf_mutex);
3356 spin_lock_irqsave(&bnad->bna_lock, flags);
3357 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3358 set_bit(vid, bnad->active_vlans);
3359 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3361 mutex_unlock(&bnad->conf_mutex);
3363 return 0;
3364 }
3367 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3369 struct bnad *bnad = netdev_priv(netdev);
3370 unsigned long flags;
3372 if (!bnad->rx_info[0].rx)
3373 return 0;
3375 mutex_lock(&bnad->conf_mutex);
3377 spin_lock_irqsave(&bnad->bna_lock, flags);
3378 clear_bit(vid, bnad->active_vlans);
3379 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3380 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3382 mutex_unlock(&bnad->conf_mutex);
3384 return 0;
3385 }
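/*
 * Only VLAN-strip (NETIF_F_HW_VLAN_CTAG_RX) changes need hardware action
 * here: bnad_set_features() toggles Rx VLAN stripping on rx_info[0].rx
 * under bna_lock when that feature bit changes while the device is up.
 */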
3387 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3389 struct bnad *bnad = netdev_priv(dev);
3390 netdev_features_t changed = features ^ dev->features;
3392 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3393 unsigned long flags;
3395 spin_lock_irqsave(&bnad->bna_lock, flags);
3397 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3398 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3399 else
3400 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3402 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3403 }
3405 return 0;
3406 }
3408 #ifdef CONFIG_NET_POLL_CONTROLLER
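/*
 * Netpoll entry: in INTx mode the interrupt is masked and the ISR is
 * invoked directly; in MSI-X mode Tx completions are handled in the
 * sending context, so only the Rx paths are scheduled for NAPI polling.
 */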
3410 bnad_netpoll(struct net_device *netdev)
3412 struct bnad *bnad = netdev_priv(netdev);
3413 struct bnad_rx_info *rx_info;
3414 struct bnad_rx_ctrl *rx_ctrl;
3415 u32 curr_mask;
3416 int i, j;
3418 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3419 bna_intx_disable(&bnad->bna, curr_mask);
3420 bnad_isr(bnad->pcidev->irq, netdev);
3421 bna_intx_enable(&bnad->bna, curr_mask);
3422 } else {
3423 /*
3424 * Tx processing may happen in sending context, so no need
3425 * to explicitly process completions here
3426 */
3428 /* Rx processing */
3429 for (i = 0; i < bnad->num_rx; i++) {
3430 rx_info = &bnad->rx_info[i];
3431 if (!rx_info->rx)
3432 continue;
3433 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3434 rx_ctrl = &rx_info->rx_ctrl[j];
3435 if (rx_ctrl->ccb)
3436 bnad_netif_rx_schedule_poll(bnad,
3437 rx_ctrl->ccb);
3438 }
3439 }
3440 }
3441 }
3443 #endif
3444 static const struct net_device_ops bnad_netdev_ops = {
3445 .ndo_open = bnad_open,
3446 .ndo_stop = bnad_stop,
3447 .ndo_start_xmit = bnad_start_xmit,
3448 .ndo_get_stats64 = bnad_get_stats64,
3449 .ndo_set_rx_mode = bnad_set_rx_mode,
3450 .ndo_validate_addr = eth_validate_addr,
3451 .ndo_set_mac_address = bnad_set_mac_address,
3452 .ndo_change_mtu = bnad_change_mtu,
3453 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3454 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3455 .ndo_set_features = bnad_set_features,
3456 #ifdef CONFIG_NET_POLL_CONTROLLER
3457 .ndo_poll_controller = bnad_netpoll
3458 #endif
3459 };
3462 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3464 struct net_device *netdev = bnad->netdev;
3466 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3467 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3468 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3469 NETIF_F_HW_VLAN_CTAG_RX;
3471 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3472 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3473 NETIF_F_TSO | NETIF_F_TSO6;
3475 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3477 if (using_dac)
3478 netdev->features |= NETIF_F_HIGHDMA;
3480 netdev->mem_start = bnad->mmio_start;
3481 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3483 netdev->netdev_ops = &bnad_netdev_ops;
3484 bnad_set_ethtool_ops(netdev);
3488 * 1. Initialize the bnad structure
3489 * 2. Setup netdev pointer in pci_dev
3490 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3491 * 4. Initialize work queue.
3494 bnad_init(struct bnad *bnad,
3495 struct pci_dev *pdev, struct net_device *netdev)
3497 unsigned long flags;
3499 SET_NETDEV_DEV(netdev, &pdev->dev);
3500 pci_set_drvdata(pdev, netdev);
3502 bnad->netdev = netdev;
3503 bnad->pcidev = pdev;
3504 bnad->mmio_start = pci_resource_start(pdev, 0);
3505 bnad->mmio_len = pci_resource_len(pdev, 0);
3506 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3507 if (!bnad->bar0) {
3508 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3509 return -ENOMEM;
3510 }
3511 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3512 (unsigned long long) bnad->mmio_len);
3514 spin_lock_irqsave(&bnad->bna_lock, flags);
3515 if (!bnad_msix_disable)
3516 bnad->cfg_flags = BNAD_CF_MSIX;
3518 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3520 bnad_q_num_init(bnad);
3521 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3523 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3524 (bnad->num_rx * bnad->num_rxp_per_rx) +
3525 BNAD_MAILBOX_MSIX_VECTORS;
3527 bnad->txq_depth = BNAD_TXQ_DEPTH;
3528 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3530 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3531 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3533 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3534 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3535 if (!bnad->work_q) {
3536 iounmap(bnad->bar0);
3537 return -ENOMEM;
3538 }
3540 return 0;
3541 }
3544 * Must be called after bnad_pci_uninit()
3545 * so that iounmap() and pci_set_drvdata(NULL)
3546 * happens only after PCI uninitialization.
3549 bnad_uninit(struct bnad *bnad)
3550 {
3551 if (bnad->work_q) {
3552 flush_workqueue(bnad->work_q);
3553 destroy_workqueue(bnad->work_q);
3554 bnad->work_q = NULL;
3555 }
3557 if (bnad->bar0)
3558 iounmap(bnad->bar0);
3559 }
3563 * a) Per-ioceth mutex used for serializing configuration
3564 * changes from the OS interface
3565 * b) Spin lock used to protect the bna state machine
3568 bnad_lock_init(struct bnad *bnad)
3570 spin_lock_init(&bnad->bna_lock);
3571 mutex_init(&bnad->conf_mutex);
3572 mutex_init(&bnad_list_mutex);
3576 bnad_lock_uninit(struct bnad *bnad)
3578 mutex_destroy(&bnad->conf_mutex);
3579 mutex_destroy(&bnad_list_mutex);
3582 /* PCI Initialization */
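/*
 * bnad_pci_init() enables the PCI device, claims its regions, selects a
 * 64-bit DMA mask when available (falling back to 32-bit and reporting
 * the choice through *using_dac) and enables bus mastering.
 */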
3584 bnad_pci_init(struct bnad *bnad,
3585 struct pci_dev *pdev, bool *using_dac)
3586 {
3587 int err;
3589 err = pci_enable_device(pdev);
3590 if (err)
3591 return err;
3592 err = pci_request_regions(pdev, BNAD_NAME);
3593 if (err)
3594 goto disable_device;
3595 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3596 *using_dac = true;
3597 } else {
3598 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3599 if (err)
3600 goto release_regions;
3601 *using_dac = false;
3602 }
3603 pci_set_master(pdev);
3604 return 0;
3606 release_regions:
3607 pci_release_regions(pdev);
3608 disable_device:
3609 pci_disable_device(pdev);
3611 return err;
3612 }
3615 bnad_pci_uninit(struct pci_dev *pdev)
3617 pci_release_regions(pdev);
3618 pci_disable_device(pdev);
3619 }
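/*
 * Probe sequence: load the firmware image, allocate the net_device with
 * the bnad private area, initialize PCI and the bnad structure, set up
 * netdev features and debugfs, allocate bna resources, bring up MSI-X and
 * the mailbox IRQ, start the IOC timers, enable the ioceth, adjust queue
 * counts to what the hardware reports, allocate module resources, read
 * the burnt-in MAC and finally register the net_device.
 */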
3622 bnad_pci_probe(struct pci_dev *pdev,
3623 const struct pci_device_id *pcidev_id)
3624 {
3625 bool using_dac;
3626 int err;
3627 struct bnad *bnad;
3628 struct bna *bna;
3629 struct net_device *netdev;
3630 struct bfa_pcidev pcidev_info;
3631 unsigned long flags;
3633 mutex_lock(&bnad_fwimg_mutex);
3634 if (!cna_get_firmware_buf(pdev)) {
3635 mutex_unlock(&bnad_fwimg_mutex);
3636 dev_err(&pdev->dev, "failed to load firmware image!\n");
3637 return -ENODEV;
3638 }
3639 mutex_unlock(&bnad_fwimg_mutex);
3642 * Allocates sizeof(struct net_device + struct bnad)
3643 * bnad = netdev->priv
3645 netdev = alloc_etherdev(sizeof(struct bnad));
3646 if (!netdev) {
3647 err = -ENOMEM;
3648 return err;
3649 }
3650 bnad = netdev_priv(netdev);
3651 bnad_lock_init(bnad);
3652 bnad_add_to_list(bnad);
3653 bnad->id = atomic_inc_return(&bna_id) - 1;
3655 mutex_lock(&bnad->conf_mutex);
3657 * PCI initialization
3658 * Output : using_dac = 1 for 64 bit DMA
3659 * = 0 for 32 bit DMA
3662 err = bnad_pci_init(bnad, pdev, &using_dac);
3663 if (err)
3664 goto unlock_mutex;
3667 * Initialize bnad structure
3668 * Setup relation between pci_dev & netdev
3670 err = bnad_init(bnad, pdev, netdev);
3671 if (err)
3672 goto pci_uninit;
3674 /* Initialize netdev structure, set up ethtool ops */
3675 bnad_netdev_init(bnad, using_dac);
3677 /* Set link to down state */
3678 netif_carrier_off(netdev);
3680 /* Setup the debugfs node for this bfad */
3681 if (bna_debugfs_enable)
3682 bnad_debugfs_init(bnad);
3684 /* Get resource requirement form bna */
3685 spin_lock_irqsave(&bnad->bna_lock, flags);
3686 bna_res_req(&bnad->res_info[0]);
3687 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3689 /* Allocate resources from bna */
3690 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3691 if (err)
3692 goto drv_uninit;
3694 bna = &bnad->bna;
3696 /* Setup pcidev_info for bna_init() */
3697 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3698 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3699 pcidev_info.device_id = bnad->pcidev->device;
3700 pcidev_info.pci_bar_kva = bnad->bar0;
3702 spin_lock_irqsave(&bnad->bna_lock, flags);
3703 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3704 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3706 bnad->stats.bna_stats = &bna->stats;
3708 bnad_enable_msix(bnad);
3709 err = bnad_mbox_irq_alloc(bnad);
3710 if (err)
3711 goto res_free;
3713 /* Set up timers */
3714 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3715 (unsigned long)bnad);
3716 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3717 (unsigned long)bnad);
3718 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3719 (unsigned long)bnad);
3720 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3721 (unsigned long)bnad);
3725 * If the call back comes with error, we bail out.
3726 * This is a catastrophic error.
3728 err = bnad_ioceth_enable(bnad);
3730 dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3734 spin_lock_irqsave(&bnad->bna_lock, flags);
3735 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3736 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3737 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3738 bna_attr(bna)->num_rxp - 1);
3739 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3740 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3741 err = -EIO;
3742 }
3743 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3744 if (err)
3745 goto disable_ioceth;
3747 spin_lock_irqsave(&bnad->bna_lock, flags);
3748 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3749 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3751 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3753 if (err)
3754 goto disable_ioceth;
3757 spin_lock_irqsave(&bnad->bna_lock, flags);
3758 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3759 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3761 /* Get the burnt-in mac */
3762 spin_lock_irqsave(&bnad->bna_lock, flags);
3763 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3764 bnad_set_netdev_perm_addr(bnad);
3765 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3767 mutex_unlock(&bnad->conf_mutex);
3769 /* Finally, register with net_device layer */
3770 err = register_netdev(netdev);
3771 if (err) {
3772 dev_err(&pdev->dev, "registering net device failed\n");
3773 goto probe_uninit;
3774 }
3775 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3777 return 0;
3779 probe_success:
3780 mutex_unlock(&bnad->conf_mutex);
3781 return 0;
3783 probe_uninit:
3784 mutex_lock(&bnad->conf_mutex);
3785 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3786 disable_ioceth:
3787 bnad_ioceth_disable(bnad);
3788 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3789 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3790 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3791 spin_lock_irqsave(&bnad->bna_lock, flags);
3792 bna_uninit(bna);
3793 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3794 bnad_mbox_irq_free(bnad);
3795 bnad_disable_msix(bnad);
3796 res_free:
3797 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3798 drv_uninit:
3799 /* Remove the debugfs node for this bnad */
3800 kfree(bnad->regdata);
3801 bnad_debugfs_uninit(bnad);
3802 bnad_uninit(bnad);
3803 pci_uninit:
3804 bnad_pci_uninit(pdev);
3805 unlock_mutex:
3806 mutex_unlock(&bnad->conf_mutex);
3807 bnad_remove_from_list(bnad);
3808 bnad_lock_uninit(bnad);
3809 free_netdev(netdev);
3810 return err;
3811 }
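/*
 * bnad_pci_remove() undoes probe in reverse: unregister the net_device if
 * it was registered, disable the ioceth and its timers, uninitialize the
 * bna, free module and bna resources, release the mailbox IRQ and MSI-X,
 * uninitialize PCI, remove the debugfs node and free the net_device.
 */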
3814 bnad_pci_remove(struct pci_dev *pdev)
3816 struct net_device *netdev = pci_get_drvdata(pdev);
3817 struct bnad *bnad;
3818 struct bna *bna;
3819 unsigned long flags;
3821 if (!netdev)
3822 return;
3824 bnad = netdev_priv(netdev);
3825 bna = &bnad->bna;
3827 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3828 unregister_netdev(netdev);
3830 mutex_lock(&bnad->conf_mutex);
3831 bnad_ioceth_disable(bnad);
3832 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3833 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3834 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3835 spin_lock_irqsave(&bnad->bna_lock, flags);
3836 bna_uninit(bna);
3837 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3839 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3840 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3841 bnad_mbox_irq_free(bnad);
3842 bnad_disable_msix(bnad);
3843 bnad_pci_uninit(pdev);
3844 mutex_unlock(&bnad->conf_mutex);
3845 bnad_remove_from_list(bnad);
3846 bnad_lock_uninit(bnad);
3847 /* Remove the debugfs node for this bnad */
3848 kfree(bnad->regdata);
3849 bnad_debugfs_uninit(bnad);
3851 free_netdev(netdev);
3854 static const struct pci_device_id bnad_pci_id_table[] = {
3855 {
3856 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3857 PCI_DEVICE_ID_BROCADE_CT),
3858 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3859 .class_mask = 0xffff00
3860 },
3861 {
3862 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3863 BFA_PCI_DEVICE_ID_CT2),
3864 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3865 .class_mask = 0xffff00
3866 },
3867 {0, },
3868 };
3870 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3872 static struct pci_driver bnad_pci_driver = {
3873 .name = BNAD_NAME,
3874 .id_table = bnad_pci_id_table,
3875 .probe = bnad_pci_probe,
3876 .remove = bnad_pci_remove,
3877 };
3880 bnad_module_init(void)
3884 pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
3885 BNAD_VERSION);
3887 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3889 err = pci_register_driver(&bnad_pci_driver);
3890 if (err < 0) {
3891 pr_err("bna: PCI driver registration failed err=%d\n", err);
3892 return err;
3893 }
3895 return 0;
3896 }
3899 bnad_module_exit(void)
3901 pci_unregister_driver(&bnad_pci_driver);
3902 release_firmware(bfi_fw);
3903 }
3905 module_init(bnad_module_init);
3906 module_exit(bnad_module_exit);
3908 MODULE_AUTHOR("Brocade");
3909 MODULE_LICENSE("GPL");
3910 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3911 MODULE_VERSION(BNAD_VERSION);
3912 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3913 MODULE_FIRMWARE(CNA_FW_FILE_CT2);