/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"
static DEFINE_MUTEX(bnad_fwimg_mutex);

static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

static u32 bnad_rxqs_per_cq = 2;
static atomic_t bna_id;
static const u8 bnad_bcast_addr[] __aligned(2) =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len = (_size);		\
} while (0)
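/*
 * Usage sketch (see bnad_setup_tx() further below): the Tx path sizes its
 * unmap shadow queue roughly as
 *   BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
 *			      bnad->num_txq_per_tx,
 *			      bnad->txq_depth * sizeof(struct bnad_tx_unmap));
 * i.e. one kernel-virtual (KVA) allocation per TxQ, large enough to shadow
 * every descriptor with a bnad_tx_unmap entry.
 */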
/* Reinitialize completions in CQ, once Rx is taken down */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}
92 /* Tx Datapath functions */
95 /* Caller should ensure that the entry at unmap_q[index] is valid */
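/*
 * Unmaps the head fragment of unmap_q[index] with dma_unmap_single() and any
 * page fragments with dma_unmap_page(), advancing (and wrapping) the index as
 * it walks the vectors; the index just past the last vector is returned so
 * the caller can continue from the next unmap entry.
 */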
97 bnad_tx_buff_unmap(struct bnad *bnad,
98 struct bnad_tx_unmap *unmap_q,
99 u32 q_depth, u32 index)
101 struct bnad_tx_unmap *unmap;
105 unmap = &unmap_q[index];
106 nvecs = unmap->nvecs;
111 dma_unmap_single(&bnad->pcidev->dev,
112 dma_unmap_addr(&unmap->vectors[0], dma_addr),
113 skb_headlen(skb), DMA_TO_DEVICE);
114 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
120 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
122 BNA_QE_INDX_INC(index, q_depth);
123 unmap = &unmap_q[index];
126 dma_unmap_page(&bnad->pcidev->dev,
127 dma_unmap_addr(&unmap->vectors[vector], dma_addr),
128 dma_unmap_len(&unmap->vectors[vector], dma_len),
130 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
134 BNA_QE_INDX_INC(index, q_depth);
/*
 * Frees all pending Tx Bufs.
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
145 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
147 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
151 for (i = 0; i < tcb->q_depth; i++) {
152 skb = unmap_q[i].skb;
155 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
157 dev_kfree_skb_any(skb);
/* bnad_txcmpl_process : Frees the Tx bufs on Tx completion.
 * Can be called in a) Interrupt context, b) Sending context.
 */
167 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
169 u32 sent_packets = 0, sent_bytes = 0;
170 u32 wis, unmap_wis, hw_cons, cons, q_depth;
171 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
172 struct bnad_tx_unmap *unmap;
175 /* Just return if TX is stopped */
176 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
179 hw_cons = *(tcb->hw_consumer_index);
180 cons = tcb->consumer_index;
181 q_depth = tcb->q_depth;
183 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
184 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
187 unmap = &unmap_q[cons];
192 sent_bytes += skb->len;
194 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
197 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
198 dev_kfree_skb_any(skb);
201 /* Update consumer pointers. */
202 tcb->consumer_index = hw_cons;
204 tcb->txq->tx_packets += sent_packets;
205 tcb->txq->tx_bytes += sent_bytes;
211 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
213 struct net_device *netdev = bnad->netdev;
216 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
219 sent = bnad_txcmpl_process(bnad, tcb);
221 if (netif_queue_stopped(netdev) &&
222 netif_carrier_ok(netdev) &&
223 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
224 BNAD_NETIF_WAKE_THRESHOLD) {
225 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
226 netif_wake_queue(netdev);
227 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
232 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
233 bna_ib_ack(tcb->i_dbell, sent);
235 smp_mb__before_atomic();
236 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
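/*
 * The smp_mb__before_atomic() above orders the consumer-index update and
 * doorbell ack before the clear_bit(), so another CPU that subsequently wins
 * test_and_set_bit(BNAD_TXQ_FREE_SENT) observes a fully completed pass.
 */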
241 /* MSIX Tx Completion Handler */
243 bnad_msix_tx(int irq, void *data)
245 struct bna_tcb *tcb = (struct bna_tcb *)data;
246 struct bnad *bnad = tcb->bnad;
248 bnad_tx_complete(bnad, tcb);
254 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
256 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
258 unmap_q->reuse_pi = -1;
259 unmap_q->alloc_order = -1;
260 unmap_q->map_size = 0;
261 unmap_q->type = BNAD_RXBUF_NONE;
264 /* Default is page-based allocation. Multi-buffer support - TBD */
266 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
268 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
271 bnad_rxq_alloc_uninit(bnad, rcb);
273 order = get_order(rcb->rxq->buffer_size);
275 unmap_q->type = BNAD_RXBUF_PAGE;
277 if (bna_is_small_rxq(rcb->id)) {
278 unmap_q->alloc_order = 0;
279 unmap_q->map_size = rcb->rxq->buffer_size;
281 if (rcb->rxq->multi_buffer) {
282 unmap_q->alloc_order = 0;
283 unmap_q->map_size = rcb->rxq->buffer_size;
284 unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
286 unmap_q->alloc_order = order;
288 (rcb->rxq->buffer_size > 2048) ?
289 PAGE_SIZE << order : 2048;
293 BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
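/*
 * Sizing example: with 4 KiB pages and buffer_size <= 2048, order is 0 and
 * map_size is 2048, so two receive buffers share one page (via reuse_pi in
 * the refill path); with buffer_size > 2048 the whole PAGE_SIZE << order
 * allocation maps a single buffer. The BUG_ON above simply enforces that
 * map_size divides the allocation evenly.
 */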
299 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
304 dma_unmap_page(&bnad->pcidev->dev,
305 dma_unmap_addr(&unmap->vector, dma_addr),
306 unmap->vector.len, DMA_FROM_DEVICE);
307 put_page(unmap->page);
309 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
310 unmap->vector.len = 0;
314 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
319 dma_unmap_single(&bnad->pcidev->dev,
320 dma_unmap_addr(&unmap->vector, dma_addr),
321 unmap->vector.len, DMA_FROM_DEVICE);
322 dev_kfree_skb_any(unmap->skb);
324 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
325 unmap->vector.len = 0;
329 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
331 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
334 for (i = 0; i < rcb->q_depth; i++) {
335 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
337 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
338 bnad_rxq_cleanup_skb(bnad, unmap);
340 bnad_rxq_cleanup_page(bnad, unmap);
342 bnad_rxq_alloc_uninit(bnad, rcb);
346 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
348 u32 alloced, prod, q_depth;
349 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
350 struct bnad_rx_unmap *unmap, *prev;
351 struct bna_rxq_entry *rxent;
353 u32 page_offset, alloc_size;
356 prod = rcb->producer_index;
357 q_depth = rcb->q_depth;
359 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
363 unmap = &unmap_q->unmap[prod];
365 if (unmap_q->reuse_pi < 0) {
366 page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
367 unmap_q->alloc_order);
370 prev = &unmap_q->unmap[unmap_q->reuse_pi];
372 page_offset = prev->page_offset + unmap_q->map_size;
376 if (unlikely(!page)) {
377 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
378 rcb->rxq->rxbuf_alloc_failed++;
382 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
383 unmap_q->map_size, DMA_FROM_DEVICE);
384 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
386 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
387 rcb->rxq->rxbuf_map_failed++;
392 unmap->page_offset = page_offset;
393 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
394 unmap->vector.len = unmap_q->map_size;
395 page_offset += unmap_q->map_size;
397 if (page_offset < alloc_size)
398 unmap_q->reuse_pi = prod;
400 unmap_q->reuse_pi = -1;
402 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
403 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
404 BNA_QE_INDX_INC(prod, q_depth);
409 if (likely(alloced)) {
410 rcb->producer_index = prod;
412 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
413 bna_rxq_prod_indx_doorbell(rcb);
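/*
 * Page sharing: reuse_pi remembers the producer slot whose compound page
 * still has room; the next buffer is mapped at prev->page_offset + map_size
 * instead of allocating a fresh page. Once page_offset reaches alloc_size,
 * reuse_pi is reset to -1 and a new page is allocated on the next pass.
 */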
420 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
422 u32 alloced, prod, q_depth, buff_sz;
423 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
424 struct bnad_rx_unmap *unmap;
425 struct bna_rxq_entry *rxent;
429 buff_sz = rcb->rxq->buffer_size;
430 prod = rcb->producer_index;
431 q_depth = rcb->q_depth;
435 unmap = &unmap_q->unmap[prod];
437 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
439 if (unlikely(!skb)) {
440 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
441 rcb->rxq->rxbuf_alloc_failed++;
445 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
446 buff_sz, DMA_FROM_DEVICE);
447 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
448 dev_kfree_skb_any(skb);
449 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
450 rcb->rxq->rxbuf_map_failed++;
455 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
456 unmap->vector.len = buff_sz;
458 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
459 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
460 BNA_QE_INDX_INC(prod, q_depth);
465 if (likely(alloced)) {
466 rcb->producer_index = prod;
468 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
469 bna_rxq_prod_indx_doorbell(rcb);
476 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
478 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
481 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
482 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
485 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
486 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
488 bnad_rxq_refill_page(bnad, rcb, to_alloc);
491 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
493 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
494 BNA_CQ_EF_L4_CKSUM_OK)
496 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
497 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
498 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
499 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
500 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
501 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
502 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
503 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
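/*
 * These masks are compared against the completion flags in bnad_cq_process():
 * masked_flags = flags & flags_cksum_prot_mask, and only an exact match with
 * one of the tcp4/udp4/tcp6/udp6 combinations (with NETIF_F_RXCSUM enabled)
 * lets the skb be marked CHECKSUM_UNNECESSARY.
 */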
506 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
507 u32 sop_ci, u32 nvecs)
509 struct bnad_rx_unmap_q *unmap_q;
510 struct bnad_rx_unmap *unmap;
513 unmap_q = rcb->unmap_q;
514 for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
515 unmap = &unmap_q->unmap[ci];
516 BNA_QE_INDX_INC(ci, rcb->q_depth);
518 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
519 bnad_rxq_cleanup_skb(bnad, unmap);
521 bnad_rxq_cleanup_page(bnad, unmap);
526 bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
530 struct bnad_rx_unmap_q *unmap_q;
531 struct bna_cq_entry *cq, *cmpl;
532 u32 ci, pi, totlen = 0;
535 pi = ccb->producer_index;
538 rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
539 unmap_q = rcb->unmap_q;
541 ci = rcb->consumer_index;
543 /* prefetch header */
544 prefetch(page_address(unmap_q->unmap[ci].page) +
545 unmap_q->unmap[ci].page_offset);
548 struct bnad_rx_unmap *unmap;
551 unmap = &unmap_q->unmap[ci];
552 BNA_QE_INDX_INC(ci, rcb->q_depth);
554 dma_unmap_page(&bnad->pcidev->dev,
555 dma_unmap_addr(&unmap->vector, dma_addr),
556 unmap->vector.len, DMA_FROM_DEVICE);
558 len = ntohs(cmpl->length);
559 skb->truesize += unmap->vector.len;
562 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
563 unmap->page, unmap->page_offset, len);
566 unmap->vector.len = 0;
568 BNA_QE_INDX_INC(pi, ccb->q_depth);
573 skb->data_len += totlen;
577 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
578 struct bnad_rx_unmap *unmap, u32 len)
582 dma_unmap_single(&bnad->pcidev->dev,
583 dma_unmap_addr(&unmap->vector, dma_addr),
584 unmap->vector.len, DMA_FROM_DEVICE);
587 skb->protocol = eth_type_trans(skb, bnad->netdev);
590 unmap->vector.len = 0;
594 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
596 struct bna_cq_entry *cq, *cmpl, *next_cmpl;
597 struct bna_rcb *rcb = NULL;
598 struct bnad_rx_unmap_q *unmap_q;
599 struct bnad_rx_unmap *unmap = NULL;
600 struct sk_buff *skb = NULL;
601 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
602 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
603 u32 packets = 0, len = 0, totlen = 0;
604 u32 pi, vec, sop_ci = 0, nvecs = 0;
605 u32 flags, masked_flags;
607 prefetch(bnad->netdev);
611 while (packets < budget) {
612 cmpl = &cq[ccb->producer_index];
		/* The 'valid' field is set by the adapter, only after writing
		 * the other fields of completion entry. Hence, do not load
		 * other fields of completion entry *before* the 'valid' is
		 * loaded. Adding the rmb() here prevents the compiler and/or
		 * CPU from reordering the reads which would potentially result
		 * in reading stale values in completion entry.
		 */
624 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
626 if (bna_is_small_rxq(cmpl->rxq_id))
631 unmap_q = rcb->unmap_q;
633 /* start of packet ci */
634 sop_ci = rcb->consumer_index;
636 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
637 unmap = &unmap_q->unmap[sop_ci];
640 skb = napi_get_frags(&rx_ctrl->napi);
646 flags = ntohl(cmpl->flags);
647 len = ntohs(cmpl->length);
651 /* Check all the completions for this frame.
652 * busy-wait doesn't help much, break here.
654 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
655 (flags & BNA_CQ_EF_EOP) == 0) {
656 pi = ccb->producer_index;
658 BNA_QE_INDX_INC(pi, ccb->q_depth);
661 if (!next_cmpl->valid)
663 /* The 'valid' field is set by the adapter, only
664 * after writing the other fields of completion
665 * entry. Hence, do not load other fields of
666 * completion entry *before* the 'valid' is
667 * loaded. Adding the rmb() here prevents the
668 * compiler and/or CPU from reordering the reads
669 * which would potentially result in reading
670 * stale values in completion entry.
674 len = ntohs(next_cmpl->length);
675 flags = ntohl(next_cmpl->flags);
679 } while ((flags & BNA_CQ_EF_EOP) == 0);
681 if (!next_cmpl->valid)
686 /* TODO: BNA_CQ_EF_LOCAL ? */
687 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
688 BNA_CQ_EF_FCS_ERROR |
689 BNA_CQ_EF_TOO_LONG))) {
690 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
691 rcb->rxq->rx_packets_with_error++;
696 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
697 bnad_cq_setup_skb(bnad, skb, unmap, len);
699 bnad_cq_setup_skb_frags(ccb, skb, nvecs);
701 rcb->rxq->rx_packets++;
702 rcb->rxq->rx_bytes += totlen;
703 ccb->bytes_per_intr += totlen;
705 masked_flags = flags & flags_cksum_prot_mask;
708 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
709 ((masked_flags == flags_tcp4) ||
710 (masked_flags == flags_udp4) ||
711 (masked_flags == flags_tcp6) ||
712 (masked_flags == flags_udp6))))
713 skb->ip_summed = CHECKSUM_UNNECESSARY;
715 skb_checksum_none_assert(skb);
717 if ((flags & BNA_CQ_EF_VLAN) &&
718 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
719 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
721 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
722 netif_receive_skb(skb);
724 napi_gro_frags(&rx_ctrl->napi);
727 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
728 for (vec = 0; vec < nvecs; vec++) {
729 cmpl = &cq[ccb->producer_index];
731 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
735 napi_gro_flush(&rx_ctrl->napi, false);
736 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
737 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
739 bnad_rxq_post(bnad, ccb->rcb[0]);
741 bnad_rxq_post(bnad, ccb->rcb[1]);
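/*
 * bnad_cq_process() returns the number of packets handed to the stack; the
 * NAPI poll handler (bnad_napi_poll_rx below) completes NAPI and re-enables
 * the CQ interrupt only when fewer packets than the budget were processed.
 */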
747 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
749 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
750 struct napi_struct *napi = &rx_ctrl->napi;
752 if (likely(napi_schedule_prep(napi))) {
753 __napi_schedule(napi);
754 rx_ctrl->rx_schedule++;
758 /* MSIX Rx Path Handler */
760 bnad_msix_rx(int irq, void *data)
762 struct bna_ccb *ccb = (struct bna_ccb *)data;
765 ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
766 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
772 /* Interrupt handlers */
774 /* Mbox Interrupt Handlers */
776 bnad_msix_mbox_handler(int irq, void *data)
780 struct bnad *bnad = (struct bnad *)data;
782 spin_lock_irqsave(&bnad->bna_lock, flags);
783 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
784 spin_unlock_irqrestore(&bnad->bna_lock, flags);
788 bna_intr_status_get(&bnad->bna, intr_status);
790 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
791 bna_mbox_handler(&bnad->bna, intr_status);
793 spin_unlock_irqrestore(&bnad->bna_lock, flags);
799 bnad_isr(int irq, void *data)
804 struct bnad *bnad = (struct bnad *)data;
805 struct bnad_rx_info *rx_info;
806 struct bnad_rx_ctrl *rx_ctrl;
807 struct bna_tcb *tcb = NULL;
809 spin_lock_irqsave(&bnad->bna_lock, flags);
810 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
811 spin_unlock_irqrestore(&bnad->bna_lock, flags);
815 bna_intr_status_get(&bnad->bna, intr_status);
817 if (unlikely(!intr_status)) {
818 spin_unlock_irqrestore(&bnad->bna_lock, flags);
822 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
823 bna_mbox_handler(&bnad->bna, intr_status);
825 spin_unlock_irqrestore(&bnad->bna_lock, flags);
827 if (!BNA_IS_INTX_DATA_INTR(intr_status))
830 /* Process data interrupts */
832 for (i = 0; i < bnad->num_tx; i++) {
833 for (j = 0; j < bnad->num_txq_per_tx; j++) {
834 tcb = bnad->tx_info[i].tcb[j];
835 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
836 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
840 for (i = 0; i < bnad->num_rx; i++) {
841 rx_info = &bnad->rx_info[i];
844 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
845 rx_ctrl = &rx_info->rx_ctrl[j];
847 bnad_netif_rx_schedule_poll(bnad,
855 * Called in interrupt / callback context
856 * with bna_lock held, so cfg_flags access is OK
859 bnad_enable_mbox_irq(struct bnad *bnad)
861 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
863 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
871 bnad_disable_mbox_irq(struct bnad *bnad)
873 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
875 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
879 bnad_set_netdev_perm_addr(struct bnad *bnad)
881 struct net_device *netdev = bnad->netdev;
883 ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
884 if (is_zero_ether_addr(netdev->dev_addr))
885 ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
888 /* Control Path Handlers */
892 bnad_cb_mbox_intr_enable(struct bnad *bnad)
894 bnad_enable_mbox_irq(bnad);
898 bnad_cb_mbox_intr_disable(struct bnad *bnad)
900 bnad_disable_mbox_irq(bnad);
904 bnad_cb_ioceth_ready(struct bnad *bnad)
906 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
907 complete(&bnad->bnad_completions.ioc_comp);
911 bnad_cb_ioceth_failed(struct bnad *bnad)
913 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
914 complete(&bnad->bnad_completions.ioc_comp);
918 bnad_cb_ioceth_disabled(struct bnad *bnad)
920 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
921 complete(&bnad->bnad_completions.ioc_comp);
925 bnad_cb_enet_disabled(void *arg)
927 struct bnad *bnad = (struct bnad *)arg;
929 netif_carrier_off(bnad->netdev);
930 complete(&bnad->bnad_completions.enet_comp);
934 bnad_cb_ethport_link_status(struct bnad *bnad,
935 enum bna_link_status link_status)
937 bool link_up = false;
939 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
941 if (link_status == BNA_CEE_UP) {
942 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
943 BNAD_UPDATE_CTR(bnad, cee_toggle);
944 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
946 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
947 BNAD_UPDATE_CTR(bnad, cee_toggle);
948 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
952 if (!netif_carrier_ok(bnad->netdev)) {
954 netdev_info(bnad->netdev, "link up\n");
955 netif_carrier_on(bnad->netdev);
956 BNAD_UPDATE_CTR(bnad, link_toggle);
957 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
958 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
960 struct bna_tcb *tcb =
961 bnad->tx_info[tx_id].tcb[tcb_id];
968 if (test_bit(BNAD_TXQ_TX_STARTED,
972 * Transmit Schedule */
976 BNAD_UPDATE_CTR(bnad,
982 BNAD_UPDATE_CTR(bnad,
989 if (netif_carrier_ok(bnad->netdev)) {
990 netdev_info(bnad->netdev, "link down\n");
991 netif_carrier_off(bnad->netdev);
992 BNAD_UPDATE_CTR(bnad, link_toggle);
998 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
1000 struct bnad *bnad = (struct bnad *)arg;
1002 complete(&bnad->bnad_completions.tx_comp);
1006 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1008 struct bnad_tx_info *tx_info =
1009 (struct bnad_tx_info *)tcb->txq->tx->priv;
1012 tx_info->tcb[tcb->id] = tcb;
1016 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1018 struct bnad_tx_info *tx_info =
1019 (struct bnad_tx_info *)tcb->txq->tx->priv;
1021 tx_info->tcb[tcb->id] = NULL;
1026 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1028 struct bnad_rx_info *rx_info =
1029 (struct bnad_rx_info *)ccb->cq->rx->priv;
1031 rx_info->rx_ctrl[ccb->id].ccb = ccb;
1032 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1036 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1038 struct bnad_rx_info *rx_info =
1039 (struct bnad_rx_info *)ccb->cq->rx->priv;
1041 rx_info->rx_ctrl[ccb->id].ccb = NULL;
1045 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1047 struct bnad_tx_info *tx_info =
1048 (struct bnad_tx_info *)tx->priv;
1049 struct bna_tcb *tcb;
1053 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1054 tcb = tx_info->tcb[i];
1058 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1059 netif_stop_subqueue(bnad->netdev, txq_id);
1064 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1066 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1067 struct bna_tcb *tcb;
1071 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1072 tcb = tx_info->tcb[i];
1077 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1078 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1079 BUG_ON(*(tcb->hw_consumer_index) != 0);
1081 if (netif_carrier_ok(bnad->netdev)) {
1082 netif_wake_subqueue(bnad->netdev, txq_id);
1083 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
	/*
	 * Workaround: the first ioceth enable may have failed and left us with
	 * a zero MAC address; try to fetch the MAC address again here.
	 */
1093 bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1094 bnad_set_netdev_perm_addr(bnad);
 * Free all TxQ buffers and then notify TX_E_CLEANUP_DONE to the Tx fsm.
1102 bnad_tx_cleanup(struct delayed_work *work)
1104 struct bnad_tx_info *tx_info =
1105 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1106 struct bnad *bnad = NULL;
1107 struct bna_tcb *tcb;
1108 unsigned long flags;
1111 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1112 tcb = tx_info->tcb[i];
1118 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1123 bnad_txq_cleanup(bnad, tcb);
1125 smp_mb__before_atomic();
1126 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1130 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1131 msecs_to_jiffies(1));
1135 spin_lock_irqsave(&bnad->bna_lock, flags);
1136 bna_tx_cleanup_complete(tx_info->tx);
1137 spin_unlock_irqrestore(&bnad->bna_lock, flags);
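/*
 * Summary: the cleanup work re-queues itself (with a 1 ms delay) while any
 * TxQ still has BNAD_TXQ_FREE_SENT set, i.e. while the completion path is
 * still active on it; only after every queue has been drained does it report
 * TX_E_CLEANUP_DONE to the Tx fsm via bna_tx_cleanup_complete().
 */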
1141 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1143 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1144 struct bna_tcb *tcb;
1147 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1148 tcb = tx_info->tcb[i];
1153 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1157 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1159 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1160 struct bna_ccb *ccb;
1161 struct bnad_rx_ctrl *rx_ctrl;
1164 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1165 rx_ctrl = &rx_info->rx_ctrl[i];
1170 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1173 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
 * Free all RxQ buffers and then notify RX_E_CLEANUP_DONE to the Rx fsm.
1181 bnad_rx_cleanup(void *work)
1183 struct bnad_rx_info *rx_info =
1184 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1185 struct bnad_rx_ctrl *rx_ctrl;
1186 struct bnad *bnad = NULL;
1187 unsigned long flags;
1190 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1191 rx_ctrl = &rx_info->rx_ctrl[i];
1196 bnad = rx_ctrl->ccb->bnad;
1199 * Wait till the poll handler has exited
1200 * and nothing can be scheduled anymore
1202 napi_disable(&rx_ctrl->napi);
1204 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1205 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1206 if (rx_ctrl->ccb->rcb[1])
1207 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1210 spin_lock_irqsave(&bnad->bna_lock, flags);
1211 bna_rx_cleanup_complete(rx_info->rx);
1212 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1216 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1218 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1219 struct bna_ccb *ccb;
1220 struct bnad_rx_ctrl *rx_ctrl;
1223 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1224 rx_ctrl = &rx_info->rx_ctrl[i];
1229 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1232 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1235 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1239 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1241 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1242 struct bna_ccb *ccb;
1243 struct bna_rcb *rcb;
1244 struct bnad_rx_ctrl *rx_ctrl;
1247 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1248 rx_ctrl = &rx_info->rx_ctrl[i];
1253 napi_enable(&rx_ctrl->napi);
1255 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1260 bnad_rxq_alloc_init(bnad, rcb);
1261 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1262 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1263 bnad_rxq_post(bnad, rcb);
1269 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1271 struct bnad *bnad = (struct bnad *)arg;
1273 complete(&bnad->bnad_completions.rx_comp);
1277 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1279 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1280 complete(&bnad->bnad_completions.mcast_comp);
1284 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1285 struct bna_stats *stats)
1287 if (status == BNA_CB_SUCCESS)
1288 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1290 if (!netif_running(bnad->netdev) ||
1291 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1294 mod_timer(&bnad->stats_timer,
1295 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1299 bnad_cb_enet_mtu_set(struct bnad *bnad)
1301 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1302 complete(&bnad->bnad_completions.mtu_comp);
1306 bnad_cb_completion(void *arg, enum bfa_status status)
1308 struct bnad_iocmd_comp *iocmd_comp =
1309 (struct bnad_iocmd_comp *)arg;
1311 iocmd_comp->comp_status = (u32) status;
1312 complete(&iocmd_comp->comp);
1315 /* Resource allocation, free functions */
1318 bnad_mem_free(struct bnad *bnad,
1319 struct bna_mem_info *mem_info)
1324 if (mem_info->mdl == NULL)
1327 for (i = 0; i < mem_info->num; i++) {
1328 if (mem_info->mdl[i].kva != NULL) {
1329 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1330 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1332 dma_free_coherent(&bnad->pcidev->dev,
1333 mem_info->mdl[i].len,
1334 mem_info->mdl[i].kva, dma_pa);
1336 kfree(mem_info->mdl[i].kva);
1339 kfree(mem_info->mdl);
1340 mem_info->mdl = NULL;
1344 bnad_mem_alloc(struct bnad *bnad,
1345 struct bna_mem_info *mem_info)
1350 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1351 mem_info->mdl = NULL;
1355 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1357 if (mem_info->mdl == NULL)
1360 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1361 for (i = 0; i < mem_info->num; i++) {
1362 mem_info->mdl[i].len = mem_info->len;
1363 mem_info->mdl[i].kva =
1364 dma_alloc_coherent(&bnad->pcidev->dev,
1365 mem_info->len, &dma_pa,
1367 if (mem_info->mdl[i].kva == NULL)
1370 BNA_SET_DMA_ADDR(dma_pa,
1371 &(mem_info->mdl[i].dma));
1374 for (i = 0; i < mem_info->num; i++) {
1375 mem_info->mdl[i].len = mem_info->len;
1376 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1378 if (mem_info->mdl[i].kva == NULL)
1386 bnad_mem_free(bnad, mem_info);
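/*
 * Allocation strategy: BNA_MEM_T_DMA descriptors get one dma_alloc_coherent()
 * block each (with the bus address recorded via BNA_SET_DMA_ADDR), anything
 * else is plain kzalloc()'d kernel memory; on a partial failure the error
 * path above frees whatever was already allocated through bnad_mem_free().
 */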
1390 /* Free IRQ for Mailbox */
1392 bnad_mbox_irq_free(struct bnad *bnad)
1395 unsigned long flags;
1397 spin_lock_irqsave(&bnad->bna_lock, flags);
1398 bnad_disable_mbox_irq(bnad);
1399 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1401 irq = BNAD_GET_MBOX_IRQ(bnad);
1402 free_irq(irq, bnad);
 * Allocates IRQ for the Mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback.
1411 bnad_mbox_irq_alloc(struct bnad *bnad)
1414 unsigned long irq_flags, flags;
1416 irq_handler_t irq_handler;
1418 spin_lock_irqsave(&bnad->bna_lock, flags);
1419 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1420 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1421 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1424 irq_handler = (irq_handler_t)bnad_isr;
1425 irq = bnad->pcidev->irq;
1426 irq_flags = IRQF_SHARED;
1429 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1430 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
 * Set the Mbox IRQ disable flag, so that the IRQ handler
 * called from request_irq() for SHARED IRQs does not execute
1436 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1438 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1440 err = request_irq(irq, irq_handler, irq_flags,
1441 bnad->mbox_irq_name, bnad);
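/*
 * Note: BNAD_RF_MBOX_IRQ_DISABLED is set before request_irq() on purpose, so
 * a shared-IRQ invocation of the handler during registration bails out early;
 * the mailbox IRQ is only enabled later through the bnad_cb_mbox_intr_enable()
 * callback.
 */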
1447 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1449 kfree(intr_info->idl);
1450 intr_info->idl = NULL;
1453 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1455 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1456 u32 txrx_id, struct bna_intr_info *intr_info)
1458 int i, vector_start = 0;
1460 unsigned long flags;
1462 spin_lock_irqsave(&bnad->bna_lock, flags);
1463 cfg_flags = bnad->cfg_flags;
1464 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1466 if (cfg_flags & BNAD_CF_MSIX) {
1467 intr_info->intr_type = BNA_INTR_T_MSIX;
1468 intr_info->idl = kcalloc(intr_info->num,
1469 sizeof(struct bna_intr_descr),
1471 if (!intr_info->idl)
1476 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1480 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1481 (bnad->num_tx * bnad->num_txq_per_tx) +
1489 for (i = 0; i < intr_info->num; i++)
1490 intr_info->idl[i].vector = vector_start + i;
1492 intr_info->intr_type = BNA_INTR_T_INTX;
1494 intr_info->idl = kcalloc(intr_info->num,
1495 sizeof(struct bna_intr_descr),
1497 if (!intr_info->idl)
1502 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1506 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
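/*
 * Vector layout sketch: in MSI-X mode the mailbox vector(s) come first,
 * followed by one vector per TxQ and then one per CQ, which is why the Rx
 * vector_start adds num_tx * num_txq_per_tx on top of
 * BNAD_MAILBOX_MSIX_VECTORS. In INTx mode a single shared line is used and
 * the "vector" is just the Tx/Rx IB bitmask.
 */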
1513 /* NOTE: Should be called for MSIX only
1514 * Unregisters Tx MSIX vector(s) from the kernel
1517 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1523 for (i = 0; i < num_txqs; i++) {
1524 if (tx_info->tcb[i] == NULL)
1527 vector_num = tx_info->tcb[i]->intr_vector;
1528 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1532 /* NOTE: Should be called for MSIX only
1533 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1536 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1537 u32 tx_id, int num_txqs)
1543 for (i = 0; i < num_txqs; i++) {
1544 vector_num = tx_info->tcb[i]->intr_vector;
1545 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1546 tx_id + tx_info->tcb[i]->id);
1547 err = request_irq(bnad->msix_table[vector_num].vector,
1548 (irq_handler_t)bnad_msix_tx, 0,
1549 tx_info->tcb[i]->name,
1559 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1563 /* NOTE: Should be called for MSIX only
1564 * Unregisters Rx MSIX vector(s) from the kernel
1567 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1573 for (i = 0; i < num_rxps; i++) {
1574 if (rx_info->rx_ctrl[i].ccb == NULL)
1577 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1578 free_irq(bnad->msix_table[vector_num].vector,
1579 rx_info->rx_ctrl[i].ccb);
/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
1587 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1588 u32 rx_id, int num_rxps)
1594 for (i = 0; i < num_rxps; i++) {
1595 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1596 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1598 rx_id + rx_info->rx_ctrl[i].ccb->id);
1599 err = request_irq(bnad->msix_table[vector_num].vector,
1600 (irq_handler_t)bnad_msix_rx, 0,
1601 rx_info->rx_ctrl[i].ccb->name,
1602 rx_info->rx_ctrl[i].ccb);
1611 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1615 /* Free Tx object Resources */
1617 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1621 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1622 if (res_info[i].res_type == BNA_RES_T_MEM)
1623 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1624 else if (res_info[i].res_type == BNA_RES_T_INTR)
1625 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1629 /* Allocates memory and interrupt resources for Tx object */
1631 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1636 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1637 if (res_info[i].res_type == BNA_RES_T_MEM)
1638 err = bnad_mem_alloc(bnad,
1639 &res_info[i].res_u.mem_info);
1640 else if (res_info[i].res_type == BNA_RES_T_INTR)
1641 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1642 &res_info[i].res_u.intr_info);
1649 bnad_tx_res_free(bnad, res_info);
1653 /* Free Rx object Resources */
1655 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1659 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1660 if (res_info[i].res_type == BNA_RES_T_MEM)
1661 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1662 else if (res_info[i].res_type == BNA_RES_T_INTR)
1663 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1667 /* Allocates memory and interrupt resources for Rx object */
1669 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1674 /* All memory needs to be allocated before setup_ccbs */
1675 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1676 if (res_info[i].res_type == BNA_RES_T_MEM)
1677 err = bnad_mem_alloc(bnad,
1678 &res_info[i].res_u.mem_info);
1679 else if (res_info[i].res_type == BNA_RES_T_INTR)
1680 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1681 &res_info[i].res_u.intr_info);
1688 bnad_rx_res_free(bnad, res_info);
1692 /* Timer callbacks */
1695 bnad_ioc_timeout(unsigned long data)
1697 struct bnad *bnad = (struct bnad *)data;
1698 unsigned long flags;
1700 spin_lock_irqsave(&bnad->bna_lock, flags);
1701 bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1702 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1706 bnad_ioc_hb_check(unsigned long data)
1708 struct bnad *bnad = (struct bnad *)data;
1709 unsigned long flags;
1711 spin_lock_irqsave(&bnad->bna_lock, flags);
1712 bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1713 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1717 bnad_iocpf_timeout(unsigned long data)
1719 struct bnad *bnad = (struct bnad *)data;
1720 unsigned long flags;
1722 spin_lock_irqsave(&bnad->bna_lock, flags);
1723 bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1724 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1728 bnad_iocpf_sem_timeout(unsigned long data)
1730 struct bnad *bnad = (struct bnad *)data;
1731 unsigned long flags;
1733 spin_lock_irqsave(&bnad->bna_lock, flags);
1734 bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1735 spin_unlock_irqrestore(&bnad->bna_lock, flags);
/*
 * All timer routines use bnad->bna_lock to protect against the following
 * race, which may occur without locking: one CPU tests the timer-running
 * flag and re-arms the timer with mod_timer() while another CPU clears the
 * flag and calls del_timer_sync(), leaving a stale timer armed.
 */
1748 /* b) Dynamic Interrupt Moderation Timer */
1750 bnad_dim_timeout(unsigned long data)
1752 struct bnad *bnad = (struct bnad *)data;
1753 struct bnad_rx_info *rx_info;
1754 struct bnad_rx_ctrl *rx_ctrl;
1756 unsigned long flags;
1758 if (!netif_carrier_ok(bnad->netdev))
1761 spin_lock_irqsave(&bnad->bna_lock, flags);
1762 for (i = 0; i < bnad->num_rx; i++) {
1763 rx_info = &bnad->rx_info[i];
1766 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1767 rx_ctrl = &rx_info->rx_ctrl[j];
1770 bna_rx_dim_update(rx_ctrl->ccb);
/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1775 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1776 mod_timer(&bnad->dim_timer,
1777 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1778 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1781 /* c) Statistics Timer */
1783 bnad_stats_timeout(unsigned long data)
1785 struct bnad *bnad = (struct bnad *)data;
1786 unsigned long flags;
1788 if (!netif_running(bnad->netdev) ||
1789 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1792 spin_lock_irqsave(&bnad->bna_lock, flags);
1793 bna_hw_stats_get(&bnad->bna);
1794 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1798 * Set up timer for DIM
1799 * Called with bnad->bna_lock held
1802 bnad_dim_timer_start(struct bnad *bnad)
1804 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1805 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1806 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1807 (unsigned long)bnad);
1808 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1809 mod_timer(&bnad->dim_timer,
1810 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1815 * Set up timer for statistics
1816 * Called with mutex_lock(&bnad->conf_mutex) held
1819 bnad_stats_timer_start(struct bnad *bnad)
1821 unsigned long flags;
1823 spin_lock_irqsave(&bnad->bna_lock, flags);
1824 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1825 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1826 (unsigned long)bnad);
1827 mod_timer(&bnad->stats_timer,
1828 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1830 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1834 * Stops the stats timer
1835 * Called with mutex_lock(&bnad->conf_mutex) held
1838 bnad_stats_timer_stop(struct bnad *bnad)
1841 unsigned long flags;
1843 spin_lock_irqsave(&bnad->bna_lock, flags);
1844 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1846 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1848 del_timer_sync(&bnad->stats_timer);
1854 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1856 int i = 1; /* Index 0 has broadcast address */
1857 struct netdev_hw_addr *mc_addr;
1859 netdev_for_each_mc_addr(mc_addr, netdev) {
1860 ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1866 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1868 struct bnad_rx_ctrl *rx_ctrl =
1869 container_of(napi, struct bnad_rx_ctrl, napi);
1870 struct bnad *bnad = rx_ctrl->bnad;
1873 rx_ctrl->rx_poll_ctr++;
1875 if (!netif_carrier_ok(bnad->netdev))
1878 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1883 napi_complete(napi);
1885 rx_ctrl->rx_complete++;
1888 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1893 #define BNAD_NAPI_POLL_QUOTA 64
1895 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1897 struct bnad_rx_ctrl *rx_ctrl;
1900 /* Initialize & enable NAPI */
1901 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1902 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1903 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1904 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1909 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1913 /* First disable and then clean up */
1914 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1915 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1918 /* Should be held with conf_lock held */
1920 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1922 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1923 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1924 unsigned long flags;
1929 init_completion(&bnad->bnad_completions.tx_comp);
1930 spin_lock_irqsave(&bnad->bna_lock, flags);
1931 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1932 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1933 wait_for_completion(&bnad->bnad_completions.tx_comp);
1935 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1936 bnad_tx_msix_unregister(bnad, tx_info,
1937 bnad->num_txq_per_tx);
1939 spin_lock_irqsave(&bnad->bna_lock, flags);
1940 bna_tx_destroy(tx_info->tx);
1941 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1946 bnad_tx_res_free(bnad, res_info);
1949 /* Should be held with conf_lock held */
1951 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1954 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1955 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1956 struct bna_intr_info *intr_info =
1957 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1958 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1959 static const struct bna_tx_event_cbfn tx_cbfn = {
1960 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1961 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1962 .tx_stall_cbfn = bnad_cb_tx_stall,
1963 .tx_resume_cbfn = bnad_cb_tx_resume,
1964 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1968 unsigned long flags;
1970 tx_info->tx_id = tx_id;
1972 /* Initialize the Tx object configuration */
1973 tx_config->num_txq = bnad->num_txq_per_tx;
1974 tx_config->txq_depth = bnad->txq_depth;
1975 tx_config->tx_type = BNA_TX_T_REGULAR;
1976 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1978 /* Get BNA's resource requirement for one tx object */
1979 spin_lock_irqsave(&bnad->bna_lock, flags);
1980 bna_tx_res_req(bnad->num_txq_per_tx,
1981 bnad->txq_depth, res_info);
1982 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1984 /* Fill Unmap Q memory requirements */
1985 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1986 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1989 /* Allocate resources */
1990 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1994 /* Ask BNA to create one Tx object, supplying required resources */
1995 spin_lock_irqsave(&bnad->bna_lock, flags);
1996 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1998 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2005 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2006 (work_func_t)bnad_tx_cleanup);
2008 /* Register ISR for the Tx object */
2009 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2010 err = bnad_tx_msix_register(bnad, tx_info,
2011 tx_id, bnad->num_txq_per_tx);
2016 spin_lock_irqsave(&bnad->bna_lock, flags);
2018 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2023 spin_lock_irqsave(&bnad->bna_lock, flags);
2024 bna_tx_destroy(tx_info->tx);
2025 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2029 bnad_tx_res_free(bnad, res_info);
2033 /* Setup the rx config for bna_rx_create */
2034 /* bnad decides the configuration */
2036 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2038 memset(rx_config, 0, sizeof(*rx_config));
2039 rx_config->rx_type = BNA_RX_T_REGULAR;
2040 rx_config->num_paths = bnad->num_rxp_per_rx;
2041 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2043 if (bnad->num_rxp_per_rx > 1) {
2044 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2045 rx_config->rss_config.hash_type =
2046 (BFI_ENET_RSS_IPV6 |
2047 BFI_ENET_RSS_IPV6_TCP |
2049 BFI_ENET_RSS_IPV4_TCP);
2050 rx_config->rss_config.hash_mask =
2051 bnad->num_rxp_per_rx - 1;
2052 netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2053 sizeof(rx_config->rss_config.toeplitz_hash_key));
2055 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2056 memset(&rx_config->rss_config, 0,
2057 sizeof(rx_config->rss_config));
2060 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2061 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
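/*
 * RSS is enabled only when there is more than one Rx path; hash_mask is
 * num_rxp_per_rx - 1 so the Toeplitz hash spreads flows across all paths,
 * and the Toeplitz key is filled from the kernel's netdev RSS key.
 */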
2063 /* BNA_RXP_SINGLE - one data-buffer queue
2064 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2065 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2067 /* TODO: configurable param for queue type */
2068 rx_config->rxp_type = BNA_RXP_SLR;
2070 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2071 rx_config->frame_size > 4096) {
		/* Though size_routing_enable is set in SLR,
		 * small packets may get routed to the same rxq.
		 * Set buf_size to 2048 instead of PAGE_SIZE.
		 */
		rx_config->q0_buf_size = 2048;
		/* this should be a multiple of 2 */
		rx_config->q0_num_vecs = 4;
2079 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2080 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2082 rx_config->q0_buf_size = rx_config->frame_size;
2083 rx_config->q0_num_vecs = 1;
2084 rx_config->q0_depth = bnad->rxq_depth;
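/*
 * Worked example: on a CAT2 adapter with a jumbo MTU (say 9000), frame_size
 * exceeds 4096, so the large queue (q0) switches to multi-buffer mode with
 * 2048-byte buffers and q0_num_vecs = 4, and q0_depth becomes
 * rxq_depth * 4 so the deeper ring can still hold the same number of frames.
 */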
2087 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2088 if (rx_config->rxp_type == BNA_RXP_SLR) {
2089 rx_config->q1_depth = bnad->rxq_depth;
2090 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2093 rx_config->vlan_strip_status =
2094 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2095 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2099 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2101 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2104 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2105 rx_info->rx_ctrl[i].bnad = bnad;
2108 /* Called with mutex_lock(&bnad->conf_mutex) held */
2110 bnad_reinit_rx(struct bnad *bnad)
2112 struct net_device *netdev = bnad->netdev;
2113 u32 err = 0, current_err = 0;
2114 u32 rx_id = 0, count = 0;
2115 unsigned long flags;
2117 /* destroy and create new rx objects */
2118 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2119 if (!bnad->rx_info[rx_id].rx)
2121 bnad_destroy_rx(bnad, rx_id);
2124 spin_lock_irqsave(&bnad->bna_lock, flags);
2125 bna_enet_mtu_set(&bnad->bna.enet,
2126 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2127 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2129 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2131 current_err = bnad_setup_rx(bnad, rx_id);
2132 if (current_err && !err) {
2134 netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2138 /* restore rx configuration */
2139 if (bnad->rx_info[0].rx && !err) {
2140 bnad_restore_vlans(bnad, 0);
2141 bnad_enable_default_bcast(bnad);
2142 spin_lock_irqsave(&bnad->bna_lock, flags);
2143 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2144 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2145 bnad_set_rx_mode(netdev);
2151 /* Called with bnad_conf_lock() held */
2153 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2155 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2156 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2157 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2158 unsigned long flags;
2165 spin_lock_irqsave(&bnad->bna_lock, flags);
2166 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2167 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2168 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2171 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2173 del_timer_sync(&bnad->dim_timer);
2176 init_completion(&bnad->bnad_completions.rx_comp);
2177 spin_lock_irqsave(&bnad->bna_lock, flags);
2178 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2179 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2180 wait_for_completion(&bnad->bnad_completions.rx_comp);
2182 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2183 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2185 bnad_napi_delete(bnad, rx_id);
2187 spin_lock_irqsave(&bnad->bna_lock, flags);
2188 bna_rx_destroy(rx_info->rx);
2192 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2194 bnad_rx_res_free(bnad, res_info);
2197 /* Called with mutex_lock(&bnad->conf_mutex) held */
2199 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2202 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2203 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2204 struct bna_intr_info *intr_info =
2205 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2206 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2207 static const struct bna_rx_event_cbfn rx_cbfn = {
2208 .rcb_setup_cbfn = NULL,
2209 .rcb_destroy_cbfn = NULL,
2210 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2211 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2212 .rx_stall_cbfn = bnad_cb_rx_stall,
2213 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2214 .rx_post_cbfn = bnad_cb_rx_post,
2217 unsigned long flags;
2219 rx_info->rx_id = rx_id;
2221 /* Initialize the Rx object configuration */
2222 bnad_init_rx_config(bnad, rx_config);
2224 /* Get BNA's resource requirement for one Rx object */
2225 spin_lock_irqsave(&bnad->bna_lock, flags);
2226 bna_rx_res_req(rx_config, res_info);
2227 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2229 /* Fill Unmap Q memory requirements */
2230 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2231 rx_config->num_paths,
2232 (rx_config->q0_depth *
2233 sizeof(struct bnad_rx_unmap)) +
2234 sizeof(struct bnad_rx_unmap_q));
2236 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2237 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2238 rx_config->num_paths,
2239 (rx_config->q1_depth *
2240 sizeof(struct bnad_rx_unmap) +
2241 sizeof(struct bnad_rx_unmap_q)));
2243 /* Allocate resource */
2244 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2248 bnad_rx_ctrl_init(bnad, rx_id);
2250 /* Ask BNA to create one Rx object, supplying required resources */
2251 spin_lock_irqsave(&bnad->bna_lock, flags);
2252 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2256 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2260 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2262 INIT_WORK(&rx_info->rx_cleanup_work,
2263 (work_func_t)(bnad_rx_cleanup));
	/* Init NAPI, so that state is set to NAPI_STATE_SCHED and the
	 * IRQ handler cannot schedule NAPI at this point.
	 */
2269 bnad_napi_add(bnad, rx_id);
2271 /* Register ISR for the Rx object */
2272 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2273 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2274 rx_config->num_paths);
2279 spin_lock_irqsave(&bnad->bna_lock, flags);
2281 /* Set up Dynamic Interrupt Moderation Vector */
2282 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2283 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2285 /* Enable VLAN filtering only on the default Rx */
2286 bna_rx_vlanfilter_enable(rx);
2288 /* Start the DIM timer */
2289 bnad_dim_timer_start(bnad);
2293 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2298 bnad_destroy_rx(bnad, rx_id);
2302 /* Called with conf_lock & bnad->bna_lock held */
2304 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2306 struct bnad_tx_info *tx_info;
2308 tx_info = &bnad->tx_info[0];
2312 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2315 /* Called with conf_lock & bnad->bna_lock held */
2317 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2319 struct bnad_rx_info *rx_info;
2322 for (i = 0; i < bnad->num_rx; i++) {
2323 rx_info = &bnad->rx_info[i];
2326 bna_rx_coalescing_timeo_set(rx_info->rx,
2327 bnad->rx_coalescing_timeo);
2332 * Called with bnad->bna_lock held
2335 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2339 if (!is_valid_ether_addr(mac_addr))
2340 return -EADDRNOTAVAIL;
2342 /* If datapath is down, pretend everything went through */
2343 if (!bnad->rx_info[0].rx)
2346 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2347 if (ret != BNA_CB_SUCCESS)
2348 return -EADDRNOTAVAIL;
2353 /* Should be called with conf_lock held */
2355 bnad_enable_default_bcast(struct bnad *bnad)
2357 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2359 unsigned long flags;
2361 init_completion(&bnad->bnad_completions.mcast_comp);
2363 spin_lock_irqsave(&bnad->bna_lock, flags);
2364 ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2365 bnad_cb_rx_mcast_add);
2366 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2368 if (ret == BNA_CB_SUCCESS)
2369 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2373 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2379 /* Called with mutex_lock(&bnad->conf_mutex) held */
2381 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2384 unsigned long flags;
2386 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2387 spin_lock_irqsave(&bnad->bna_lock, flags);
2388 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2389 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2393 /* Statistics utilities */
2395 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2399 for (i = 0; i < bnad->num_rx; i++) {
2400 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2401 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2402 stats->rx_packets += bnad->rx_info[i].
2403 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2404 stats->rx_bytes += bnad->rx_info[i].
2405 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2406 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2407 bnad->rx_info[i].rx_ctrl[j].ccb->
2409 stats->rx_packets +=
2410 bnad->rx_info[i].rx_ctrl[j].
2411 ccb->rcb[1]->rxq->rx_packets;
2413 bnad->rx_info[i].rx_ctrl[j].
2414 ccb->rcb[1]->rxq->rx_bytes;
2419 for (i = 0; i < bnad->num_tx; i++) {
2420 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2421 if (bnad->tx_info[i].tcb[j]) {
2422 stats->tx_packets +=
2423 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2425 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2432 * Must be called with the bna_lock held.
2435 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2437 struct bfi_enet_stats_mac *mac_stats;
2441 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2443 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2444 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2445 mac_stats->rx_undersize;
2446 stats->tx_errors = mac_stats->tx_fcs_error +
2447 mac_stats->tx_undersize;
2448 stats->rx_dropped = mac_stats->rx_drop;
2449 stats->tx_dropped = mac_stats->tx_drop;
2450 stats->multicast = mac_stats->rx_multicast;
2451 stats->collisions = mac_stats->tx_total_collision;
2453 stats->rx_length_errors = mac_stats->rx_frame_length_error;
	/* receive ring buffer overflow ?? */
	stats->rx_crc_errors = mac_stats->rx_fcs_error;
	stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* receiver FIFO overrun */
2460 bmap = bna_rx_rid_mask(&bnad->bna);
2461 for (i = 0; bmap; i++) {
2463 stats->rx_fifo_errors +=
2464 bnad->stats.bna_stats->
2465 hw_stats.rxf_stats[i].frame_drops;
2473 bnad_mbox_irq_sync(struct bnad *bnad)
2476 unsigned long flags;
2478 spin_lock_irqsave(&bnad->bna_lock, flags);
2479 if (bnad->cfg_flags & BNAD_CF_MSIX)
2480 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2482 irq = bnad->pcidev->irq;
2483 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2485 synchronize_irq(irq);
2488 /* Utility used by bnad_start_xmit, for doing TSO */
2490 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2494 err = skb_cow_head(skb, 0);
2496 BNAD_UPDATE_CTR(bnad, tso_err);
2501 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2502 * excluding the length field.
2504 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2505 struct iphdr *iph = ip_hdr(skb);
2507 /* Do we really need these? */
2511 tcp_hdr(skb)->check =
2512 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2514 BNAD_UPDATE_CTR(bnad, tso4);
2516 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2518 ipv6h->payload_len = 0;
2519 tcp_hdr(skb)->check =
2520 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2522 BNAD_UPDATE_CTR(bnad, tso6);
2529 * Initialize Q numbers depending on Rx Paths
2530 * Called with bnad->bna_lock held, because of cfg_flags
2534 bnad_q_num_init(struct bnad *bnad)
2538 rxps = min((uint)num_online_cpus(),
2539 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2541 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2542 rxps = 1; /* INTx */
2546 bnad->num_rxp_per_rx = rxps;
2547 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
/*
 * Adjusts the Q numbers, given a number of msix vectors.
 * Give preference to RSS as opposed to Tx priority queues;
 * in that case, just use 1 Tx Q.
 * Called with bnad->bna_lock held because of cfg_flags access.
 */
2557 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2559 bnad->num_txq_per_tx = 1;
2560 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2561 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2562 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2563 bnad->num_rxp_per_rx = msix_vectors -
2564 (bnad->num_tx * bnad->num_txq_per_tx) -
2565 BNAD_MAILBOX_MSIX_VECTORS;
2567 bnad->num_rxp_per_rx = 1;
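/*
 * Example: with one Tx queue and BNAD_MAILBOX_MSIX_VECTORS reserved for
 * the mailbox, a grant of N vectors leaves
 * N - 1 - BNAD_MAILBOX_MSIX_VECTORS Rx paths for RSS; if the grant is too
 * small, fall back to a single Rx path.
 */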
2570 /* Enable / disable ioceth */
2572 bnad_ioceth_disable(struct bnad *bnad)
2574 unsigned long flags;
2577 spin_lock_irqsave(&bnad->bna_lock, flags);
2578 init_completion(&bnad->bnad_completions.ioc_comp);
2579 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2580 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2582 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2583 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2585 err = bnad->bnad_completions.ioc_comp_status;
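/*
 * bnad_ioceth_disable() above and bnad_ioceth_enable() below share the
 * same pattern: arm a completion, kick the asynchronous IOC state machine
 * under bna_lock, then wait (with a timeout) for the firmware callback to
 * post the completion and record the status.
 */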
2590 bnad_ioceth_enable(struct bnad *bnad)
2593 unsigned long flags;
2595 spin_lock_irqsave(&bnad->bna_lock, flags);
2596 init_completion(&bnad->bnad_completions.ioc_comp);
2597 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2598 bna_ioceth_enable(&bnad->bna.ioceth);
2599 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2601 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2602 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2604 err = bnad->bnad_completions.ioc_comp_status;
2609 /* Free BNA resources */
2611 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2616 for (i = 0; i < res_val_max; i++)
2617 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2620 /* Allocates memory and interrupt resources for BNA */
2622 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2627 for (i = 0; i < res_val_max; i++) {
2628 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2635 bnad_res_free(bnad, res_info, res_val_max);
2639 /* Interrupt enable / disable */
2641 bnad_enable_msix(struct bnad *bnad)
2644 unsigned long flags;
2646 spin_lock_irqsave(&bnad->bna_lock, flags);
2647 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2651 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2653 if (bnad->msix_table)
2657 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2659 if (!bnad->msix_table)
2662 for (i = 0; i < bnad->msix_num; i++)
2663 bnad->msix_table[i].entry = i;
2665 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2669 } else if (ret < bnad->msix_num) {
2670 dev_warn(&bnad->pcidev->dev,
2671 "%d MSI-X vectors allocated < %d requested\n",
2672 ret, bnad->msix_num);
2674 spin_lock_irqsave(&bnad->bna_lock, flags);
2675 /* ret = number of vectors actually allocated */
2676 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2677 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2678 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2680 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2681 BNAD_MAILBOX_MSIX_VECTORS;
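/*
 * Fewer vectors than requested were granted: shrink the Tx/Rx queue
 * counts to fit the grant and recompute msix_num; if even the reduced
 * requirement exceeds what was granted, MSI-X is abandoned below and the
 * driver falls back to INTx.
 */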
2683 if (bnad->msix_num > ret) {
2684 pci_disable_msix(bnad->pcidev);
2689 pci_intx(bnad->pcidev, 0);
2694 dev_warn(&bnad->pcidev->dev,
2695 "MSI-X enable failed - operating in INTx mode\n");
2697 kfree(bnad->msix_table);
2698 bnad->msix_table = NULL;
2700 spin_lock_irqsave(&bnad->bna_lock, flags);
2701 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2702 bnad_q_num_init(bnad);
2703 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2707 bnad_disable_msix(struct bnad *bnad)
2710 unsigned long flags;
2712 spin_lock_irqsave(&bnad->bna_lock, flags);
2713 cfg_flags = bnad->cfg_flags;
2714 if (bnad->cfg_flags & BNAD_CF_MSIX)
2715 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2716 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2718 if (cfg_flags & BNAD_CF_MSIX) {
2719 pci_disable_msix(bnad->pcidev);
2720 kfree(bnad->msix_table);
2721 bnad->msix_table = NULL;
2725 /* Netdev entry points */
2727 bnad_open(struct net_device *netdev)
2730 struct bnad *bnad = netdev_priv(netdev);
2731 struct bna_pause_config pause_config;
2732 unsigned long flags;
2734 mutex_lock(&bnad->conf_mutex);
2737 err = bnad_setup_tx(bnad, 0);
2742 err = bnad_setup_rx(bnad, 0);
2747 pause_config.tx_pause = 0;
2748 pause_config.rx_pause = 0;
2750 spin_lock_irqsave(&bnad->bna_lock, flags);
2751 bna_enet_mtu_set(&bnad->bna.enet,
2752 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2753 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2754 bna_enet_enable(&bnad->bna.enet);
2755 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2757 /* Enable broadcast */
2758 bnad_enable_default_bcast(bnad);
2760 /* Restore VLANs, if any */
2761 bnad_restore_vlans(bnad, 0);
2763 /* Set the UCAST address */
2764 spin_lock_irqsave(&bnad->bna_lock, flags);
2765 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2766 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2768 /* Start the stats timer */
2769 bnad_stats_timer_start(bnad);
2771 mutex_unlock(&bnad->conf_mutex);
2776 bnad_destroy_tx(bnad, 0);
2779 mutex_unlock(&bnad->conf_mutex);
2784 bnad_stop(struct net_device *netdev)
2786 struct bnad *bnad = netdev_priv(netdev);
2787 unsigned long flags;
2789 mutex_lock(&bnad->conf_mutex);
2791 /* Stop the stats timer */
2792 bnad_stats_timer_stop(bnad);
2794 init_completion(&bnad->bnad_completions.enet_comp);
2796 spin_lock_irqsave(&bnad->bna_lock, flags);
2797 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2798 bnad_cb_enet_disabled);
2799 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2801 wait_for_completion(&bnad->bnad_completions.enet_comp);
2803 bnad_destroy_tx(bnad, 0);
2804 bnad_destroy_rx(bnad, 0);
2806 /* Synchronize mailbox IRQ */
2807 bnad_mbox_irq_sync(bnad);
2809 mutex_unlock(&bnad->conf_mutex);
2815 /* Returns 0 for success */
2817 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2818 struct sk_buff *skb, struct bna_txq_entry *txqent)
2824 if (skb_vlan_tag_present(skb)) {
2825 vlan_tag = (u16)skb_vlan_tag_get(skb);
2826 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2828 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2829 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2830 | (vlan_tag & 0x1fff);
2831 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2833 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2835 if (skb_is_gso(skb)) {
2836 gso_size = skb_shinfo(skb)->gso_size;
2837 if (unlikely(gso_size > bnad->netdev->mtu)) {
2838 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2841 if (unlikely((gso_size + skb_transport_offset(skb) +
2842 tcp_hdrlen(skb)) >= skb->len)) {
2843 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2844 txqent->hdr.wi.lso_mss = 0;
2845 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2847 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2848 txqent->hdr.wi.lso_mss = htons(gso_size);
2851 if (bnad_tso_prepare(bnad, skb)) {
2852 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2856 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2857 txqent->hdr.wi.l4_hdr_size_n_offset =
2858 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2859 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2861 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2862 txqent->hdr.wi.lso_mss = 0;
2864 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2865 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2869 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2870 __be16 net_proto = vlan_get_protocol(skb);
2873 if (net_proto == htons(ETH_P_IP))
2874 proto = ip_hdr(skb)->protocol;
2875 #ifdef NETIF_F_IPV6_CSUM
2876 else if (net_proto == htons(ETH_P_IPV6)) {
2877 /* nexthdr may not be TCP immediately. */
2878 proto = ipv6_hdr(skb)->nexthdr;
2881 if (proto == IPPROTO_TCP) {
2882 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2883 txqent->hdr.wi.l4_hdr_size_n_offset =
2884 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2885 (0, skb_transport_offset(skb)));
2887 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2889 if (unlikely(skb_headlen(skb) <
2890 skb_transport_offset(skb) +
2891 tcp_hdrlen(skb))) {
2892 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2895 } else if (proto == IPPROTO_UDP) {
2896 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2897 txqent->hdr.wi.l4_hdr_size_n_offset =
2898 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2899 (0, skb_transport_offset(skb)));
2901 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2902 if (unlikely(skb_headlen(skb) <
2903 skb_transport_offset(skb) +
2904 sizeof(struct udphdr))) {
2905 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2910 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2914 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2917 txqent->hdr.wi.flags = htons(flags);
2918 txqent->hdr.wi.frame_length = htonl(skb->len);
2924 * bnad_start_xmit : Netdev entry point for Transmit
2925 * Called under lock held by net_device
2928 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2930 struct bnad *bnad = netdev_priv(netdev);
2932 struct bna_tcb *tcb = NULL;
2933 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2934 u32 prod, q_depth, vect_id;
2935 u32 wis, vectors, len;
2937 dma_addr_t dma_addr;
2938 struct bna_txq_entry *txqent;
2940 len = skb_headlen(skb);
2942 /* Sanity checks for the skb */
2944 if (unlikely(skb->len <= ETH_HLEN)) {
2945 dev_kfree_skb_any(skb);
2946 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2947 return NETDEV_TX_OK;
2949 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2950 dev_kfree_skb_any(skb);
2951 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2952 return NETDEV_TX_OK;
2954 if (unlikely(len == 0)) {
2955 dev_kfree_skb_any(skb);
2956 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2957 return NETDEV_TX_OK;
2960 tcb = bnad->tx_info[0].tcb[txq_id];
2963 * Takes care of the Tx that is scheduled between clearing the flag
2964 * and the netif_tx_stop_all_queues() call.
2966 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2967 dev_kfree_skb_any(skb);
2968 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2969 return NETDEV_TX_OK;
2972 q_depth = tcb->q_depth;
2973 prod = tcb->producer_index;
2974 unmap_q = tcb->unmap_q;
2976 vectors = 1 + skb_shinfo(skb)->nr_frags;
2977 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
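/*
 * Example: an skb with a linear area plus six page fragments needs seven
 * Tx vectors, i.e. two work items; the second work item carries the
 * BNA_TXQ_WI_EXTENSION opcode instead of a full send header.
 */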
2979 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2980 dev_kfree_skb_any(skb);
2981 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2982 return NETDEV_TX_OK;
2985 /* Check for available TxQ resources */
2986 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2987 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2988 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2990 sent = bnad_txcmpl_process(bnad, tcb);
2991 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2992 bna_ib_ack(tcb->i_dbell, sent);
2993 smp_mb__before_atomic();
2994 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2996 netif_stop_queue(netdev);
2997 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3002 * Check again to deal with the race between
3003 * netif_stop_queue() here and netif_wake_queue() in the
3004 * interrupt handler, which does not run under the netif tx lock.
3006 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3007 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3008 return NETDEV_TX_BUSY;
3010 netif_wake_queue(netdev);
3011 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3015 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3016 head_unmap = &unmap_q[prod];
3018 /* Program the opcode, flags, frame_len, num_vectors in WI */
3019 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3020 dev_kfree_skb_any(skb);
3021 return NETDEV_TX_OK;
3023 txqent->hdr.wi.reserved = 0;
3024 txqent->hdr.wi.num_vectors = vectors;
3026 head_unmap->skb = skb;
3027 head_unmap->nvecs = 0;
3029 /* Program the vectors */
3030 unmap = head_unmap;
3031 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3032 len, DMA_TO_DEVICE);
3033 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3034 dev_kfree_skb_any(skb);
3035 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3036 return NETDEV_TX_OK;
3038 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3039 txqent->vector[0].length = htons(len);
3040 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3041 head_unmap->nvecs++;
3043 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3044 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3045 u32 size = skb_frag_size(frag);
3047 if (unlikely(size == 0)) {
3048 /* Undo the changes starting at tcb->producer_index */
3049 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3050 tcb->producer_index);
3051 dev_kfree_skb_any(skb);
3052 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3053 return NETDEV_TX_OK;
3059 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3060 vect_id = 0;
3061 BNA_QE_INDX_INC(prod, q_depth);
3062 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3063 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3064 unmap = &unmap_q[prod];
3067 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3068 0, size, DMA_TO_DEVICE);
3069 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3070 /* Undo the changes starting at tcb->producer_index */
3071 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3072 tcb->producer_index);
3073 dev_kfree_skb_any(skb);
3074 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3075 return NETDEV_TX_OK;
3078 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3079 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3080 txqent->vector[vect_id].length = htons(size);
3081 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3082 dma_addr);
3083 head_unmap->nvecs++;
3086 if (unlikely(len != skb->len)) {
3087 /* Undo the changes starting at tcb->producer_index */
3088 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3089 dev_kfree_skb_any(skb);
3090 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3091 return NETDEV_TX_OK;
3094 BNA_QE_INDX_INC(prod, q_depth);
3095 tcb->producer_index = prod;
3099 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3100 return NETDEV_TX_OK;
3102 skb_tx_timestamp(skb);
3104 bna_txq_prod_indx_doorbell(tcb);
3107 return NETDEV_TX_OK;
3111 * Uses a spin_lock to synchronize reads of the stats structures,
3112 * which are written by BNA under the same lock.
3114 static struct rtnl_link_stats64 *
3115 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3117 struct bnad *bnad = netdev_priv(netdev);
3118 unsigned long flags;
3120 spin_lock_irqsave(&bnad->bna_lock, flags);
3122 bnad_netdev_qstats_fill(bnad, stats);
3123 bnad_netdev_hwstats_fill(bnad, stats);
3125 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3131 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3133 struct net_device *netdev = bnad->netdev;
3134 int uc_count = netdev_uc_count(netdev);
3135 enum bna_cb_status ret;
3137 struct netdev_hw_addr *ha;
3140 if (netdev_uc_empty(bnad->netdev)) {
3141 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3145 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3148 mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3149 if (mac_list == NULL)
3153 netdev_for_each_uc_addr(ha, netdev) {
3154 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3158 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3161 if (ret != BNA_CB_SUCCESS)
3166 /* unicast packets not in the UCAM are routed to the default function */
3168 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3169 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
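/*
 * In default mode the UCAM list is simply cleared (count 0, NULL list),
 * so unicast frames that match no other function are delivered to this
 * (default) function instead.
 */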
3173 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3175 struct net_device *netdev = bnad->netdev;
3176 int mc_count = netdev_mc_count(netdev);
3177 enum bna_cb_status ret;
3180 if (netdev->flags & IFF_ALLMULTI)
3183 if (netdev_mc_empty(netdev))
3186 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3189 mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3191 if (mac_list == NULL)
3194 ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3196 /* copy the rest of the multicast addresses */
3197 bnad_netdev_mc_list_get(netdev, mac_list);
3198 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3201 if (ret != BNA_CB_SUCCESS)
3207 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3208 bna_rx_mcast_delall(bnad->rx_info[0].rx);
3212 bnad_set_rx_mode(struct net_device *netdev)
3214 struct bnad *bnad = netdev_priv(netdev);
3215 enum bna_rxmode new_mode, mode_mask;
3216 unsigned long flags;
3218 spin_lock_irqsave(&bnad->bna_lock, flags);
3220 if (bnad->rx_info[0].rx == NULL) {
3221 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3225 /* clear bnad flags so they can be updated with the new settings */
3226 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3230 if (netdev->flags & IFF_PROMISC) {
3231 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3232 bnad->cfg_flags |= BNAD_CF_PROMISC;
3234 bnad_set_rx_mcast_fltr(bnad);
3236 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3237 new_mode |= BNA_RXMODE_ALLMULTI;
3239 bnad_set_rx_ucast_fltr(bnad);
3241 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3242 new_mode |= BNA_RXMODE_DEFAULT;
3245 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3246 BNA_RXMODE_ALLMULTI;
3247 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3249 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3253 * bna_lock is used to synchronize writes to netdev->dev_addr.
3254 * conf_lock cannot be used since this call may be made
3255 * from a non-blocking context.
3258 bnad_set_mac_address(struct net_device *netdev, void *addr)
3261 struct bnad *bnad = netdev_priv(netdev);
3262 struct sockaddr *sa = (struct sockaddr *)addr;
3263 unsigned long flags;
3265 spin_lock_irqsave(&bnad->bna_lock, flags);
3267 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3269 ether_addr_copy(netdev->dev_addr, sa->sa_data);
3271 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3277 bnad_mtu_set(struct bnad *bnad, int frame_size)
3279 unsigned long flags;
3281 init_completion(&bnad->bnad_completions.mtu_comp);
3283 spin_lock_irqsave(&bnad->bna_lock, flags);
3284 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3285 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3287 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3289 return bnad->bnad_completions.mtu_comp_status;
3293 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3296 struct bnad *bnad = netdev_priv(netdev);
3297 u32 rx_count = 0, frame, new_frame;
3299 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3302 mutex_lock(&bnad->conf_mutex);
3305 netdev->mtu = new_mtu;
3307 frame = BNAD_FRAME_SIZE(mtu);
3308 new_frame = BNAD_FRAME_SIZE(new_mtu);
3310 /* check if multi-buffer needs to be enabled */
3311 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3312 netif_running(bnad->netdev)) {
3313 /* reinitialize Rx only when the frame size crosses the 4K boundary */
3314 if ((frame <= 4096 && new_frame > 4096) ||
3315 (frame > 4096 && new_frame <= 4096))
3316 rx_count = bnad_reinit_rx(bnad);
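/*
 * Crossing the 4K frame-size boundary on CAT2 adapters toggles
 * multi-buffer receive, which is why the Rx queues are torn down and
 * re-created here; rx_count reflects how many Rx objects were re-created.
 */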
3319 /* rx_count > 0 - a new Rx was created;
3320 * Linux sets err = 0 and returns
3322 err = bnad_mtu_set(bnad, new_frame);
3326 mutex_unlock(&bnad->conf_mutex);
3331 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3333 struct bnad *bnad = netdev_priv(netdev);
3334 unsigned long flags;
3336 if (!bnad->rx_info[0].rx)
3339 mutex_lock(&bnad->conf_mutex);
3341 spin_lock_irqsave(&bnad->bna_lock, flags);
3342 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3343 set_bit(vid, bnad->active_vlans);
3344 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3346 mutex_unlock(&bnad->conf_mutex);
3352 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3354 struct bnad *bnad = netdev_priv(netdev);
3355 unsigned long flags;
3357 if (!bnad->rx_info[0].rx)
3360 mutex_lock(&bnad->conf_mutex);
3362 spin_lock_irqsave(&bnad->bna_lock, flags);
3363 clear_bit(vid, bnad->active_vlans);
3364 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3365 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3367 mutex_unlock(&bnad->conf_mutex);
3372 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3374 struct bnad *bnad = netdev_priv(dev);
3375 netdev_features_t changed = features ^ dev->features;
3377 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3378 unsigned long flags;
3380 spin_lock_irqsave(&bnad->bna_lock, flags);
3382 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3383 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3385 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3387 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3393 #ifdef CONFIG_NET_POLL_CONTROLLER
3395 bnad_netpoll(struct net_device *netdev)
3397 struct bnad *bnad = netdev_priv(netdev);
3398 struct bnad_rx_info *rx_info;
3399 struct bnad_rx_ctrl *rx_ctrl;
3403 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3404 bna_intx_disable(&bnad->bna, curr_mask);
3405 bnad_isr(bnad->pcidev->irq, netdev);
3406 bna_intx_enable(&bnad->bna, curr_mask);
3409 * Tx processing may happen in sending context, so no need
3410 * to explicitly process completions here
3414 for (i = 0; i < bnad->num_rx; i++) {
3415 rx_info = &bnad->rx_info[i];
3418 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3419 rx_ctrl = &rx_info->rx_ctrl[j];
3421 bnad_netif_rx_schedule_poll(bnad,
3429 static const struct net_device_ops bnad_netdev_ops = {
3430 .ndo_open = bnad_open,
3431 .ndo_stop = bnad_stop,
3432 .ndo_start_xmit = bnad_start_xmit,
3433 .ndo_get_stats64 = bnad_get_stats64,
3434 .ndo_set_rx_mode = bnad_set_rx_mode,
3435 .ndo_validate_addr = eth_validate_addr,
3436 .ndo_set_mac_address = bnad_set_mac_address,
3437 .ndo_change_mtu = bnad_change_mtu,
3438 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3439 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3440 .ndo_set_features = bnad_set_features,
3441 #ifdef CONFIG_NET_POLL_CONTROLLER
3442 .ndo_poll_controller = bnad_netpoll
3447 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3449 struct net_device *netdev = bnad->netdev;
3451 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3452 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3453 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3454 NETIF_F_HW_VLAN_CTAG_RX;
3456 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3457 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3458 NETIF_F_TSO | NETIF_F_TSO6;
3460 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3462 if (using_dac)
3463 netdev->features |= NETIF_F_HIGHDMA;
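/*
 * NETIF_F_HIGHDMA is advertised only when the 64-bit DMA mask was
 * accepted (using_dac), allowing the stack to hand us skbs with fragments
 * in high memory.
 */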
3465 netdev->mem_start = bnad->mmio_start;
3466 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3468 netdev->netdev_ops = &bnad_netdev_ops;
3469 bnad_set_ethtool_ops(netdev);
3473 * 1. Initialize the bnad structure
3474 * 2. Set up the netdev pointer in pci_dev
3475 * 3. Initialize the number of TxQs, CQs, and MSI-X vectors
3476 * 4. Initialize the work queue.
3479 bnad_init(struct bnad *bnad,
3480 struct pci_dev *pdev, struct net_device *netdev)
3482 unsigned long flags;
3484 SET_NETDEV_DEV(netdev, &pdev->dev);
3485 pci_set_drvdata(pdev, netdev);
3487 bnad->netdev = netdev;
3488 bnad->pcidev = pdev;
3489 bnad->mmio_start = pci_resource_start(pdev, 0);
3490 bnad->mmio_len = pci_resource_len(pdev, 0);
3491 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3492 if (!bnad->bar0) {
3493 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3496 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3497 (unsigned long long) bnad->mmio_len);
3499 spin_lock_irqsave(&bnad->bna_lock, flags);
3500 if (!bnad_msix_disable)
3501 bnad->cfg_flags = BNAD_CF_MSIX;
3503 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3505 bnad_q_num_init(bnad);
3506 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3508 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3509 (bnad->num_rx * bnad->num_rxp_per_rx) +
3510 BNAD_MAILBOX_MSIX_VECTORS;
3512 bnad->txq_depth = BNAD_TXQ_DEPTH;
3513 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3515 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3516 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3518 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3519 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3520 if (!bnad->work_q) {
3521 iounmap(bnad->bar0);
3529 * Must be called after bnad_pci_uninit()
3530 * so that iounmap() and pci_set_drvdata(NULL)
3531 * happen only after PCI uninitialization.
3534 bnad_uninit(struct bnad *bnad)
3537 flush_workqueue(bnad->work_q);
3538 destroy_workqueue(bnad->work_q);
3539 bnad->work_q = NULL;
3543 iounmap(bnad->bar0);
3548 a) Per-ioceth mutex used for serializing configuration
3549 changes from the OS interface
3550 b) spin lock used to protect the bna state machine
3553 bnad_lock_init(struct bnad *bnad)
3555 spin_lock_init(&bnad->bna_lock);
3556 mutex_init(&bnad->conf_mutex);
3560 bnad_lock_uninit(struct bnad *bnad)
3562 mutex_destroy(&bnad->conf_mutex);
3565 /* PCI Initialization */
3567 bnad_pci_init(struct bnad *bnad,
3568 struct pci_dev *pdev, bool *using_dac)
3572 err = pci_enable_device(pdev);
3575 err = pci_request_regions(pdev, BNAD_NAME);
3577 goto disable_device;
3578 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3579 *using_dac = true;
3580 } else {
3581 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3582 if (err)
3583 goto release_regions;
3586 pci_set_master(pdev);
3590 pci_release_regions(pdev);
3592 pci_disable_device(pdev);
3598 bnad_pci_uninit(struct pci_dev *pdev)
3600 pci_release_regions(pdev);
3601 pci_disable_device(pdev);
3605 bnad_pci_probe(struct pci_dev *pdev,
3606 const struct pci_device_id *pcidev_id)
3612 struct net_device *netdev;
3613 struct bfa_pcidev pcidev_info;
3614 unsigned long flags;
3616 mutex_lock(&bnad_fwimg_mutex);
3617 if (!cna_get_firmware_buf(pdev)) {
3618 mutex_unlock(&bnad_fwimg_mutex);
3619 dev_err(&pdev->dev, "failed to load firmware image!\n");
3622 mutex_unlock(&bnad_fwimg_mutex);
3625 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3626 * bnad = netdev_priv(netdev)
3628 netdev = alloc_etherdev(sizeof(struct bnad));
3633 bnad = netdev_priv(netdev);
3634 bnad_lock_init(bnad);
3635 bnad->id = atomic_inc_return(&bna_id) - 1;
3637 mutex_lock(&bnad->conf_mutex);
3639 * PCI initialization
3640 * Output : using_dac = 1 for 64-bit DMA,
3641 * 0 for 32-bit DMA
3644 err = bnad_pci_init(bnad, pdev, &using_dac);
3649 * Initialize bnad structure
3650 * Setup relation between pci_dev & netdev
3652 err = bnad_init(bnad, pdev, netdev);
3656 /* Initialize netdev structure, set up ethtool ops */
3657 bnad_netdev_init(bnad, using_dac);
3659 /* Set link to down state */
3660 netif_carrier_off(netdev);
3662 /* Set up the debugfs node for this bnad */
3663 if (bna_debugfs_enable)
3664 bnad_debugfs_init(bnad);
3666 /* Get resource requirements from bna */
3667 spin_lock_irqsave(&bnad->bna_lock, flags);
3668 bna_res_req(&bnad->res_info[0]);
3669 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3671 /* Allocate resources from bna */
3672 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3678 /* Setup pcidev_info for bna_init() */
3679 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3680 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3681 pcidev_info.device_id = bnad->pcidev->device;
3682 pcidev_info.pci_bar_kva = bnad->bar0;
3684 spin_lock_irqsave(&bnad->bna_lock, flags);
3685 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3686 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3688 bnad->stats.bna_stats = &bna->stats;
3690 bnad_enable_msix(bnad);
3691 err = bnad_mbox_irq_alloc(bnad);
3696 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3697 (unsigned long)bnad);
3698 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3699 (unsigned long)bnad);
3700 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3701 (unsigned long)bnad);
3702 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3703 (unsigned long)bnad);
3707 * If the callback comes back with an error, we bail out.
3708 * This is a catastrophic error.
3710 err = bnad_ioceth_enable(bnad);
3712 dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3716 spin_lock_irqsave(&bnad->bna_lock, flags);
3717 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3718 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3719 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3720 bna_attr(bna)->num_rxp - 1);
3721 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3722 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3725 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3727 goto disable_ioceth;
3729 spin_lock_irqsave(&bnad->bna_lock, flags);
3730 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3731 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3733 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3736 goto disable_ioceth;
3739 spin_lock_irqsave(&bnad->bna_lock, flags);
3740 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3741 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3743 /* Get the burned-in MAC address */
3744 spin_lock_irqsave(&bnad->bna_lock, flags);
3745 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3746 bnad_set_netdev_perm_addr(bnad);
3747 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3749 mutex_unlock(&bnad->conf_mutex);
3751 /* Finally, register with the net_device layer */
3752 err = register_netdev(netdev);
3754 dev_err(&pdev->dev, "registering net device failed\n");
3757 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3762 mutex_unlock(&bnad->conf_mutex);
3766 mutex_lock(&bnad->conf_mutex);
3767 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3769 bnad_ioceth_disable(bnad);
3770 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3771 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3772 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3773 spin_lock_irqsave(&bnad->bna_lock, flags);
3775 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3776 bnad_mbox_irq_free(bnad);
3777 bnad_disable_msix(bnad);
3779 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3781 /* Remove the debugfs node for this bnad */
3782 kfree(bnad->regdata);
3783 bnad_debugfs_uninit(bnad);
3786 bnad_pci_uninit(pdev);
3788 mutex_unlock(&bnad->conf_mutex);
3789 bnad_lock_uninit(bnad);
3790 free_netdev(netdev);
3795 bnad_pci_remove(struct pci_dev *pdev)
3797 struct net_device *netdev = pci_get_drvdata(pdev);
3800 unsigned long flags;
3805 bnad = netdev_priv(netdev);
3808 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3809 unregister_netdev(netdev);
3811 mutex_lock(&bnad->conf_mutex);
3812 bnad_ioceth_disable(bnad);
3813 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3814 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3815 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3816 spin_lock_irqsave(&bnad->bna_lock, flags);
3818 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3820 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3821 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3822 bnad_mbox_irq_free(bnad);
3823 bnad_disable_msix(bnad);
3824 bnad_pci_uninit(pdev);
3825 mutex_unlock(&bnad->conf_mutex);
3826 bnad_lock_uninit(bnad);
3827 /* Remove the debugfs node for this bnad */
3828 kfree(bnad->regdata);
3829 bnad_debugfs_uninit(bnad);
3831 free_netdev(netdev);
3834 static const struct pci_device_id bnad_pci_id_table[] = {
3836 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3837 PCI_DEVICE_ID_BROCADE_CT),
3838 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3839 .class_mask = 0xffff00
3842 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3843 BFA_PCI_DEVICE_ID_CT2),
3844 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3845 .class_mask = 0xffff00
3850 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3852 static struct pci_driver bnad_pci_driver = {
3854 .id_table = bnad_pci_id_table,
3855 .probe = bnad_pci_probe,
3856 .remove = bnad_pci_remove,
3860 bnad_module_init(void)
3864 pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
3865 BNAD_VERSION);
3867 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3869 err = pci_register_driver(&bnad_pci_driver);
3871 pr_err("bna: PCI driver registration failed err=%d\n", err);
3879 bnad_module_exit(void)
3881 pci_unregister_driver(&bnad_pci_driver);
3882 release_firmware(bfi_fw);
3885 module_init(bnad_module_init);
3886 module_exit(bnad_module_exit);
3888 MODULE_AUTHOR("Brocade");
3889 MODULE_LICENSE("GPL");
3890 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3891 MODULE_VERSION(BNAD_VERSION);
3892 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3893 MODULE_FIRMWARE(CNA_FW_FILE_CT2);