/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
#include <net/vxlan.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)

static const char version[] =
        "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57304_VF,
        BCM57404_VF,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
        { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
        { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
        { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
        { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
        { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == BCM57304_VF || idx == BCM57404_VF);
}

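/* Completion ring doorbell writes used below (as seen from their callers):
 * DB_CP_REARM_FLAGS   - ack entries up to the given index and re-arm the
 *                       ring interrupt (used when NAPI completes).
 * DB_CP_FLAGS         - ack entries while leaving the interrupt disabled
 *                       during NAPI polling.
 * DB_CP_IRQ_DIS_FLAGS - only disable the ring interrupt.
 */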
#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)                                  \
                writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)                                        \
                writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

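/* Number of free TX descriptors, derived from the producer/consumer
 * difference masked to the ring.  For example, with a ring mask of 511,
 * tx_prod = 600 and tx_cons = 100 give (600 - 100) & 511 = 500 BDs in
 * flight, so tx_ring_size - 500 descriptors are still available.
 */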
static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
        /* Tell compiler to fetch tx indices from memory. */
        barrier();

        return bp->tx_ring_size -
                ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}

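/* TX length hint table, indexed by packet length divided by 512 (see the
 * "length >>= 9" lookup in bnxt_start_xmit()).  The first four entries
 * cover the finer-grained hints; everything from 2KB up shares
 * TX_BD_FLAGS_LHINT_2048_AND_LARGER.
 */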
static const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

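/* Main transmit routine.  A small packet (length <= tx_push_thresh) sent on
 * an otherwise empty ring is copied into the push buffer and written through
 * the doorbell in one shot; everything else takes the normal path of
 * DMA-mapping the linear head and each fragment onto long TX BDs before
 * ringing the doorbell.
 */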
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txr = &bp->tx_ring[i];
        txq = netdev_get_tx_queue(dev, i);
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = 0;
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 8021Q, 8021AD vlan offloads
                 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                netdev_tx_sent_queue(txq, skb->len);

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
                        __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
                                         push_len - 16);
                } else {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
                                         push_len);
                }

                tx_buf->is_push = 1;
                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad)) {
                        /* SKB already freed. */
                        tx_buf->skb = NULL;
                        return NETDEV_TX_OK;
                }
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
                dev_kfree_skb_any(skb);
                tx_buf->skb = NULL;
                return NETDEV_TX_OK;
        }

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = 0;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags =
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        writel(DB_KEY_TX | prod, txr->tx_doorbell);
        writel(DB_KEY_TX | prod, txr->tx_doorbell);

tx_done:

        mmiowb();

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in bnxt_tx_avail() below, because in
                 * bnxt_tx_int(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

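/* Reclaim completed TX buffers.  Unmaps the head and fragments (push packets
 * have nothing mapped), updates the BQL counters, and, with a memory barrier
 * that pairs against bnxt_start_xmit(), restarts the queue once enough
 * descriptors are free again.
 */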
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        int index = txr - &bp->tx_ring[0];
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                PCI_DMA_TODEVICE);
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
                    txr->dev_state != BNXT_DEV_STATE_CLOSING)
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}

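/* RX data buffers are plain kmalloc() buffers.  DMA starts at
 * BNXT_RX_DMA_OFFSET and the completion path later reserves BNXT_RX_OFFSET
 * in the skb, so the headroom is preserved and the IP header stays aligned
 * when build_skb() wraps the buffer without copying.
 */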
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
                                  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        u8 *data;
        dma_addr_t mapping;

        data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
        if (!data)
                return -ENOMEM;

        rx_buf->data = data;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);

        return 0;
}

static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
                               u8 *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;

        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

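/* The aggregation ring holds full pages for the non-linear part of jumbo and
 * TPA packets.  rx_agg_bmap tracks which software slots are in use so that
 * completions, which identify buffers by their opaque index, can be recycled
 * out of order.
 */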
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;

        page = alloc_page(gfp);
        if (!page)
                return -ENOMEM;

        mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
                                   u32 agg_bufs)
{
        struct bnxt *bp = bnapi->bp;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        u32 i;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                agg = (struct rx_agg_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
                cp_cons = NEXT_CMP(cp_cons);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

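/* Build an skb directly around the received data buffer.  A replacement
 * buffer is allocated for the ring slot first; if that fails, the current
 * buffer is recycled and the packet is dropped.
 */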
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   u16 prod, u8 *data, dma_addr_t dma_addr,
                                   unsigned int len)
{
        int err;
        struct sk_buff *skb;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, BNXT_RX_OFFSET);
        skb_put(skb, len);
        return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
                                     struct sk_buff *skb, u16 cp_cons,
                                     u32 agg_bufs)
{
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u32 i;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                agg = (struct rx_agg_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = dma_unmap_addr(cons_rx_buf, mapping);
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
                        return NULL;
                }

                dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
                cp_cons = NEXT_CMP(cp_cons);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping,
                                bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);

        memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);

        dma_sync_single_for_device(&pdev->dev, mapping,
                                   bp->rx_copy_thresh,
                                   PCI_DMA_FROMDEVICE);

        skb_put(skb, len);
        return skb;
}

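/* TPA (hardware GRO/LRO) start: the buffer that will accumulate the
 * aggregated packet is parked in rx_tpa[agg_id] and its ring slot is
 * refilled, so normal RX traffic can continue while the aggregation is in
 * progress.
 */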
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
{
        u8 agg_id = TPA_START_AGG_ID(tpa_start);
        u16 cons, prod;
        struct bnxt_tpa_info *tpa_info;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *prod_bd;
        dma_addr_t mapping;

        cons = tpa_start->rx_tpa_start_cmp_opaque;
        prod = rxr->rx_prod;
        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];
        tpa_info = &rxr->rx_tpa[agg_id];

        prod_rx_buf->data = tpa_info->data;

        mapping = tpa_info->mapping;
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

        tpa_info->data = cons_rx_buf->data;
        cons_rx_buf->data = NULL;
        tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);

        tpa_info->len =
                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
                                RX_TPA_START_CMP_LEN_SHIFT;
        if (likely(TPA_START_HASH_VALID(tpa_start))) {
                u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

                tpa_info->hash_type = PKT_HASH_TYPE_L4;
                tpa_info->gso_type = SKB_GSO_TCPV4;
                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
                if (hash_type == 3)
                        tpa_info->gso_type = SKB_GSO_TCPV6;
                tpa_info->rss_hash =
                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
        } else {
                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
                tpa_info->gso_type = 0;
                if (netif_msg_rx_err(bp))
                        netdev_warn(bp->dev, "TPA packet without valid hash\n");
        }
        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
        tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);

        rxr->rx_prod = NEXT_RX(prod);
        cons = NEXT_RX(cons);
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
        cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
                           u16 cp_cons, u32 agg_bufs)
{
        if (agg_bufs)
                bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}

#define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

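/* Finish a hardware-aggregated packet for GRO: recover the segment count and
 * gso_size from the completion, rebuild the TCP pseudo checksum, and flag
 * UDP tunnel encapsulation when the inner headers sit behind an outer UDP
 * header.
 */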
static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
                                           struct rx_tpa_end_cmp *tpa_end,
                                           struct rx_tpa_end_cmp_ext *tpa_end1,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int payload_off, tcp_opt_len = 0;
        int len, nw_off;
        u16 segs;

        segs = TPA_END_TPA_SEGS(tpa_end);
        if (segs == 1)
                return skb;

        NAPI_GRO_CB(skb)->count = segs;
        skb_shinfo(skb)->gso_size =
                le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
        skb_shinfo(skb)->gso_type = tpa_info->gso_type;
        payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
                       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
                      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
        if (TPA_END_GRO_TS(tpa_end))
                tcp_opt_len = 12;

        if (tpa_info->gso_type == SKB_GSO_TCPV4) {
                struct iphdr *iph;

                nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ip_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
                struct ipv6hdr *iph;

                nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ipv6_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                dev_kfree_skb_any(skb);
                return NULL;
        }
        tcp_gro_complete(skb);

        if (nw_off) { /* tunnel */
                struct udphdr *uh = NULL;

                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = (struct iphdr *)skb->data;

                        if (iph->protocol == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                } else {
                        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                        if (iph->nexthdr == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                }
                if (uh) {
                        if (uh->check)
                                skb_shinfo(skb)->gso_type |=
                                        SKB_GSO_UDP_TUNNEL_CSUM;
                        else
                                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
                }
        }
#endif
        return skb;
}

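/* TPA end: turn the aggregated buffer into an skb (copying small packets,
 * wrapping larger ones with build_skb()), attach any aggregation pages, then
 * apply RSS hash, VLAN and checksum metadata before optional GRO completion.
 */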
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                                           struct bnxt_napi *bnapi,
                                           u32 *raw_cons,
                                           struct rx_tpa_end_cmp *tpa_end,
                                           struct rx_tpa_end_cmp_ext *tpa_end1,
                                           bool *agg_event)
{
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u8 agg_id = TPA_END_AGG_ID(tpa_end);
        u8 *data, agg_bufs;
        u16 cp_cons = RING_CMP(*raw_cons);
        unsigned int len;
        struct bnxt_tpa_info *tpa_info;
        dma_addr_t mapping;
        struct sk_buff *skb;

        tpa_info = &rxr->rx_tpa[agg_id];
        data = tpa_info->data;
        prefetch(data);
        len = tpa_info->len;
        mapping = tpa_info->mapping;

        agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
                    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
                        return ERR_PTR(-EBUSY);

                *agg_event = true;
                cp_cons = NEXT_CMP(cp_cons);
        }

        if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
                bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
                            agg_bufs, (int)MAX_SKB_FRAGS);
                return NULL;
        }

        if (len <= bp->rx_copy_thresh) {
                skb = bnxt_copy_skb(bnapi, data, len, mapping);
                if (!skb) {
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }
        } else {
                u8 *new_data;
                dma_addr_t new_mapping;

                new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
                if (!new_data) {
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }

                tpa_info->data = new_data;
                tpa_info->mapping = new_mapping;

                skb = build_skb(data, 0);
                dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);

                if (!skb) {
                        kfree(data);
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }
                skb_reserve(skb, BNXT_RX_OFFSET);
                skb_put(skb, len);
        }

        if (agg_bufs) {
                skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
                if (!skb) {
                        /* Page reuse already handled by bnxt_rx_pages(). */
                        return NULL;
                }
        }
        skb->protocol = eth_type_trans(skb, bp->dev);

        if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
                skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

        if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
                netdev_features_t features = skb->dev->features;
                u16 vlan_proto = tpa_info->metadata >>
                        RX_CMP_FLAGS2_METADATA_TPID_SFT;

                if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
                     vlan_proto == ETH_P_8021Q) ||
                    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
                     vlan_proto == ETH_P_8021AD)) {
                        __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
                                               tpa_info->metadata &
                                               RX_CMP_FLAGS2_METADATA_VID_MASK);
                }
        }

        skb_checksum_none_assert(skb);
        if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb->csum_level =
                        (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
        }

        if (TPA_END_GRO(tpa_end))
                skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);

        return skb;
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
                       bool *agg_event)
{
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        struct net_device *dev = bp->dev;
        struct rx_cmp *rxcmp;
        struct rx_cmp_ext *rxcmp1;
        u32 tmp_raw_cons = *raw_cons;
        u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
        struct bnxt_sw_rx_bd *rx_buf;
        unsigned int len;
        u8 *data, agg_bufs, cmp_type;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        int rc = 0;

        rxcmp = (struct rx_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
        cp_cons = RING_CMP(tmp_raw_cons);
        rxcmp1 = (struct rx_cmp_ext *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
                return -EBUSY;

        cmp_type = RX_CMP_TYPE(rxcmp);

        prod = rxr->rx_prod;

        if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
                bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
                               (struct rx_tpa_start_cmp_ext *)rxcmp1);

                goto next_rx_no_prod;

        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
                                   (struct rx_tpa_end_cmp *)rxcmp,
                                   (struct rx_tpa_end_cmp_ext *)rxcmp1,
                                   agg_event);

                if (unlikely(IS_ERR(skb)))
                        return -EBUSY;

                rc = -ENOMEM;
                if (likely(skb)) {
                        skb_record_rx_queue(skb, bnapi->index);
                        skb_mark_napi_id(skb, &bnapi->napi);
                        if (bnxt_busy_polling(bnapi))
                                netif_receive_skb(skb);
                        else
                                napi_gro_receive(&bnapi->napi, skb);
                        rc = 1;
                }
                goto next_rx_no_prod;
        }

        cons = rxcmp->rx_cmp_opaque;
        rx_buf = &rxr->rx_buf_ring[cons];
        data = rx_buf->data;
        prefetch(data);

        agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
                                RX_CMP_AGG_BUFS_SHIFT;

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;

                cp_cons = NEXT_CMP(cp_cons);
                *agg_event = true;
        }

        rx_buf->data = NULL;
        if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
                bnxt_reuse_rx_data(rxr, cons, data);
                if (agg_bufs)
                        bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);

                rc = -EIO;
                goto next_rx;
        }

        len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
        dma_addr = dma_unmap_addr(rx_buf, mapping);

        if (len <= bp->rx_copy_thresh) {
                skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
                bnxt_reuse_rx_data(rxr, cons, data);
                if (!skb) {
                        rc = -ENOMEM;
                        goto next_rx;
                }
        } else {
                skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
                if (!skb) {
                        rc = -ENOMEM;
                        goto next_rx;
                }
        }

        if (agg_bufs) {
                skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
                if (!skb) {
                        rc = -ENOMEM;
                        goto next_rx;
                }
        }

        if (RX_CMP_HASH_VALID(rxcmp)) {
                u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
                enum pkt_hash_types type = PKT_HASH_TYPE_L4;

                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
                if (hash_type != 1 && hash_type != 3)
                        type = PKT_HASH_TYPE_L3;
                skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
        }

        skb->protocol = eth_type_trans(skb, dev);

        if (rxcmp1->rx_cmp_flags2 &
            cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
                netdev_features_t features = skb->dev->features;
                u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
                u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

                if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
                     vlan_proto == ETH_P_8021Q) ||
                    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
                     vlan_proto == ETH_P_8021AD))
                        __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
                                               meta_data &
                                               RX_CMP_FLAGS2_METADATA_VID_MASK);
        }

        skb_checksum_none_assert(skb);
        if (RX_CMP_L4_CS_OK(rxcmp1)) {
                if (dev->features & NETIF_F_RXCSUM) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->csum_level = RX_CMP_ENCAP(rxcmp1);
                }
        } else {
                if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
                        if (dev->features & NETIF_F_RXCSUM)
                                cpr->rx_l4_csum_errors++;
                }
        }

        skb_record_rx_queue(skb, bnapi->index);
        skb_mark_napi_id(skb, &bnapi->napi);
        if (bnxt_busy_polling(bnapi))
                netif_receive_skb(skb);
        else
                napi_gro_receive(&bnapi->napi, skb);
        rc = 1;

next_rx:
        rxr->rx_prod = NEXT_RX(prod);

next_rx_no_prod:
        *raw_cons = tmp_raw_cons;

        return rc;
}

static int bnxt_async_event_process(struct bnxt *bp,
                                    struct hwrm_async_event_cmpl *cmpl)
{
        u16 event_id = le16_to_cpu(cmpl->event_id);

        /* TODO CHIMP_FW: Define event id's for link change, error etc */
        switch (event_id) {
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
                break;
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
                set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
                break;
        default:
                netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
                           event_id);
                goto async_event_process_exit;
        }
        schedule_work(&bp->sp_task);
async_event_process_exit:
        return 0;
}

static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
        u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
        struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
        struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
                                (struct hwrm_fwd_req_cmpl *)txcmp;

        switch (cmpl_type) {
        case CMPL_BASE_TYPE_HWRM_DONE:
                seq_id = le16_to_cpu(h_cmpl->sequence_id);
                if (seq_id == bp->hwrm_intr_seq_id)
                        bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
                else
                        netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
                break;

        case CMPL_BASE_TYPE_HWRM_FWD_REQ:
                vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

                if ((vf_id < bp->pf.first_vf_id) ||
                    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
                        netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
                                   vf_id);
                        return -EINVAL;
                }

                set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
                set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
                schedule_work(&bp->sp_task);
                break;

        case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
                bnxt_async_event_process(bp,
                                         (struct hwrm_async_event_cmpl *)txcmp);

        default:
                break;
        }

        return 0;
}

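/* MSI-X interrupt handler: each vector maps to one bnxt_napi instance, so
 * the handler only prefetches the next completion entry and schedules NAPI.
 */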
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
        struct bnxt_napi *bnapi = dev_instance;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        u32 cons = RING_CMP(cpr->cp_raw_cons);

        prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
        napi_schedule(&bnapi->napi);
        return IRQ_HANDLED;
}

static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        u32 raw_cons = cpr->cp_raw_cons;
        u16 cons = RING_CMP(raw_cons);
        struct tx_cmp *txcmp;

        txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

        return TX_CMP_VALID(txcmp, raw_cons);
}

static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
        struct bnxt_napi *bnapi = dev_instance;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        u32 cons = RING_CMP(cpr->cp_raw_cons);
        u32 int_status;

        prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

        if (!bnxt_has_work(bp, cpr)) {
                int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
                /* return if erroneous interrupt */
                if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
                        return IRQ_NONE;
        }

        /* disable ring IRQ */
        BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);
        return IRQ_HANDLED;
}

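/* Core NAPI worker: walks the completion ring, dispatching TX completions,
 * RX/TPA completions and HWRM events from the single ring, then acks the
 * ring before freeing TX buffers and replenishing the RX/agg rings so the
 * completion ring cannot overflow.
 */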
1350 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1351 {
1352         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1353         u32 raw_cons = cpr->cp_raw_cons;
1354         u32 cons;
1355         int tx_pkts = 0;
1356         int rx_pkts = 0;
1357         bool rx_event = false;
1358         bool agg_event = false;
1359         struct tx_cmp *txcmp;
1360
1361         while (1) {
1362                 int rc;
1363
1364                 cons = RING_CMP(raw_cons);
1365                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1366
1367                 if (!TX_CMP_VALID(txcmp, raw_cons))
1368                         break;
1369
1370                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1371                         tx_pkts++;
1372                         /* return full budget so NAPI will complete. */
1373                         if (unlikely(tx_pkts > bp->tx_wake_thresh))
1374                                 rx_pkts = budget;
1375                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1376                         rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1377                         if (likely(rc >= 0))
1378                                 rx_pkts += rc;
1379                         else if (rc == -EBUSY)  /* partial completion */
1380                                 break;
1381                         rx_event = true;
1382                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1383                                      CMPL_BASE_TYPE_HWRM_DONE) ||
1384                                     (TX_CMP_TYPE(txcmp) ==
1385                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1386                                     (TX_CMP_TYPE(txcmp) ==
1387                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1388                         bnxt_hwrm_handler(bp, txcmp);
1389                 }
1390                 raw_cons = NEXT_RAW_CMP(raw_cons);
1391
1392                 if (rx_pkts == budget)
1393                         break;
1394         }
1395
1396         cpr->cp_raw_cons = raw_cons;
1397         /* ACK completion ring before freeing tx ring and producing new
1398          * buffers in rx/agg rings to prevent overflowing the completion
1399          * ring.
1400          */
1401         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1402
1403         if (tx_pkts)
1404                 bnxt_tx_int(bp, bnapi, tx_pkts);
1405
1406         if (rx_event) {
1407                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1408
1409                 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1410                 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1411                 if (agg_event) {
1412                         writel(DB_KEY_RX | rxr->rx_agg_prod,
1413                                rxr->rx_agg_doorbell);
1414                         writel(DB_KEY_RX | rxr->rx_agg_prod,
1415                                rxr->rx_agg_doorbell);
1416                 }
1417         }
1418         return rx_pkts;
1419 }
1420
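/* NAPI poll callback: repeatedly calls bnxt_poll_work() until the budget
 * is exhausted or no more completions are pending, then completes NAPI
 * and re-arms the completion ring doorbell so interrupts resume.
 */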
1421 static int bnxt_poll(struct napi_struct *napi, int budget)
1422 {
1423         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1424         struct bnxt *bp = bnapi->bp;
1425         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1426         int work_done = 0;
1427
1428         if (!bnxt_lock_napi(bnapi))
1429                 return budget;
1430
1431         while (1) {
1432                 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1433
1434                 if (work_done >= budget)
1435                         break;
1436
1437                 if (!bnxt_has_work(bp, cpr)) {
1438                         napi_complete(napi);
1439                         BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1440                         break;
1441                 }
1442         }
1443         mmiowb();
1444         bnxt_unlock_napi(bnapi);
1445         return work_done;
1446 }
1447
1448 #ifdef CONFIG_NET_RX_BUSY_POLL
1449 static int bnxt_busy_poll(struct napi_struct *napi)
1450 {
1451         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1452         struct bnxt *bp = bnapi->bp;
1453         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1454         int rx_work, budget = 4;
1455
1456         if (atomic_read(&bp->intr_sem) != 0)
1457                 return LL_FLUSH_FAILED;
1458
1459         if (!bnxt_lock_poll(bnapi))
1460                 return LL_FLUSH_BUSY;
1461
1462         rx_work = bnxt_poll_work(bp, bnapi, budget);
1463
1464         BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1465
1466         bnxt_unlock_poll(bnapi);
1467         return rx_work;
1468 }
1469 #endif
1470
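/* Release every pending TX skb on all TX rings.  Push-mode packets have
 * no DMA mapping to undo; otherwise the head and all fragment mappings
 * are unmapped before the skb is freed, and the BQL queue state is reset.
 */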
1471 static void bnxt_free_tx_skbs(struct bnxt *bp)
1472 {
1473         int i, max_idx;
1474         struct pci_dev *pdev = bp->pdev;
1475
1476         if (!bp->tx_ring)
1477                 return;
1478
1479         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1480         for (i = 0; i < bp->tx_nr_rings; i++) {
1481                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1482                 int j;
1483
1484                 for (j = 0; j < max_idx;) {
1485                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1486                         struct sk_buff *skb = tx_buf->skb;
1487                         int k, last;
1488
1489                         if (!skb) {
1490                                 j++;
1491                                 continue;
1492                         }
1493
1494                         tx_buf->skb = NULL;
1495
1496                         if (tx_buf->is_push) {
1497                                 dev_kfree_skb(skb);
1498                                 j += 2;
1499                                 continue;
1500                         }
1501
1502                         dma_unmap_single(&pdev->dev,
1503                                          dma_unmap_addr(tx_buf, mapping),
1504                                          skb_headlen(skb),
1505                                          PCI_DMA_TODEVICE);
1506
1507                         last = tx_buf->nr_frags;
1508                         j += 2;
1509                         for (k = 0; k < last; k++, j++) {
1510                                 int ring_idx = j & bp->tx_ring_mask;
1511                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1512
1513                                 tx_buf = &txr->tx_buf_ring[ring_idx];
1514                                 dma_unmap_page(
1515                                         &pdev->dev,
1516                                         dma_unmap_addr(tx_buf, mapping),
1517                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
1518                         }
1519                         dev_kfree_skb(skb);
1520                 }
1521                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1522         }
1523 }
1524
1525 static void bnxt_free_rx_skbs(struct bnxt *bp)
1526 {
1527         int i, max_idx, max_agg_idx;
1528         struct pci_dev *pdev = bp->pdev;
1529
1530         if (!bp->rx_ring)
1531                 return;
1532
1533         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
1534         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
1535         for (i = 0; i < bp->rx_nr_rings; i++) {
1536                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1537                 int j;
1538
1539                 if (rxr->rx_tpa) {
1540                         for (j = 0; j < MAX_TPA; j++) {
1541                                 struct bnxt_tpa_info *tpa_info =
1542                                                         &rxr->rx_tpa[j];
1543                                 u8 *data = tpa_info->data;
1544
1545                                 if (!data)
1546                                         continue;
1547
1548                                 dma_unmap_single(
1549                                         &pdev->dev,
1550                                         dma_unmap_addr(tpa_info, mapping),
1551                                         bp->rx_buf_use_size,
1552                                         PCI_DMA_FROMDEVICE);
1553
1554                                 tpa_info->data = NULL;
1555
1556                                 kfree(data);
1557                         }
1558                 }
1559
1560                 for (j = 0; j < max_idx; j++) {
1561                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1562                         u8 *data = rx_buf->data;
1563
1564                         if (!data)
1565                                 continue;
1566
1567                         dma_unmap_single(&pdev->dev,
1568                                          dma_unmap_addr(rx_buf, mapping),
1569                                          bp->rx_buf_use_size,
1570                                          PCI_DMA_FROMDEVICE);
1571
1572                         rx_buf->data = NULL;
1573
1574                         kfree(data);
1575                 }
1576
1577                 for (j = 0; j < max_agg_idx; j++) {
1578                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
1579                                 &rxr->rx_agg_ring[j];
1580                         struct page *page = rx_agg_buf->page;
1581
1582                         if (!page)
1583                                 continue;
1584
1585                         dma_unmap_page(&pdev->dev,
1586                                        dma_unmap_addr(rx_agg_buf, mapping),
1587                                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
1588
1589                         rx_agg_buf->page = NULL;
1590                         __clear_bit(j, rxr->rx_agg_bmap);
1591
1592                         __free_page(page);
1593                 }
1594         }
1595 }
1596
1597 static void bnxt_free_skbs(struct bnxt *bp)
1598 {
1599         bnxt_free_tx_skbs(bp);
1600         bnxt_free_rx_skbs(bp);
1601 }
1602
1603 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1604 {
1605         struct pci_dev *pdev = bp->pdev;
1606         int i;
1607
1608         for (i = 0; i < ring->nr_pages; i++) {
1609                 if (!ring->pg_arr[i])
1610                         continue;
1611
1612                 dma_free_coherent(&pdev->dev, ring->page_size,
1613                                   ring->pg_arr[i], ring->dma_arr[i]);
1614
1615                 ring->pg_arr[i] = NULL;
1616         }
1617         if (ring->pg_tbl) {
1618                 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
1619                                   ring->pg_tbl, ring->pg_tbl_map);
1620                 ring->pg_tbl = NULL;
1621         }
1622         if (ring->vmem_size && *ring->vmem) {
1623                 vfree(*ring->vmem);
1624                 *ring->vmem = NULL;
1625         }
1626 }
1627
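/* Allocate the HW descriptor pages for one ring.  Multi-page rings also
 * get a page table holding the pages' DMA addresses (8 bytes per entry),
 * and rings that need driver-side bookkeeping (vmem_size != 0) get a
 * zeroed vmalloc area as well.  bnxt_free_ring() is the inverse.
 */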
1628 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1629 {
1630         int i;
1631         struct pci_dev *pdev = bp->pdev;
1632
1633         if (ring->nr_pages > 1) {
1634                 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
1635                                                   ring->nr_pages * 8,
1636                                                   &ring->pg_tbl_map,
1637                                                   GFP_KERNEL);
1638                 if (!ring->pg_tbl)
1639                         return -ENOMEM;
1640         }
1641
1642         for (i = 0; i < ring->nr_pages; i++) {
1643                 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1644                                                      ring->page_size,
1645                                                      &ring->dma_arr[i],
1646                                                      GFP_KERNEL);
1647                 if (!ring->pg_arr[i])
1648                         return -ENOMEM;
1649
1650                 if (ring->nr_pages > 1)
1651                         ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
1652         }
1653
1654         if (ring->vmem_size) {
1655                 *ring->vmem = vzalloc(ring->vmem_size);
1656                 if (!(*ring->vmem))
1657                         return -ENOMEM;
1658         }
1659         return 0;
1660 }
1661
1662 static void bnxt_free_rx_rings(struct bnxt *bp)
1663 {
1664         int i;
1665
1666         if (!bp->rx_ring)
1667                 return;
1668
1669         for (i = 0; i < bp->rx_nr_rings; i++) {
1670                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1671                 struct bnxt_ring_struct *ring;
1672
1673                 kfree(rxr->rx_tpa);
1674                 rxr->rx_tpa = NULL;
1675
1676                 kfree(rxr->rx_agg_bmap);
1677                 rxr->rx_agg_bmap = NULL;
1678
1679                 ring = &rxr->rx_ring_struct;
1680                 bnxt_free_ring(bp, ring);
1681
1682                 ring = &rxr->rx_agg_ring_struct;
1683                 bnxt_free_ring(bp, ring);
1684         }
1685 }
1686
1687 static int bnxt_alloc_rx_rings(struct bnxt *bp)
1688 {
1689         int i, rc, agg_rings = 0, tpa_rings = 0;
1690
1691         if (!bp->rx_ring)
1692                 return -ENOMEM;
1693
1694         if (bp->flags & BNXT_FLAG_AGG_RINGS)
1695                 agg_rings = 1;
1696
1697         if (bp->flags & BNXT_FLAG_TPA)
1698                 tpa_rings = 1;
1699
1700         for (i = 0; i < bp->rx_nr_rings; i++) {
1701                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1702                 struct bnxt_ring_struct *ring;
1703
1704                 ring = &rxr->rx_ring_struct;
1705
1706                 rc = bnxt_alloc_ring(bp, ring);
1707                 if (rc)
1708                         return rc;
1709
1710                 if (agg_rings) {
1711                         u16 mem_size;
1712
1713                         ring = &rxr->rx_agg_ring_struct;
1714                         rc = bnxt_alloc_ring(bp, ring);
1715                         if (rc)
1716                                 return rc;
1717
1718                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
1719                         mem_size = rxr->rx_agg_bmap_size / 8;
1720                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
1721                         if (!rxr->rx_agg_bmap)
1722                                 return -ENOMEM;
1723
1724                         if (tpa_rings) {
1725                                 rxr->rx_tpa = kcalloc(MAX_TPA,
1726                                                 sizeof(struct bnxt_tpa_info),
1727                                                 GFP_KERNEL);
1728                                 if (!rxr->rx_tpa)
1729                                         return -ENOMEM;
1730                         }
1731                 }
1732         }
1733         return 0;
1734 }
1735
1736 static void bnxt_free_tx_rings(struct bnxt *bp)
1737 {
1738         int i;
1739         struct pci_dev *pdev = bp->pdev;
1740
1741         if (!bp->tx_ring)
1742                 return;
1743
1744         for (i = 0; i < bp->tx_nr_rings; i++) {
1745                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1746                 struct bnxt_ring_struct *ring;
1747
1748                 if (txr->tx_push) {
1749                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
1750                                           txr->tx_push, txr->tx_push_mapping);
1751                         txr->tx_push = NULL;
1752                 }
1753
1754                 ring = &txr->tx_ring_struct;
1755
1756                 bnxt_free_ring(bp, ring);
1757         }
1758 }
1759
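/* Allocate TX descriptor rings.  If TX push is enabled, a per-ring
 * coherent buffer (tx_push) is also allocated to back up TX push
 * operations; push is disabled when the cache-aligned size would exceed
 * 256 bytes.  Rings are assigned firmware queue ids grouped by traffic
 * class.
 */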
1760 static int bnxt_alloc_tx_rings(struct bnxt *bp)
1761 {
1762         int i, j, rc;
1763         struct pci_dev *pdev = bp->pdev;
1764
1765         bp->tx_push_size = 0;
1766         if (bp->tx_push_thresh) {
1767                 int push_size;
1768
1769                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1770                                         bp->tx_push_thresh);
1771
1772                 if (push_size > 256) {
1773                         push_size = 0;
1774                         bp->tx_push_thresh = 0;
1775                 }
1776
1777                 bp->tx_push_size = push_size;
1778         }
1779
1780         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
1781                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1782                 struct bnxt_ring_struct *ring;
1783
1784                 ring = &txr->tx_ring_struct;
1785
1786                 rc = bnxt_alloc_ring(bp, ring);
1787                 if (rc)
1788                         return rc;
1789
1790                 if (bp->tx_push_size) {
1791                         dma_addr_t mapping;
1792
1793                         /* One pre-allocated DMA buffer to back up
1794                          * the TX push operation
1795                          */
1796                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
1797                                                 bp->tx_push_size,
1798                                                 &txr->tx_push_mapping,
1799                                                 GFP_KERNEL);
1800
1801                         if (!txr->tx_push)
1802                                 return -ENOMEM;
1803
1804                         mapping = txr->tx_push_mapping +
1805                                 sizeof(struct tx_push_bd);
1806                         txr->data_mapping = cpu_to_le64(mapping);
1807
1808                         memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
1809                 }
1810                 ring->queue_id = bp->q_info[j].queue_id;
1811                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
1812                         j++;
1813         }
1814         return 0;
1815 }
1816
1817 static void bnxt_free_cp_rings(struct bnxt *bp)
1818 {
1819         int i;
1820
1821         if (!bp->bnapi)
1822                 return;
1823
1824         for (i = 0; i < bp->cp_nr_rings; i++) {
1825                 struct bnxt_napi *bnapi = bp->bnapi[i];
1826                 struct bnxt_cp_ring_info *cpr;
1827                 struct bnxt_ring_struct *ring;
1828
1829                 if (!bnapi)
1830                         continue;
1831
1832                 cpr = &bnapi->cp_ring;
1833                 ring = &cpr->cp_ring_struct;
1834
1835                 bnxt_free_ring(bp, ring);
1836         }
1837 }
1838
1839 static int bnxt_alloc_cp_rings(struct bnxt *bp)
1840 {
1841         int i, rc;
1842
1843         for (i = 0; i < bp->cp_nr_rings; i++) {
1844                 struct bnxt_napi *bnapi = bp->bnapi[i];
1845                 struct bnxt_cp_ring_info *cpr;
1846                 struct bnxt_ring_struct *ring;
1847
1848                 if (!bnapi)
1849                         continue;
1850
1851                 cpr = &bnapi->cp_ring;
1852                 ring = &cpr->cp_ring_struct;
1853
1854                 rc = bnxt_alloc_ring(bp, ring);
1855                 if (rc)
1856                         return rc;
1857         }
1858         return 0;
1859 }
1860
1861 static void bnxt_init_ring_struct(struct bnxt *bp)
1862 {
1863         int i;
1864
1865         for (i = 0; i < bp->cp_nr_rings; i++) {
1866                 struct bnxt_napi *bnapi = bp->bnapi[i];
1867                 struct bnxt_cp_ring_info *cpr;
1868                 struct bnxt_rx_ring_info *rxr;
1869                 struct bnxt_tx_ring_info *txr;
1870                 struct bnxt_ring_struct *ring;
1871
1872                 if (!bnapi)
1873                         continue;
1874
1875                 cpr = &bnapi->cp_ring;
1876                 ring = &cpr->cp_ring_struct;
1877                 ring->nr_pages = bp->cp_nr_pages;
1878                 ring->page_size = HW_CMPD_RING_SIZE;
1879                 ring->pg_arr = (void **)cpr->cp_desc_ring;
1880                 ring->dma_arr = cpr->cp_desc_mapping;
1881                 ring->vmem_size = 0;
1882
1883                 rxr = bnapi->rx_ring;
1884                 if (!rxr)
1885                         goto skip_rx;
1886
1887                 ring = &rxr->rx_ring_struct;
1888                 ring->nr_pages = bp->rx_nr_pages;
1889                 ring->page_size = HW_RXBD_RING_SIZE;
1890                 ring->pg_arr = (void **)rxr->rx_desc_ring;
1891                 ring->dma_arr = rxr->rx_desc_mapping;
1892                 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
1893                 ring->vmem = (void **)&rxr->rx_buf_ring;
1894
1895                 ring = &rxr->rx_agg_ring_struct;
1896                 ring->nr_pages = bp->rx_agg_nr_pages;
1897                 ring->page_size = HW_RXBD_RING_SIZE;
1898                 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
1899                 ring->dma_arr = rxr->rx_agg_desc_mapping;
1900                 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
1901                 ring->vmem = (void **)&rxr->rx_agg_ring;
1902
1903 skip_rx:
1904                 txr = bnapi->tx_ring;
1905                 if (!txr)
1906                         continue;
1907
1908                 ring = &txr->tx_ring_struct;
1909                 ring->nr_pages = bp->tx_nr_pages;
1910                 ring->page_size = HW_RXBD_RING_SIZE;
1911                 ring->pg_arr = (void **)txr->tx_desc_ring;
1912                 ring->dma_arr = txr->tx_desc_mapping;
1913                 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
1914                 ring->vmem = (void **)&txr->tx_buf_ring;
1915         }
1916 }
1917
1918 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
1919 {
1920         int i;
1921         u32 prod;
1922         struct rx_bd **rx_buf_ring;
1923
1924         rx_buf_ring = (struct rx_bd **)ring->pg_arr;
1925         for (i = 0, prod = 0; i < ring->nr_pages; i++) {
1926                 int j;
1927                 struct rx_bd *rxbd;
1928
1929                 rxbd = rx_buf_ring[i];
1930                 if (!rxbd)
1931                         continue;
1932
1933                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
1934                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
1935                         rxbd->rx_bd_opaque = prod;
1936                 }
1937         }
1938 }
1939
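/* Initialize one RX ring: stamp the ring's BD pages with the buffer type
 * and length, pre-fill the ring with rx_ring_size receive buffers and,
 * when aggregation rings are enabled, do the same for the agg ring with
 * page-sized buffers.  With TPA enabled, MAX_TPA backing buffers are also
 * pre-allocated for LRO/GRO completions.
 */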
1940 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
1941 {
1942         struct net_device *dev = bp->dev;
1943         struct bnxt_rx_ring_info *rxr;
1944         struct bnxt_ring_struct *ring;
1945         u32 prod, type;
1946         int i;
1947
1948         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
1949                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
1950
1951         if (NET_IP_ALIGN == 2)
1952                 type |= RX_BD_FLAGS_SOP;
1953
1954         rxr = &bp->rx_ring[ring_nr];
1955         ring = &rxr->rx_ring_struct;
1956         bnxt_init_rxbd_pages(ring, type);
1957
1958         prod = rxr->rx_prod;
1959         for (i = 0; i < bp->rx_ring_size; i++) {
1960                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
1961                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
1962                                     ring_nr, i, bp->rx_ring_size);
1963                         break;
1964                 }
1965                 prod = NEXT_RX(prod);
1966         }
1967         rxr->rx_prod = prod;
1968         ring->fw_ring_id = INVALID_HW_RING_ID;
1969
1970         ring = &rxr->rx_agg_ring_struct;
1971         ring->fw_ring_id = INVALID_HW_RING_ID;
1972
1973         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
1974                 return 0;
1975
1976         type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
1977                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
1978
1979         bnxt_init_rxbd_pages(ring, type);
1980
1981         prod = rxr->rx_agg_prod;
1982         for (i = 0; i < bp->rx_agg_ring_size; i++) {
1983                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
1984                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
1985                                     ring_nr, i, bp->rx_agg_ring_size);
1986                         break;
1987                 }
1988                 prod = NEXT_RX_AGG(prod);
1989         }
1990         rxr->rx_agg_prod = prod;
1991
1992         if (bp->flags & BNXT_FLAG_TPA) {
1993                 if (rxr->rx_tpa) {
1994                         u8 *data;
1995                         dma_addr_t mapping;
1996
1997                         for (i = 0; i < MAX_TPA; i++) {
1998                                 data = __bnxt_alloc_rx_data(bp, &mapping,
1999                                                             GFP_KERNEL);
2000                                 if (!data)
2001                                         return -ENOMEM;
2002
2003                                 rxr->rx_tpa[i].data = data;
2004                                 rxr->rx_tpa[i].mapping = mapping;
2005                         }
2006                 } else {
2007                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2008                         return -ENOMEM;
2009                 }
2010         }
2011
2012         return 0;
2013 }
2014
2015 static int bnxt_init_rx_rings(struct bnxt *bp)
2016 {
2017         int i, rc = 0;
2018
2019         for (i = 0; i < bp->rx_nr_rings; i++) {
2020                 rc = bnxt_init_one_rx_ring(bp, i);
2021                 if (rc)
2022                         break;
2023         }
2024
2025         return rc;
2026 }
2027
2028 static int bnxt_init_tx_rings(struct bnxt *bp)
2029 {
2030         u16 i;
2031
2032         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2033                                    MAX_SKB_FRAGS + 1);
2034
2035         for (i = 0; i < bp->tx_nr_rings; i++) {
2036                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2037                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2038
2039                 ring->fw_ring_id = INVALID_HW_RING_ID;
2040         }
2041
2042         return 0;
2043 }
2044
2045 static void bnxt_free_ring_grps(struct bnxt *bp)
2046 {
2047         kfree(bp->grp_info);
2048         bp->grp_info = NULL;
2049 }
2050
2051 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2052 {
2053         int i;
2054
2055         if (irq_re_init) {
2056                 bp->grp_info = kcalloc(bp->cp_nr_rings,
2057                                        sizeof(struct bnxt_ring_grp_info),
2058                                        GFP_KERNEL);
2059                 if (!bp->grp_info)
2060                         return -ENOMEM;
2061         }
2062         for (i = 0; i < bp->cp_nr_rings; i++) {
2063                 if (irq_re_init)
2064                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2065                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2066                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2067                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2068                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2069         }
2070         return 0;
2071 }
2072
2073 static void bnxt_free_vnics(struct bnxt *bp)
2074 {
2075         kfree(bp->vnic_info);
2076         bp->vnic_info = NULL;
2077         bp->nr_vnics = 0;
2078 }
2079
2080 static int bnxt_alloc_vnics(struct bnxt *bp)
2081 {
2082         int num_vnics = 1;
2083
2084 #ifdef CONFIG_RFS_ACCEL
2085         if (bp->flags & BNXT_FLAG_RFS)
2086                 num_vnics += bp->rx_nr_rings;
2087 #endif
2088
2089         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2090                                 GFP_KERNEL);
2091         if (!bp->vnic_info)
2092                 return -ENOMEM;
2093
2094         bp->nr_vnics = num_vnics;
2095         return 0;
2096 }
2097
2098 static void bnxt_init_vnics(struct bnxt *bp)
2099 {
2100         int i;
2101
2102         for (i = 0; i < bp->nr_vnics; i++) {
2103                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2104
2105                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2106                 vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
2107                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2108
2109                 if (bp->vnic_info[i].rss_hash_key) {
2110                         if (i == 0)
2111                                 prandom_bytes(vnic->rss_hash_key,
2112                                               HW_HASH_KEY_SIZE);
2113                         else
2114                                 memcpy(vnic->rss_hash_key,
2115                                        bp->vnic_info[0].rss_hash_key,
2116                                        HW_HASH_KEY_SIZE);
2117                 }
2118         }
2119 }
2120
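/* Number of descriptor pages needed to hold @ring_size entries with
 * @desc_per_pg entries per page, rounded up to a power of two (minimum
 * one page).  Illustrative arithmetic: ring_size = 100, desc_per_pg = 16
 * gives 100 / 16 = 6, bumped to 7, then rounded up to 8 pages.
 */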
2121 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2122 {
2123         int pages;
2124
2125         pages = ring_size / desc_per_pg;
2126
2127         if (!pages)
2128                 return 1;
2129
2130         pages++;
2131
2132         while (pages & (pages - 1))
2133                 pages++;
2134
2135         return pages;
2136 }
2137
2138 static void bnxt_set_tpa_flags(struct bnxt *bp)
2139 {
2140         bp->flags &= ~BNXT_FLAG_TPA;
2141         if (bp->dev->features & NETIF_F_LRO)
2142                 bp->flags |= BNXT_FLAG_LRO;
2143         if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
2144                 bp->flags |= BNXT_FLAG_GRO;
2145 }
2146
2147 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2148  * be set on entry.
2149  */
2150 void bnxt_set_ring_params(struct bnxt *bp)
2151 {
2152         u32 ring_size, rx_size, rx_space;
2153         u32 agg_factor = 0, agg_ring_size = 0;
2154
2155         /* 8 for CRC and VLAN */
2156         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2157
2158         rx_space = rx_size + NET_SKB_PAD +
2159                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2160
2161         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2162         ring_size = bp->rx_ring_size;
2163         bp->rx_agg_ring_size = 0;
2164         bp->rx_agg_nr_pages = 0;
2165
2166         if (bp->flags & BNXT_FLAG_TPA)
2167                 agg_factor = 4;
2168
2169         bp->flags &= ~BNXT_FLAG_JUMBO;
2170         if (rx_space > PAGE_SIZE) {
2171                 u32 jumbo_factor;
2172
2173                 bp->flags |= BNXT_FLAG_JUMBO;
2174                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2175                 if (jumbo_factor > agg_factor)
2176                         agg_factor = jumbo_factor;
2177         }
2178         agg_ring_size = ring_size * agg_factor;
2179
2180         if (agg_ring_size) {
2181                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2182                                                         RX_DESC_CNT);
2183                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2184                         u32 tmp = agg_ring_size;
2185
2186                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2187                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2188                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2189                                     tmp, agg_ring_size);
2190                 }
2191                 bp->rx_agg_ring_size = agg_ring_size;
2192                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2193                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2194                 rx_space = rx_size + NET_SKB_PAD +
2195                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2196         }
2197
2198         bp->rx_buf_use_size = rx_size;
2199         bp->rx_buf_size = rx_space;
2200
2201         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2202         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2203
2204         ring_size = bp->tx_ring_size;
2205         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2206         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2207
2208         ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2209         bp->cp_ring_size = ring_size;
2210
2211         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2212         if (bp->cp_nr_pages > MAX_CP_PAGES) {
2213                 bp->cp_nr_pages = MAX_CP_PAGES;
2214                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2215                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2216                             ring_size, bp->cp_ring_size);
2217         }
2218         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2219         bp->cp_ring_mask = bp->cp_bit - 1;
2220 }
2221
2222 static void bnxt_free_vnic_attributes(struct bnxt *bp)
2223 {
2224         int i;
2225         struct bnxt_vnic_info *vnic;
2226         struct pci_dev *pdev = bp->pdev;
2227
2228         if (!bp->vnic_info)
2229                 return;
2230
2231         for (i = 0; i < bp->nr_vnics; i++) {
2232                 vnic = &bp->vnic_info[i];
2233
2234                 kfree(vnic->fw_grp_ids);
2235                 vnic->fw_grp_ids = NULL;
2236
2237                 kfree(vnic->uc_list);
2238                 vnic->uc_list = NULL;
2239
2240                 if (vnic->mc_list) {
2241                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2242                                           vnic->mc_list, vnic->mc_list_mapping);
2243                         vnic->mc_list = NULL;
2244                 }
2245
2246                 if (vnic->rss_table) {
2247                         dma_free_coherent(&pdev->dev, PAGE_SIZE,
2248                                           vnic->rss_table,
2249                                           vnic->rss_table_dma_addr);
2250                         vnic->rss_table = NULL;
2251                 }
2252
2253                 vnic->rss_hash_key = NULL;
2254                 vnic->flags = 0;
2255         }
2256 }
2257
2258 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2259 {
2260         int i, rc = 0, size;
2261         struct bnxt_vnic_info *vnic;
2262         struct pci_dev *pdev = bp->pdev;
2263         int max_rings;
2264
2265         for (i = 0; i < bp->nr_vnics; i++) {
2266                 vnic = &bp->vnic_info[i];
2267
2268                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2269                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2270
2271                         if (mem_size > 0) {
2272                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2273                                 if (!vnic->uc_list) {
2274                                         rc = -ENOMEM;
2275                                         goto out;
2276                                 }
2277                         }
2278                 }
2279
2280                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2281                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2282                         vnic->mc_list =
2283                                 dma_alloc_coherent(&pdev->dev,
2284                                                    vnic->mc_list_size,
2285                                                    &vnic->mc_list_mapping,
2286                                                    GFP_KERNEL);
2287                         if (!vnic->mc_list) {
2288                                 rc = -ENOMEM;
2289                                 goto out;
2290                         }
2291                 }
2292
2293                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2294                         max_rings = bp->rx_nr_rings;
2295                 else
2296                         max_rings = 1;
2297
2298                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2299                 if (!vnic->fw_grp_ids) {
2300                         rc = -ENOMEM;
2301                         goto out;
2302                 }
2303
2304                 /* Allocate rss table and hash key */
2305                 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2306                                                      &vnic->rss_table_dma_addr,
2307                                                      GFP_KERNEL);
2308                 if (!vnic->rss_table) {
2309                         rc = -ENOMEM;
2310                         goto out;
2311                 }
2312
2313                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2314
2315                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2316                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2317         }
2318         return 0;
2319
2320 out:
2321         return rc;
2322 }
2323
2324 static void bnxt_free_hwrm_resources(struct bnxt *bp)
2325 {
2326         struct pci_dev *pdev = bp->pdev;
2327
2328         dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2329                           bp->hwrm_cmd_resp_dma_addr);
2330
2331         bp->hwrm_cmd_resp_addr = NULL;
2332         if (bp->hwrm_dbg_resp_addr) {
2333                 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2334                                   bp->hwrm_dbg_resp_addr,
2335                                   bp->hwrm_dbg_resp_dma_addr);
2336
2337                 bp->hwrm_dbg_resp_addr = NULL;
2338         }
2339 }
2340
2341 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2342 {
2343         struct pci_dev *pdev = bp->pdev;
2344
2345         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2346                                                    &bp->hwrm_cmd_resp_dma_addr,
2347                                                    GFP_KERNEL);
2348         if (!bp->hwrm_cmd_resp_addr)
2349                 return -ENOMEM;
2350         bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2351                                                     HWRM_DBG_REG_BUF_SIZE,
2352                                                     &bp->hwrm_dbg_resp_dma_addr,
2353                                                     GFP_KERNEL);
2354         if (!bp->hwrm_dbg_resp_addr)
2355                 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
2356
2357         return 0;
2358 }
2359
2360 static void bnxt_free_stats(struct bnxt *bp)
2361 {
2362         u32 size, i;
2363         struct pci_dev *pdev = bp->pdev;
2364
2365         if (bp->hw_rx_port_stats) {
2366                 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
2367                                   bp->hw_rx_port_stats,
2368                                   bp->hw_rx_port_stats_map);
2369                 bp->hw_rx_port_stats = NULL;
2370                 bp->flags &= ~BNXT_FLAG_PORT_STATS;
2371         }
2372
2373         if (!bp->bnapi)
2374                 return;
2375
2376         size = sizeof(struct ctx_hw_stats);
2377
2378         for (i = 0; i < bp->cp_nr_rings; i++) {
2379                 struct bnxt_napi *bnapi = bp->bnapi[i];
2380                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2381
2382                 if (cpr->hw_stats) {
2383                         dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2384                                           cpr->hw_stats_map);
2385                         cpr->hw_stats = NULL;
2386                 }
2387         }
2388 }
2389
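/* Allocate one coherent ctx_hw_stats block per completion ring for the
 * firmware statistics contexts.  On the PF, an additional buffer is
 * allocated for the RX and TX port statistics; the TX block lives 512
 * bytes past the end of the RX block within the same allocation.
 */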
2390 static int bnxt_alloc_stats(struct bnxt *bp)
2391 {
2392         u32 size, i;
2393         struct pci_dev *pdev = bp->pdev;
2394
2395         size = sizeof(struct ctx_hw_stats);
2396
2397         for (i = 0; i < bp->cp_nr_rings; i++) {
2398                 struct bnxt_napi *bnapi = bp->bnapi[i];
2399                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2400
2401                 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2402                                                    &cpr->hw_stats_map,
2403                                                    GFP_KERNEL);
2404                 if (!cpr->hw_stats)
2405                         return -ENOMEM;
2406
2407                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2408         }
2409
2410         if (BNXT_PF(bp)) {
2411                 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
2412                                          sizeof(struct tx_port_stats) + 1024;
2413
2414                 bp->hw_rx_port_stats =
2415                         dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
2416                                            &bp->hw_rx_port_stats_map,
2417                                            GFP_KERNEL);
2418                 if (!bp->hw_rx_port_stats)
2419                         return -ENOMEM;
2420
2421                 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
2422                                        512;
2423                 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
2424                                            sizeof(struct rx_port_stats) + 512;
2425                 bp->flags |= BNXT_FLAG_PORT_STATS;
2426         }
2427         return 0;
2428 }
2429
2430 static void bnxt_clear_ring_indices(struct bnxt *bp)
2431 {
2432         int i;
2433
2434         if (!bp->bnapi)
2435                 return;
2436
2437         for (i = 0; i < bp->cp_nr_rings; i++) {
2438                 struct bnxt_napi *bnapi = bp->bnapi[i];
2439                 struct bnxt_cp_ring_info *cpr;
2440                 struct bnxt_rx_ring_info *rxr;
2441                 struct bnxt_tx_ring_info *txr;
2442
2443                 if (!bnapi)
2444                         continue;
2445
2446                 cpr = &bnapi->cp_ring;
2447                 cpr->cp_raw_cons = 0;
2448
2449                 txr = bnapi->tx_ring;
2450                 if (txr) {
2451                         txr->tx_prod = 0;
2452                         txr->tx_cons = 0;
2453                 }
2454
2455                 rxr = bnapi->rx_ring;
2456                 if (rxr) {
2457                         rxr->rx_prod = 0;
2458                         rxr->rx_agg_prod = 0;
2459                         rxr->rx_sw_agg_prod = 0;
2460                 }
2461         }
2462 }
2463
2464 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2465 {
2466 #ifdef CONFIG_RFS_ACCEL
2467         int i;
2468
2469         /* We are under rtnl_lock and all our NAPIs have been disabled.
2470          * It is safe to delete the hash table.
2471          */
2472         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
2473                 struct hlist_head *head;
2474                 struct hlist_node *tmp;
2475                 struct bnxt_ntuple_filter *fltr;
2476
2477                 head = &bp->ntp_fltr_hash_tbl[i];
2478                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
2479                         hlist_del(&fltr->hash);
2480                         kfree(fltr);
2481                 }
2482         }
2483         if (irq_reinit) {
2484                 kfree(bp->ntp_fltr_bmap);
2485                 bp->ntp_fltr_bmap = NULL;
2486         }
2487         bp->ntp_fltr_count = 0;
2488 #endif
2489 }
2490
2491 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
2492 {
2493 #ifdef CONFIG_RFS_ACCEL
2494         int i, rc = 0;
2495
2496         if (!(bp->flags & BNXT_FLAG_RFS))
2497                 return 0;
2498
2499         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
2500                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
2501
2502         bp->ntp_fltr_count = 0;
2503         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
2504                                     sizeof(long), GFP_KERNEL);
2505
2506         if (!bp->ntp_fltr_bmap)
2507                 rc = -ENOMEM;
2508
2509         return rc;
2510 #else
2511         return 0;
2512 #endif
2513 }
2514
2515 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
2516 {
2517         bnxt_free_vnic_attributes(bp);
2518         bnxt_free_tx_rings(bp);
2519         bnxt_free_rx_rings(bp);
2520         bnxt_free_cp_rings(bp);
2521         bnxt_free_ntp_fltrs(bp, irq_re_init);
2522         if (irq_re_init) {
2523                 bnxt_free_stats(bp);
2524                 bnxt_free_ring_grps(bp);
2525                 bnxt_free_vnics(bp);
2526                 kfree(bp->tx_ring);
2527                 bp->tx_ring = NULL;
2528                 kfree(bp->rx_ring);
2529                 bp->rx_ring = NULL;
2530                 kfree(bp->bnapi);
2531                 bp->bnapi = NULL;
2532         } else {
2533                 bnxt_clear_ring_indices(bp);
2534         }
2535 }
2536
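/* Allocate (or re-initialize) all driver memory.  With irq_re_init the
 * bnxt_napi array and the RX/TX ring arrays are (re)allocated and linked,
 * followed by stats, ntuple filter and VNIC bookkeeping; the descriptor
 * rings and VNIC attributes are allocated on every call.  On failure
 * everything is torn down again via bnxt_free_mem().
 */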
2537 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
2538 {
2539         int i, j, rc, size, arr_size;
2540         void *bnapi;
2541
2542         if (irq_re_init) {
2543                 /* Allocate bnapi mem pointer array and mem block for
2544                  * all queues
2545                  */
2546                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
2547                                 bp->cp_nr_rings);
2548                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
2549                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
2550                 if (!bnapi)
2551                         return -ENOMEM;
2552
2553                 bp->bnapi = bnapi;
2554                 bnapi += arr_size;
2555                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
2556                         bp->bnapi[i] = bnapi;
2557                         bp->bnapi[i]->index = i;
2558                         bp->bnapi[i]->bp = bp;
2559                 }
2560
2561                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
2562                                       sizeof(struct bnxt_rx_ring_info),
2563                                       GFP_KERNEL);
2564                 if (!bp->rx_ring)
2565                         return -ENOMEM;
2566
2567                 for (i = 0; i < bp->rx_nr_rings; i++) {
2568                         bp->rx_ring[i].bnapi = bp->bnapi[i];
2569                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
2570                 }
2571
2572                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
2573                                       sizeof(struct bnxt_tx_ring_info),
2574                                       GFP_KERNEL);
2575                 if (!bp->tx_ring)
2576                         return -ENOMEM;
2577
2578                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
2579                         j = 0;
2580                 else
2581                         j = bp->rx_nr_rings;
2582
2583                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
2584                         bp->tx_ring[i].bnapi = bp->bnapi[j];
2585                         bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
2586                 }
2587
2588                 rc = bnxt_alloc_stats(bp);
2589                 if (rc)
2590                         goto alloc_mem_err;
2591
2592                 rc = bnxt_alloc_ntp_fltrs(bp);
2593                 if (rc)
2594                         goto alloc_mem_err;
2595
2596                 rc = bnxt_alloc_vnics(bp);
2597                 if (rc)
2598                         goto alloc_mem_err;
2599         }
2600
2601         bnxt_init_ring_struct(bp);
2602
2603         rc = bnxt_alloc_rx_rings(bp);
2604         if (rc)
2605                 goto alloc_mem_err;
2606
2607         rc = bnxt_alloc_tx_rings(bp);
2608         if (rc)
2609                 goto alloc_mem_err;
2610
2611         rc = bnxt_alloc_cp_rings(bp);
2612         if (rc)
2613                 goto alloc_mem_err;
2614
2615         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
2616                                   BNXT_VNIC_UCAST_FLAG;
2617         rc = bnxt_alloc_vnic_attributes(bp);
2618         if (rc)
2619                 goto alloc_mem_err;
2620         return 0;
2621
2622 alloc_mem_err:
2623         bnxt_free_mem(bp, true);
2624         return rc;
2625 }
2626
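/* Fill the common header shared by all HWRM request structures (they all
 * begin with struct input).  Illustrative call sequence, mirroring
 * bnxt_hwrm_func_drv_unrgtr() below:
 *
 *	struct hwrm_func_drv_unrgtr_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
 *	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 */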
2627 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
2628                             u16 cmpl_ring, u16 target_id)
2629 {
2630         struct input *req = request;
2631
2632         req->req_type = cpu_to_le16(req_type);
2633         req->cmpl_ring = cpu_to_le16(cmpl_ring);
2634         req->target_id = cpu_to_le16(target_id);
2635         req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
2636 }
2637
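/* Send one HWRM message and wait for its response.  The request is
 * copied into BAR0, the remainder of the request window is zeroed and
 * the channel doorbell at offset 0x100 is rung.  If a completion ring was
 * specified, the routine waits for the HWRM completion interrupt to be
 * processed; otherwise it polls the response length and then the valid
 * word.  Callers serialize via hwrm_cmd_lock (see hwrm_send_message());
 * only one message may be outstanding at a time.
 */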
2638 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
2639                                  int timeout, bool silent)
2640 {
2641         int i, intr_process, rc;
2642         struct input *req = msg;
2643         u32 *data = msg;
2644         __le32 *resp_len, *valid;
2645         u16 cp_ring_id, len = 0;
2646         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
2647
2648         req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
2649         memset(resp, 0, PAGE_SIZE);
2650         cp_ring_id = le16_to_cpu(req->cmpl_ring);
2651         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
2652
2653         /* Write request msg to hwrm channel */
2654         __iowrite32_copy(bp->bar0, data, msg_len / 4);
2655
2656         for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
2657                 writel(0, bp->bar0 + i);
2658
2659         /* currently supports only one outstanding message */
2660         if (intr_process)
2661                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
2662
2663         /* Ring channel doorbell */
2664         writel(1, bp->bar0 + 0x100);
2665
2666         if (!timeout)
2667                 timeout = DFLT_HWRM_CMD_TIMEOUT;
2668
2669         i = 0;
2670         if (intr_process) {
2671                 /* Wait until hwrm response cmpl interrupt is processed */
2672                 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
2673                        i++ < timeout) {
2674                         usleep_range(600, 800);
2675                 }
2676
2677                 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
2678                         netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
2679                                    le16_to_cpu(req->req_type));
2680                         return -1;
2681                 }
2682         } else {
2683                 /* Check if response len is updated */
2684                 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
2685                 for (i = 0; i < timeout; i++) {
2686                         len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
2687                               HWRM_RESP_LEN_SFT;
2688                         if (len)
2689                                 break;
2690                         usleep_range(600, 800);
2691                 }
2692
2693                 if (i >= timeout) {
2694                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
2695                                    timeout, le16_to_cpu(req->req_type),
2696                                    le16_to_cpu(req->seq_id), le32_to_cpu(*resp_len));
2697                         return -1;
2698                 }
2699
2700                 /* Last word of resp contains valid bit */
2701                 valid = bp->hwrm_cmd_resp_addr + len - 4;
2702                 for (i = 0; i < timeout; i++) {
2703                         if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
2704                                 break;
2705                         usleep_range(600, 800);
2706                 }
2707
2708                 if (i >= timeout) {
2709                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
2710                                    timeout, le16_to_cpu(req->req_type),
2711                                    le16_to_cpu(req->seq_id), len, le32_to_cpu(*valid));
2712                         return -1;
2713                 }
2714         }
2715
2716         rc = le16_to_cpu(resp->error_code);
2717         if (rc && !silent)
2718                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
2719                            le16_to_cpu(resp->req_type),
2720                            le16_to_cpu(resp->seq_id), rc);
2721         return rc;
2722 }
2723
2724 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2725 {
2726         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
2727 }
2728
2729 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2730 {
2731         int rc;
2732
2733         mutex_lock(&bp->hwrm_cmd_lock);
2734         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
2735         mutex_unlock(&bp->hwrm_cmd_lock);
2736         return rc;
2737 }
2738
2739 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
2740                              int timeout)
2741 {
2742         int rc;
2743
2744         mutex_lock(&bp->hwrm_cmd_lock);
2745         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
2746         mutex_unlock(&bp->hwrm_cmd_lock);
2747         return rc;
2748 }
2749
2750 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
2751 {
2752         struct hwrm_func_drv_rgtr_input req = {0};
2753         int i;
2754
2755         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
2756
2757         req.enables =
2758                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
2759                             FUNC_DRV_RGTR_REQ_ENABLES_VER |
2760                             FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
2761
2762         /* TODO: the async event fwd bits are not yet defined; the firmware
2763          * only checks whether the field is non-zero to enable async event forwarding
2764          */
2765         req.async_event_fwd[0] |= cpu_to_le32(1);
2766         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
2767         req.ver_maj = DRV_VER_MAJ;
2768         req.ver_min = DRV_VER_MIN;
2769         req.ver_upd = DRV_VER_UPD;
2770
2771         if (BNXT_PF(bp)) {
2772                 DECLARE_BITMAP(vf_req_snif_bmap, 256);
2773                 u32 *data = (u32 *)vf_req_snif_bmap;
2774
2775                 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
2776                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
2777                         __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
2778
2779                 for (i = 0; i < 8; i++)
2780                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
2781
2782                 req.enables |=
2783                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
2784         }
2785
2786         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2787 }
2788
2789 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
2790 {
2791         struct hwrm_func_drv_unrgtr_input req = {0};
2792
2793         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
2794         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2795 }
2796
2797 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
2798 {
2799         u32 rc = 0;
2800         struct hwrm_tunnel_dst_port_free_input req = {0};
2801
2802         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
2803         req.tunnel_type = tunnel_type;
2804
2805         switch (tunnel_type) {
2806         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
2807                 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
2808                 break;
2809         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
2810                 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
2811                 break;
2812         default:
2813                 break;
2814         }
2815
2816         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2817         if (rc)
2818                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
2819                            rc);
2820         return rc;
2821 }
2822
2823 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
2824                                            u8 tunnel_type)
2825 {
2826         u32 rc = 0;
2827         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2828         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2829
2830         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
2831
2832         req.tunnel_type = tunnel_type;
2833         req.tunnel_dst_port_val = port;
2834
2835         mutex_lock(&bp->hwrm_cmd_lock);
2836         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2837         if (rc) {
2838                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
2839                            rc);
2840                 goto err_out;
2841         }
2842
2843         if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
2844                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2845
2846         else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
2847                 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
2848 err_out:
2849         mutex_unlock(&bp->hwrm_cmd_lock);
2850         return rc;
2851 }
2852
2853 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
2854 {
2855         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
2856         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2857
2858         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
2859         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
2860
2861         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
2862         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
2863         req.mask = cpu_to_le32(vnic->rx_mask);
2864         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2865 }
2866
2867 #ifdef CONFIG_RFS_ACCEL
2868 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
2869                                             struct bnxt_ntuple_filter *fltr)
2870 {
2871         struct hwrm_cfa_ntuple_filter_free_input req = {0};
2872
2873         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
2874         req.ntuple_filter_id = fltr->filter_id;
2875         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2876 }
2877
2878 #define BNXT_NTP_FLTR_FLAGS                                     \
2879         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
2880          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
2881          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
2882          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
2883          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
2884          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
2885          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
2886          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
2887          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
2888          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
2889          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
2890          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
2891          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
2892          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
2893
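/* Program an RFS ntuple filter in the NIC: an exact IPv4 5-tuple match
 * (addresses, ports and protocol, all fully masked) layered on VNIC 0's
 * L2 filter, steering matching flows to the VNIC of the target RX ring
 * (fltr->rxq + 1).  The firmware-assigned filter id is saved for the
 * later _free call.
 */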
2894 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
2895                                              struct bnxt_ntuple_filter *fltr)
2896 {
2897         int rc = 0;
2898         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
2899         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
2900                 bp->hwrm_cmd_resp_addr;
2901         struct flow_keys *keys = &fltr->fkeys;
2902         struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
2903
2904         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
2905         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
2906
2907         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
2908
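        /* Program an exact IPv4 4-tuple match: the address and port masks
         * below are all-ones, so the flow's protocol, source/destination IP
         * and source/destination port select the destination VNIC (dst_id).
         */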
2909         req.ethertype = htons(ETH_P_IP);
2910         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
2911         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
2912         req.ip_protocol = keys->basic.ip_proto;
2913
2914         req.src_ipaddr[0] = keys->addrs.v4addrs.src;
2915         req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
2916         req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
2917         req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
2918
2919         req.src_port = keys->ports.src;
2920         req.src_port_mask = cpu_to_be16(0xffff);
2921         req.dst_port = keys->ports.dst;
2922         req.dst_port_mask = cpu_to_be16(0xffff);
2923
2924         req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
2925         mutex_lock(&bp->hwrm_cmd_lock);
2926         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2927         if (!rc)
2928                 fltr->filter_id = resp->ntuple_filter_id;
2929         mutex_unlock(&bp->hwrm_cmd_lock);
2930         return rc;
2931 }
2932 #endif
2933
2934 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
2935                                      u8 *mac_addr)
2936 {
2937         u32 rc = 0;
2938         struct hwrm_cfa_l2_filter_alloc_input req = {0};
2939         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2940
2941         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
2942         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
2943                                 CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
2944         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
2945         req.enables =
2946                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
2947                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
2948                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
2949         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
2950         req.l2_addr_mask[0] = 0xff;
2951         req.l2_addr_mask[1] = 0xff;
2952         req.l2_addr_mask[2] = 0xff;
2953         req.l2_addr_mask[3] = 0xff;
2954         req.l2_addr_mask[4] = 0xff;
2955         req.l2_addr_mask[5] = 0xff;
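        /* an all-ones 6-byte mask makes this an exact match on mac_addr */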
2956
2957         mutex_lock(&bp->hwrm_cmd_lock);
2958         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2959         if (!rc)
2960                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
2961                                                         resp->l2_filter_id;
2962         mutex_unlock(&bp->hwrm_cmd_lock);
2963         return rc;
2964 }
2965
2966 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
2967 {
2968         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
2969         int rc = 0;
2970
2971         /* Any associated ntuple filters will also be cleared by firmware. */
2972         mutex_lock(&bp->hwrm_cmd_lock);
2973         for (i = 0; i < num_of_vnics; i++) {
2974                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2975
2976                 for (j = 0; j < vnic->uc_filter_count; j++) {
2977                         struct hwrm_cfa_l2_filter_free_input req = {0};
2978
2979                         bnxt_hwrm_cmd_hdr_init(bp, &req,
2980                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
2981
2982                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
2983
2984                         rc = _hwrm_send_message(bp, &req, sizeof(req),
2985                                                 HWRM_CMD_TIMEOUT);
2986                 }
2987                 vnic->uc_filter_count = 0;
2988         }
2989         mutex_unlock(&bp->hwrm_cmd_lock);
2990
2991         return rc;
2992 }
2993
2994 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
2995 {
2996         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2997         struct hwrm_vnic_tpa_cfg_input req = {0};
2998
2999         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3000
3001         if (tpa_flags) {
3002                 u16 mss = bp->dev->mtu - 40;
3003                 u32 nsegs, n, segs = 0, flags;
3004
3005                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3006                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3007                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3008                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3009                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3010                 if (tpa_flags & BNXT_FLAG_GRO)
3011                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3012
3013                 req.flags = cpu_to_le32(flags);
3014
3015                 req.enables =
3016                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
3017                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3018                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
3019
3020                 /* The number of segs is in log2 units, and the first
3021                  * packet is not counted as part of these units.
3022                  */
3023                 if (mss <= PAGE_SIZE) {
3024                         n = PAGE_SIZE / mss;
3025                         nsegs = (MAX_SKB_FRAGS - 1) * n;
3026                 } else {
3027                         n = mss / PAGE_SIZE;
3028                         if (mss & (PAGE_SIZE - 1))
3029                                 n++;
3030                         nsegs = (MAX_SKB_FRAGS - n) / n;
3031                 }
3032
3033                 segs = ilog2(nsegs);
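                /* Worked example, assuming 4K pages and MAX_SKB_FRAGS == 17:
                 * a 1500-byte MTU gives mss = 1460, so n = 4096 / 1460 = 2,
                 * nsegs = (17 - 1) * 2 = 32 and segs = ilog2(32) = 5.
                 */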
3034                 req.max_agg_segs = cpu_to_le16(segs);
3035                 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
3036
3037                 req.min_agg_len = cpu_to_le32(512);
3038         }
3039         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3040
3041         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3042 }
3043
3044 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
3045 {
3046         u32 i, j, max_rings;
3047         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3048         struct hwrm_vnic_rss_cfg_input req = {0};
3049
3050         if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
3051                 return 0;
3052
3053         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
3054         if (set_rss) {
3055                 vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
3056                                  BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
3057                                  BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
3058                                  BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
3059
3060                 req.hash_type = cpu_to_le32(vnic->hash_type);
3061
3062                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3063                         max_rings = bp->rx_nr_rings;
3064                 else
3065                         max_rings = 1;
3066
3067                 /* Fill the RSS indirection table with ring group ids */
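                /* e.g. with max_rings == 4 the table repeats the ring group
                 * ids of rings 0, 1, 2, 3 in round-robin order until all
                 * HW_HASH_INDEX_SIZE entries are filled
                 */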
3068                 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3069                         if (j == max_rings)
3070                                 j = 0;
3071                         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3072                 }
3073
3074                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3075                 req.hash_key_tbl_addr =
3076                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
3077         }
3078         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3079         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3080 }
3081
3082 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3083 {
3084         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3085         struct hwrm_vnic_plcmodes_cfg_input req = {0};
3086
3087         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3088         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3089                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3090                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3091         req.enables =
3092                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3093                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3094         /* thresholds not implemented in firmware yet */
3095         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3096         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3097         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3098         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3099 }
3100
3101 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
3102 {
3103         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3104
3105         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3106         req.rss_cos_lb_ctx_id =
3107                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
3108
3109         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3110         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3111 }
3112
3113 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3114 {
3115         int i;
3116
3117         for (i = 0; i < bp->nr_vnics; i++) {
3118                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3119
3120                 if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
3121                         bnxt_hwrm_vnic_ctx_free_one(bp, i);
3122         }
3123         bp->rsscos_nr_ctxs = 0;
3124 }
3125
3126 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
3127 {
3128         int rc;
3129         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3130         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3131                                                 bp->hwrm_cmd_resp_addr;
3132
3133         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3134                                -1);
3135
3136         mutex_lock(&bp->hwrm_cmd_lock);
3137         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3138         if (!rc)
3139                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
3140                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
3141         mutex_unlock(&bp->hwrm_cmd_lock);
3142
3143         return rc;
3144 }
3145
3146 static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3147 {
3148         unsigned int ring = 0, grp_idx;
3149         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3150         struct hwrm_vnic_cfg_input req = {0};
3151
3152         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3153         /* Only RSS is supported for now; TBD: COS & LB */
3154         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
3155                                   VNIC_CFG_REQ_ENABLES_RSS_RULE);
3156         req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3157         req.cos_rule = cpu_to_le16(0xffff);
3158         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3159                 ring = 0;
3160         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3161                 ring = vnic_id - 1;
3162
3163         grp_idx = bp->rx_ring[ring].bnapi->index;
3164         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3165         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3166
3167         req.lb_rule = cpu_to_le16(0xffff);
3168         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3169                               VLAN_HLEN);
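        /* e.g. a 1500-byte MTU gives an MRU of 1500 + 14 + 4 + 4 = 1522 */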
3170
3171         if (bp->flags & BNXT_FLAG_STRIP_VLAN)
3172                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3173
3174         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3175 }
3176
3177 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3178 {
3179         u32 rc = 0;
3180
3181         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3182                 struct hwrm_vnic_free_input req = {0};
3183
3184                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3185                 req.vnic_id =
3186                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3187
3188                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3189                 if (rc)
3190                         return rc;
3191                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3192         }
3193         return rc;
3194 }
3195
3196 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3197 {
3198         u16 i;
3199
3200         for (i = 0; i < bp->nr_vnics; i++)
3201                 bnxt_hwrm_vnic_free_one(bp, i);
3202 }
3203
3204 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
3205                                 unsigned int start_rx_ring_idx,
3206                                 unsigned int nr_rings)
3207 {
3208         int rc = 0;
3209         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
3210         struct hwrm_vnic_alloc_input req = {0};
3211         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3212
3213         /* map ring groups to this vnic */
3214         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
3215                 grp_idx = bp->rx_ring[i].bnapi->index;
3216                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
3217                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
3218                                    j, nr_rings);
3219                         break;
3220                 }
3221                 bp->vnic_info[vnic_id].fw_grp_ids[j] =
3222                                         bp->grp_info[grp_idx].fw_grp_id;
3223         }
3224
3225         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3226         if (vnic_id == 0)
3227                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3228
3229         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3230
3231         mutex_lock(&bp->hwrm_cmd_lock);
3232         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3233         if (!rc)
3234                 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3235         mutex_unlock(&bp->hwrm_cmd_lock);
3236         return rc;
3237 }
3238
3239 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
3240 {
3241         u16 i;
3242         u32 rc = 0;
3243
3244         mutex_lock(&bp->hwrm_cmd_lock);
3245         for (i = 0; i < bp->rx_nr_rings; i++) {
3246                 struct hwrm_ring_grp_alloc_input req = {0};
3247                 struct hwrm_ring_grp_alloc_output *resp =
3248                                         bp->hwrm_cmd_resp_addr;
3249                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
3250
3251                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
3252
3253                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
3254                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
3255                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
3256                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
3257
3258                 rc = _hwrm_send_message(bp, &req, sizeof(req),
3259                                         HWRM_CMD_TIMEOUT);
3260                 if (rc)
3261                         break;
3262
3263                 bp->grp_info[grp_idx].fw_grp_id =
3264                         le32_to_cpu(resp->ring_group_id);
3265         }
3266         mutex_unlock(&bp->hwrm_cmd_lock);
3267         return rc;
3268 }
3269
3270 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
3271 {
3272         u16 i;
3273         u32 rc = 0;
3274         struct hwrm_ring_grp_free_input req = {0};
3275
3276         if (!bp->grp_info)
3277                 return 0;
3278
3279         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
3280
3281         mutex_lock(&bp->hwrm_cmd_lock);
3282         for (i = 0; i < bp->cp_nr_rings; i++) {
3283                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
3284                         continue;
3285                 req.ring_group_id =
3286                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
3287
3288                 rc = _hwrm_send_message(bp, &req, sizeof(req),
3289                                         HWRM_CMD_TIMEOUT);
3290                 if (rc)
3291                         break;
3292                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3293         }
3294         mutex_unlock(&bp->hwrm_cmd_lock);
3295         return rc;
3296 }
3297
3298 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
3299                                     struct bnxt_ring_struct *ring,
3300                                     u32 ring_type, u32 map_index,
3301                                     u32 stats_ctx_id)
3302 {
3303         int rc = 0, err = 0;
3304         struct hwrm_ring_alloc_input req = {0};
3305         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3306         u16 ring_id;
3307
3308         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
3309
3310         req.enables = 0;
3311         if (ring->nr_pages > 1) {
3312                 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
3313                 /* Page size is in log2 units */
3314                 req.page_size = BNXT_PAGE_SHIFT;
3315                 req.page_tbl_depth = 1;
3316         } else {
3317                 req.page_tbl_addr =  cpu_to_le64(ring->dma_arr[0]);
3318         }
3319         req.fbo = 0;
3320         /* Association of ring index with doorbell index and MSIX number */
3321         req.logical_id = cpu_to_le16(map_index);
3322
3323         switch (ring_type) {
3324         case HWRM_RING_ALLOC_TX:
3325                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
3326                 /* Association of transmit ring with completion ring */
3327                 req.cmpl_ring_id =
3328                         cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
3329                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
3330                 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
3331                 req.queue_id = cpu_to_le16(ring->queue_id);
3332                 break;
3333         case HWRM_RING_ALLOC_RX:
3334                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3335                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
3336                 break;
3337         case HWRM_RING_ALLOC_AGG:
3338                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3339                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
3340                 break;
3341         case HWRM_RING_ALLOC_CMPL:
3342                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
3343                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
3344                 if (bp->flags & BNXT_FLAG_USING_MSIX)
3345                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
3346                 break;
3347         default:
3348                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
3349                            ring_type);
3350                 return -1;
3351         }
3352
3353         mutex_lock(&bp->hwrm_cmd_lock);
3354         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3355         err = le16_to_cpu(resp->error_code);
3356         ring_id = le16_to_cpu(resp->ring_id);
3357         mutex_unlock(&bp->hwrm_cmd_lock);
3358
3359         if (rc || err) {
3360                 switch (ring_type) {
3361                 case RING_FREE_REQ_RING_TYPE_CMPL:
3362                         netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
3363                                    rc, err);
3364                         return -1;
3365
3366                 case RING_FREE_REQ_RING_TYPE_RX:
3367                         netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
3368                                    rc, err);
3369                         return -1;
3370
3371                 case RING_FREE_REQ_RING_TYPE_TX:
3372                         netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
3373                                    rc, err);
3374                         return -1;
3375
3376                 default:
3377                         netdev_err(bp->dev, "Invalid ring\n");
3378                         return -1;
3379                 }
3380         }
3381         ring->fw_ring_id = ring_id;
3382         return rc;
3383 }
3384
3385 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3386 {
3387         int i, rc = 0;
3388
3389         for (i = 0; i < bp->cp_nr_rings; i++) {
3390                 struct bnxt_napi *bnapi = bp->bnapi[i];
3391                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3392                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3393
3394                 cpr->cp_doorbell = bp->bar1 + i * 0x80;
3395                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3396                                               INVALID_STATS_CTX_ID);
3397                 if (rc)
3398                         goto err_out;
3399                 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3400                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
3401         }
3402
3403         for (i = 0; i < bp->tx_nr_rings; i++) {
3404                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3405                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3406                 u32 map_idx = txr->bnapi->index;
3407                 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
3408
3409                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
3410                                               map_idx, fw_stats_ctx);
3411                 if (rc)
3412                         goto err_out;
3413                 txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
3414         }
3415
3416         for (i = 0; i < bp->rx_nr_rings; i++) {
3417                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3418                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3419                 u32 map_idx = rxr->bnapi->index;
3420
3421                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
3422                                               map_idx, INVALID_STATS_CTX_ID);
3423                 if (rc)
3424                         goto err_out;
3425                 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
3426                 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
3427                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3428         }
3429
3430         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3431                 for (i = 0; i < bp->rx_nr_rings; i++) {
3432                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3433                         struct bnxt_ring_struct *ring =
3434                                                 &rxr->rx_agg_ring_struct;
3435                         u32 grp_idx = rxr->bnapi->index;
3436                         u32 map_idx = grp_idx + bp->rx_nr_rings;
3437
3438                         rc = hwrm_ring_alloc_send_msg(bp, ring,
3439                                                       HWRM_RING_ALLOC_AGG,
3440                                                       map_idx,
3441                                                       INVALID_STATS_CTX_ID);
3442                         if (rc)
3443                                 goto err_out;
3444
3445                         rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
3446                         writel(DB_KEY_RX | rxr->rx_agg_prod,
3447                                rxr->rx_agg_doorbell);
3448                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
3449                 }
3450         }
3451 err_out:
3452         return rc;
3453 }
3454
3455 static int hwrm_ring_free_send_msg(struct bnxt *bp,
3456                                    struct bnxt_ring_struct *ring,
3457                                    u32 ring_type, int cmpl_ring_id)
3458 {
3459         int rc;
3460         struct hwrm_ring_free_input req = {0};
3461         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3462         u16 error_code;
3463
3464         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
3465         req.ring_type = ring_type;
3466         req.ring_id = cpu_to_le16(ring->fw_ring_id);
3467
3468         mutex_lock(&bp->hwrm_cmd_lock);
3469         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3470         error_code = le16_to_cpu(resp->error_code);
3471         mutex_unlock(&bp->hwrm_cmd_lock);
3472
3473         if (rc || error_code) {
3474                 switch (ring_type) {
3475                 case RING_FREE_REQ_RING_TYPE_CMPL:
3476                         netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
3477                                    rc);
3478                         return rc;
3479                 case RING_FREE_REQ_RING_TYPE_RX:
3480                         netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
3481                                    rc);
3482                         return rc;
3483                 case RING_FREE_REQ_RING_TYPE_TX:
3484                         netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
3485                                    rc);
3486                         return rc;
3487                 default:
3488                         netdev_err(bp->dev, "Invalid ring\n");
3489                         return -1;
3490                 }
3491         }
3492         return 0;
3493 }
3494
3495 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
3496 {
3497         int i;
3498
3499         if (!bp->bnapi)
3500                 return;
3501
3502         for (i = 0; i < bp->tx_nr_rings; i++) {
3503                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3504                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3505                 u32 grp_idx = txr->bnapi->index;
3506                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
3507
3508                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3509                         hwrm_ring_free_send_msg(bp, ring,
3510                                                 RING_FREE_REQ_RING_TYPE_TX,
3511                                                 close_path ? cmpl_ring_id :
3512                                                 INVALID_HW_RING_ID);
3513                         ring->fw_ring_id = INVALID_HW_RING_ID;
3514                 }
3515         }
3516
3517         for (i = 0; i < bp->rx_nr_rings; i++) {
3518                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3519                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3520                 u32 grp_idx = rxr->bnapi->index;
3521                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
3522
3523                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3524                         hwrm_ring_free_send_msg(bp, ring,
3525                                                 RING_FREE_REQ_RING_TYPE_RX,
3526                                                 close_path ? cmpl_ring_id :
3527                                                 INVALID_HW_RING_ID);
3528                         ring->fw_ring_id = INVALID_HW_RING_ID;
3529                         bp->grp_info[grp_idx].rx_fw_ring_id =
3530                                 INVALID_HW_RING_ID;
3531                 }
3532         }
3533
3534         for (i = 0; i < bp->rx_nr_rings; i++) {
3535                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3536                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
3537                 u32 grp_idx = rxr->bnapi->index;
3538                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
3539
3540                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3541                         hwrm_ring_free_send_msg(bp, ring,
3542                                                 RING_FREE_REQ_RING_TYPE_RX,
3543                                                 close_path ? cmpl_ring_id :
3544                                                 INVALID_HW_RING_ID);
3545                         ring->fw_ring_id = INVALID_HW_RING_ID;
3546                         bp->grp_info[grp_idx].agg_fw_ring_id =
3547                                 INVALID_HW_RING_ID;
3548                 }
3549         }
3550
3551         for (i = 0; i < bp->cp_nr_rings; i++) {
3552                 struct bnxt_napi *bnapi = bp->bnapi[i];
3553                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3554                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3555
3556                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3557                         hwrm_ring_free_send_msg(bp, ring,
3558                                                 RING_FREE_REQ_RING_TYPE_CMPL,
3559                                                 INVALID_HW_RING_ID);
3560                         ring->fw_ring_id = INVALID_HW_RING_ID;
3561                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3562                 }
3563         }
3564 }
3565
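/* Fill a cmpl_ring_cfg_aggint_params request from packed arguments.  Each
 * u32 packs two 16-bit values: the low 16 bits are used outside interrupt
 * service and the high 16 bits while servicing an interrupt, e.g. the
 * caller below passes (max_buf_irq << 16) | max_buf.
 */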
3566 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
3567         u32 buf_tmrs, u16 flags,
3568         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3569 {
3570         req->flags = cpu_to_le16(flags);
3571         req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
3572         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
3573         req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
3574         req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
3575         /* Minimum time between 2 interrupts set to buf_tmr x 2 */
3576         req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
3577         req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
3578         req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
3579 }
3580
3581 int bnxt_hwrm_set_coal(struct bnxt *bp)
3582 {
3583         int i, rc = 0;
3584         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
3585                                                            req_tx = {0}, *req;
3586         u16 max_buf, max_buf_irq;
3587         u16 buf_tmr, buf_tmr_irq;
3588         u32 flags;
3589
3590         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
3591                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
3592         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
3593                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
3594
3595         /* Each rx completion (2 records) should be DMAed immediately.
3596          * DMA 1/4 of the completion buffers at a time.
3597          */
3598         max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
3599         /* max_buf must not be zero */
3600         max_buf = clamp_t(u16, max_buf, 1, 63);
3601         max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
3602         buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
3603         /* buf timer set to 1/4 of interrupt timer */
3604         buf_tmr = max_t(u16, buf_tmr / 4, 1);
3605         buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
3606         buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
3607
3608         flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
3609
3610         /* RING_IDLE generates more IRQs for lower latency.  Enable it only
3611          * if coal_ticks is less than 25 us.
3612          */
3613         if (bp->rx_coal_ticks < 25)
3614                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
3615
3616         bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
3617                                   buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
3618
3619         /* max_buf must not be zero */
3620         max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
3621         max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
3622         buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
3623         /* buf timer set to 1/4 of interrupt timer */
3624         buf_tmr = max_t(u16, buf_tmr / 4, 1);
3625         buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
3626         buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
3627
3628         flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
3629         bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
3630                                   buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
3631
3632         mutex_lock(&bp->hwrm_cmd_lock);
3633         for (i = 0; i < bp->cp_nr_rings; i++) {
3634                 struct bnxt_napi *bnapi = bp->bnapi[i];
3635
3636                 req = &req_rx;
3637                 if (!bnapi->rx_ring)
3638                         req = &req_tx;
3639                 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
3640
3641                 rc = _hwrm_send_message(bp, req, sizeof(*req),
3642                                         HWRM_CMD_TIMEOUT);
3643                 if (rc)
3644                         break;
3645         }
3646         mutex_unlock(&bp->hwrm_cmd_lock);
3647         return rc;
3648 }
3649
3650 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
3651 {
3652         int rc = 0, i;
3653         struct hwrm_stat_ctx_free_input req = {0};
3654
3655         if (!bp->bnapi)
3656                 return 0;
3657
3658         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
3659
3660         mutex_lock(&bp->hwrm_cmd_lock);
3661         for (i = 0; i < bp->cp_nr_rings; i++) {
3662                 struct bnxt_napi *bnapi = bp->bnapi[i];
3663                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3664
3665                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
3666                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
3667
3668                         rc = _hwrm_send_message(bp, &req, sizeof(req),
3669                                                 HWRM_CMD_TIMEOUT);
3670                         if (rc)
3671                                 break;
3672
3673                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3674                 }
3675         }
3676         mutex_unlock(&bp->hwrm_cmd_lock);
3677         return rc;
3678 }
3679
3680 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
3681 {
3682         int rc = 0, i;
3683         struct hwrm_stat_ctx_alloc_input req = {0};
3684         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3685
3686         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
3687
3688         req.update_period_ms = cpu_to_le32(1000);
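        /* firmware refreshes the stats context every 1000 ms */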
3689
3690         mutex_lock(&bp->hwrm_cmd_lock);
3691         for (i = 0; i < bp->cp_nr_rings; i++) {
3692                 struct bnxt_napi *bnapi = bp->bnapi[i];
3693                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3694
3695                 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
3696
3697                 rc = _hwrm_send_message(bp, &req, sizeof(req),
3698                                         HWRM_CMD_TIMEOUT);
3699                 if (rc)
3700                         break;
3701
3702                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
3703
3704                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
3705         }
3706         mutex_unlock(&bp->hwrm_cmd_lock);
3707         return rc;
3708 }
3709
3710 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
3711 {
3712         int rc = 0;
3713         struct hwrm_func_qcaps_input req = {0};
3714         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3715
3716         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
3717         req.fid = cpu_to_le16(0xffff);
3718
3719         mutex_lock(&bp->hwrm_cmd_lock);
3720         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3721         if (rc)
3722                 goto hwrm_func_qcaps_exit;
3723
3724         if (BNXT_PF(bp)) {
3725                 struct bnxt_pf_info *pf = &bp->pf;
3726
3727                 pf->fw_fid = le16_to_cpu(resp->fid);
3728                 pf->port_id = le16_to_cpu(resp->port_id);
3729                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
3730                 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
3731                 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3732                 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3733                 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3734                 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3735                 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3736                 if (!pf->max_hw_ring_grps)
3737                         pf->max_hw_ring_grps = pf->max_tx_rings;
3738                 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3739                 pf->max_vnics = le16_to_cpu(resp->max_vnics);
3740                 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3741                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
3742                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
3743                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
3744                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
3745                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
3746                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
3747                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
3748                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
3749         } else {
3750 #ifdef CONFIG_BNXT_SRIOV
3751                 struct bnxt_vf_info *vf = &bp->vf;
3752
3753                 vf->fw_fid = le16_to_cpu(resp->fid);
3754                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
3755                 if (is_valid_ether_addr(vf->mac_addr))
3756                         /* overwrite netdev dev_addr with admin VF MAC */
3757                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
3758                 else
3759                         random_ether_addr(bp->dev->dev_addr);
3760
3761                 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3762                 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3763                 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3764                 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3765                 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3766                 if (!vf->max_hw_ring_grps)
3767                         vf->max_hw_ring_grps = vf->max_tx_rings;
3768                 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3769                 vf->max_vnics = le16_to_cpu(resp->max_vnics);
3770                 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3771 #endif
3772         }
3773
3774         bp->tx_push_thresh = 0;
3775         if (resp->flags &
3776             cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
3777                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
3778
3779 hwrm_func_qcaps_exit:
3780         mutex_unlock(&bp->hwrm_cmd_lock);
3781         return rc;
3782 }
3783
3784 static int bnxt_hwrm_func_reset(struct bnxt *bp)
3785 {
3786         struct hwrm_func_reset_input req = {0};
3787
3788         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
3789         req.enables = 0;
3790
3791         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
3792 }
3793
3794 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
3795 {
3796         int rc = 0;
3797         struct hwrm_queue_qportcfg_input req = {0};
3798         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
3799         u8 i, *qptr;
3800
3801         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
3802
3803         mutex_lock(&bp->hwrm_cmd_lock);
3804         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3805         if (rc)
3806                 goto qportcfg_exit;
3807
3808         if (!resp->max_configurable_queues) {
3809                 rc = -EINVAL;
3810                 goto qportcfg_exit;
3811         }
3812         bp->max_tc = resp->max_configurable_queues;
3813         if (bp->max_tc > BNXT_MAX_QUEUE)
3814                 bp->max_tc = BNXT_MAX_QUEUE;
3815
3816         qptr = &resp->queue_id0;
3817         for (i = 0; i < bp->max_tc; i++) {
3818                 bp->q_info[i].queue_id = *qptr++;
3819                 bp->q_info[i].queue_profile = *qptr++;
3820         }
3821
3822 qportcfg_exit:
3823         mutex_unlock(&bp->hwrm_cmd_lock);
3824         return rc;
3825 }
3826
3827 static int bnxt_hwrm_ver_get(struct bnxt *bp)
3828 {
3829         int rc;
3830         struct hwrm_ver_get_input req = {0};
3831         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
3832
3833         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
3834         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
3835         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
3836         req.hwrm_intf_min = HWRM_VERSION_MINOR;
3837         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
3838         mutex_lock(&bp->hwrm_cmd_lock);
3839         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3840         if (rc)
3841                 goto hwrm_ver_get_exit;
3842
3843         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
3844
3845         bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
3846                              resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
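        /* e.g. HWRM interface 1.2.3 is encoded as 0x10203 */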
3847         if (resp->hwrm_intf_maj < 1) {
3848                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
3849                             resp->hwrm_intf_maj, resp->hwrm_intf_min,
3850                             resp->hwrm_intf_upd);
3851                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
3852         }
3853         snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
3854                  resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
3855                  resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
3856
3857         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
3858         if (!bp->hwrm_cmd_timeout)
3859                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
3860
3861         if (resp->hwrm_intf_maj >= 1)
3862                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
3863
3864 hwrm_ver_get_exit:
3865         mutex_unlock(&bp->hwrm_cmd_lock);
3866         return rc;
3867 }
3868
3869 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
3870 {
3871         int rc;
3872         struct bnxt_pf_info *pf = &bp->pf;
3873         struct hwrm_port_qstats_input req = {0};
3874
3875         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3876                 return 0;
3877
3878         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
3879         req.port_id = cpu_to_le16(pf->port_id);
3880         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
3881         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
3882         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3883         return rc;
3884 }
3885
3886 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
3887 {
3888         if (bp->vxlan_port_cnt) {
3889                 bnxt_hwrm_tunnel_dst_port_free(
3890                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
3891         }
3892         bp->vxlan_port_cnt = 0;
3893         if (bp->nge_port_cnt) {
3894                 bnxt_hwrm_tunnel_dst_port_free(
3895                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
3896         }
3897         bp->nge_port_cnt = 0;
3898 }
3899
3900 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
3901 {
3902         int rc, i;
3903         u32 tpa_flags = 0;
3904
3905         if (set_tpa)
3906                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
3907         for (i = 0; i < bp->nr_vnics; i++) {
3908                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
3909                 if (rc) {
3910                         netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
3911                                    i, rc);
3912                         return rc;
3913                 }
3914         }
3915         return 0;
3916 }
3917
3918 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
3919 {
3920         int i;
3921
3922         for (i = 0; i < bp->nr_vnics; i++)
3923                 bnxt_hwrm_vnic_set_rss(bp, i, false);
3924 }
3925
3926 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
3927                                     bool irq_re_init)
3928 {
3929         if (bp->vnic_info) {
3930                 bnxt_hwrm_clear_vnic_filter(bp);
3931                 /* clear all RSS settings before freeing the vnic ctx */
3932                 bnxt_hwrm_clear_vnic_rss(bp);
3933                 bnxt_hwrm_vnic_ctx_free(bp);
3934                 /* before freeing the vnic, undo the vnic TPA settings */
3935                 if (bp->flags & BNXT_FLAG_TPA)
3936                         bnxt_set_tpa(bp, false);
3937                 bnxt_hwrm_vnic_free(bp);
3938         }
3939         bnxt_hwrm_ring_free(bp, close_path);
3940         bnxt_hwrm_ring_grp_free(bp);
3941         if (irq_re_init) {
3942                 bnxt_hwrm_stat_ctx_free(bp);
3943                 bnxt_hwrm_free_tunnel_ports(bp);
3944         }
3945 }
3946
3947 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
3948 {
3949         int rc;
3950
3951         /* allocate context for vnic */
3952         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
3953         if (rc) {
3954                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
3955                            vnic_id, rc);
3956                 goto vnic_setup_err;
3957         }
3958         bp->rsscos_nr_ctxs++;
3959
3960         /* configure default vnic, ring grp */
3961         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
3962         if (rc) {
3963                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
3964                            vnic_id, rc);
3965                 goto vnic_setup_err;
3966         }
3967
3968         /* Enable RSS hashing on vnic */
3969         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
3970         if (rc) {
3971                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
3972                            vnic_id, rc);
3973                 goto vnic_setup_err;
3974         }
3975
3976         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3977                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
3978                 if (rc) {
3979                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
3980                                    vnic_id, rc);
3981                 }
3982         }
3983
3984 vnic_setup_err:
3985         return rc;
3986 }
3987
3988 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
3989 {
3990 #ifdef CONFIG_RFS_ACCEL
3991         int i, rc = 0;
3992
3993         for (i = 0; i < bp->rx_nr_rings; i++) {
3994                 u16 vnic_id = i + 1;
3995                 u16 ring_id = i;
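                /* vnic 0 is the default vnic; RFS vnic i + 1 steers
                 * n-tuple filter traffic to rx ring i
                 */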
3996
3997                 if (vnic_id >= bp->nr_vnics)
3998                         break;
3999
4000                 bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
4001                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
4002                 if (rc) {
4003                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4004                                    vnic_id, rc);
4005                         break;
4006                 }
4007                 rc = bnxt_setup_vnic(bp, vnic_id);
4008                 if (rc)
4009                         break;
4010         }
4011         return rc;
4012 #else
4013         return 0;
4014 #endif
4015 }
4016
4017 static int bnxt_cfg_rx_mode(struct bnxt *);
4018
4019 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
4020 {
4021         int rc = 0;
4022
4023         if (irq_re_init) {
4024                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
4025                 if (rc) {
4026                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
4027                                    rc);
4028                         goto err_out;
4029                 }
4030         }
4031
4032         rc = bnxt_hwrm_ring_alloc(bp);
4033         if (rc) {
4034                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
4035                 goto err_out;
4036         }
4037
4038         rc = bnxt_hwrm_ring_grp_alloc(bp);
4039         if (rc) {
4040                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
4041                 goto err_out;
4042         }
4043
4044         /* default vnic 0 */
4045         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
4046         if (rc) {
4047                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
4048                 goto err_out;
4049         }
4050
4051         rc = bnxt_setup_vnic(bp, 0);
4052         if (rc)
4053                 goto err_out;
4054
4055         if (bp->flags & BNXT_FLAG_RFS) {
4056                 rc = bnxt_alloc_rfs_vnics(bp);
4057                 if (rc)
4058                         goto err_out;
4059         }
4060
4061         if (bp->flags & BNXT_FLAG_TPA) {
4062                 rc = bnxt_set_tpa(bp, true);
4063                 if (rc)
4064                         goto err_out;
4065         }
4066
4067         if (BNXT_VF(bp))
4068                 bnxt_update_vf_mac(bp);
4069
4070         /* Filter for default vnic 0 */
4071         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
4072         if (rc) {
4073                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
4074                 goto err_out;
4075         }
4076         bp->vnic_info[0].uc_filter_count = 1;
4077
4078         bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
4079
4080         if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
4081                 bp->vnic_info[0].rx_mask |=
4082                                 CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4083
4084         rc = bnxt_cfg_rx_mode(bp);
4085         if (rc)
4086                 goto err_out;
4087
4088         rc = bnxt_hwrm_set_coal(bp);
4089         if (rc)
4090                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
4091                             rc);
4092
4093         return 0;
4094
4095 err_out:
4096         bnxt_hwrm_resource_free(bp, 0, true);
4097
4098         return rc;
4099 }
4100
4101 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
4102 {
4103         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
4104         return 0;
4105 }
4106
4107 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
4108 {
4109         bnxt_init_rx_rings(bp);
4110         bnxt_init_tx_rings(bp);
4111         bnxt_init_ring_grps(bp, irq_re_init);
4112         bnxt_init_vnics(bp);
4113
4114         return bnxt_init_chip(bp, irq_re_init);
4115 }
4116
4117 static void bnxt_disable_int(struct bnxt *bp)
4118 {
4119         int i;
4120
4121         if (!bp->bnapi)
4122                 return;
4123
4124         for (i = 0; i < bp->cp_nr_rings; i++) {
4125                 struct bnxt_napi *bnapi = bp->bnapi[i];
4126                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4127
4128                 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
4129         }
4130 }
4131
4132 static void bnxt_enable_int(struct bnxt *bp)
4133 {
4134         int i;
4135
4136         atomic_set(&bp->intr_sem, 0);
4137         for (i = 0; i < bp->cp_nr_rings; i++) {
4138                 struct bnxt_napi *bnapi = bp->bnapi[i];
4139                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4140
4141                 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
4142         }
4143 }
4144
4145 static int bnxt_set_real_num_queues(struct bnxt *bp)
4146 {
4147         int rc;
4148         struct net_device *dev = bp->dev;
4149
4150         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
4151         if (rc)
4152                 return rc;
4153
4154         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
4155         if (rc)
4156                 return rc;
4157
4158 #ifdef CONFIG_RFS_ACCEL
4159         if (bp->flags & BNXT_FLAG_RFS)
4160                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
4161 #endif
4162
4163         return rc;
4164 }
4165
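/* Fit *rx and *tx within max completion rings.  With shared completion
 * rings both counts are simply capped at max; otherwise rx and tx need
 * separate completion rings and are trimmed alternately until they fit,
 * e.g. *rx = 4, *tx = 4, max = 6 ends up as *rx = 3, *tx = 3.
 */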
4166 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
4167                            bool shared)
4168 {
4169         int _rx = *rx, _tx = *tx;
4170
4171         if (shared) {
4172                 *rx = min_t(int, _rx, max);
4173                 *tx = min_t(int, _tx, max);
4174         } else {
4175                 if (max < 2)
4176                         return -ENOMEM;
4177
4178                 while (_rx + _tx > max) {
4179                         if (_rx > _tx && _rx > 1)
4180                                 _rx--;
4181                         else if (_tx > 1)
4182                                 _tx--;
4183                 }
4184                 *rx = _rx;
4185                 *tx = _tx;
4186         }
4187         return 0;
4188 }
4189
4190 static int bnxt_setup_msix(struct bnxt *bp)
4191 {
4192         struct msix_entry *msix_ent;
4193         struct net_device *dev = bp->dev;
4194         int i, total_vecs, rc = 0, min = 1;
4195         const int len = sizeof(bp->irq_tbl[0].name);
4196
4197         bp->flags &= ~BNXT_FLAG_USING_MSIX;
4198         total_vecs = bp->cp_nr_rings;
4199
4200         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4201         if (!msix_ent)
4202                 return -ENOMEM;
4203
4204         for (i = 0; i < total_vecs; i++) {
4205                 msix_ent[i].entry = i;
4206                 msix_ent[i].vector = 0;
4207         }
4208
4209         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
4210                 min = 2;
4211
4212         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
4213         if (total_vecs < 0) {
4214                 rc = -ENODEV;
4215                 goto msix_setup_exit;
4216         }
4217
4218         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
4219         if (bp->irq_tbl) {
4220                 int tcs;
4221
4222                 /* Trim rings based upon num of vectors allocated */
4223                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
4224                                      total_vecs, min == 1);
4225                 if (rc)
4226                         goto msix_setup_exit;
4227
4228                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4229                 tcs = netdev_get_num_tc(dev);
4230                 if (tcs > 1) {
4231                         bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4232                         if (bp->tx_nr_rings_per_tc == 0) {
4233                                 netdev_reset_tc(dev);
4234                                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4235                         } else {
4236                                 int i, off, count;
4237
4238                                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
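                                /* e.g. 8 TX rings and 2 TCs: 4 rings per
                                 * TC, TC0 -> queues 0-3, TC1 -> queues 4-7
                                 */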
4239                                 for (i = 0; i < tcs; i++) {
4240                                         count = bp->tx_nr_rings_per_tc;
4241                                         off = i * count;
4242                                         netdev_set_tc_queue(dev, i, count, off);
4243                                 }
4244                         }
4245                 }
4246                 bp->cp_nr_rings = total_vecs;
4247
4248                 for (i = 0; i < bp->cp_nr_rings; i++) {
4249                         char *attr;
4250
4251                         bp->irq_tbl[i].vector = msix_ent[i].vector;
4252                         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4253                                 attr = "TxRx";
4254                         else if (i < bp->rx_nr_rings)
4255                                 attr = "rx";
4256                         else
4257                                 attr = "tx";
4258
4259                         snprintf(bp->irq_tbl[i].name, len,
4260                                  "%s-%s-%d", dev->name, attr, i);
4261                         bp->irq_tbl[i].handler = bnxt_msix;
4262                 }
4263                 rc = bnxt_set_real_num_queues(bp);
4264                 if (rc)
4265                         goto msix_setup_exit;
4266         } else {
4267                 rc = -ENOMEM;
4268                 goto msix_setup_exit;
4269         }
4270         bp->flags |= BNXT_FLAG_USING_MSIX;
4271         kfree(msix_ent);
4272         return 0;
4273
4274 msix_setup_exit:
4275         netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
4276         pci_disable_msix(bp->pdev);
4277         kfree(msix_ent);
4278         return rc;
4279 }
4280
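     /* Fall back to legacy INTx: one shared RX/TX/completion ring serviced
      * by a single shared interrupt.
      */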
4281 static int bnxt_setup_inta(struct bnxt *bp)
4282 {
4283         int rc;
4284         const int len = sizeof(bp->irq_tbl[0].name);
4285
4286         if (netdev_get_num_tc(bp->dev))
4287                 netdev_reset_tc(bp->dev);
4288
4289         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
4290         if (!bp->irq_tbl) {
4291                 rc = -ENOMEM;
4292                 return rc;
4293         }
4294         bp->rx_nr_rings = 1;
4295         bp->tx_nr_rings = 1;
4296         bp->cp_nr_rings = 1;
4297         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4298         bp->flags |= BNXT_FLAG_SHARED_RINGS;
4299         bp->irq_tbl[0].vector = bp->pdev->irq;
4300         snprintf(bp->irq_tbl[0].name, len,
4301                  "%s-%s-%d", bp->dev->name, "TxRx", 0);
4302         bp->irq_tbl[0].handler = bnxt_inta;
4303         rc = bnxt_set_real_num_queues(bp);
4304         return rc;
4305 }
4306
4307 static int bnxt_setup_int_mode(struct bnxt *bp)
4308 {
4309         int rc = 0;
4310
4311         if (bp->flags & BNXT_FLAG_MSIX_CAP)
4312                 rc = bnxt_setup_msix(bp);
4313
4314         if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
4315                 /* fall back to INTA */
4316                 rc = bnxt_setup_inta(bp);
4317         }
4318         return rc;
4319 }
4320
4321 static void bnxt_free_irq(struct bnxt *bp)
4322 {
4323         struct bnxt_irq *irq;
4324         int i;
4325
4326 #ifdef CONFIG_RFS_ACCEL
4327         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
4328         bp->dev->rx_cpu_rmap = NULL;
4329 #endif
4330         if (!bp->irq_tbl)
4331                 return;
4332
4333         for (i = 0; i < bp->cp_nr_rings; i++) {
4334                 irq = &bp->irq_tbl[i];
4335                 if (irq->requested)
4336                         free_irq(irq->vector, bp->bnapi[i]);
4337                 irq->requested = 0;
4338         }
4339         if (bp->flags & BNXT_FLAG_USING_MSIX)
4340                 pci_disable_msix(bp->pdev);
4341         kfree(bp->irq_tbl);
4342         bp->irq_tbl = NULL;
4343 }
4344
4345 static int bnxt_request_irq(struct bnxt *bp)
4346 {
4347         int i, j, rc = 0;
4348         unsigned long flags = 0;
4349 #ifdef CONFIG_RFS_ACCEL
4350         struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
4351 #endif
4352
4353         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
4354                 flags = IRQF_SHARED;
4355
4356         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4357                 struct bnxt_irq *irq = &bp->irq_tbl[i];
4358 #ifdef CONFIG_RFS_ACCEL
4359                 if (rmap && bp->bnapi[i]->rx_ring) {
4360                         rc = irq_cpu_rmap_add(rmap, irq->vector);
4361                         if (rc)
4362                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
4363                                             j);
4364                         j++;
4365                 }
4366 #endif
4367                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
4368                                  bp->bnapi[i]);
4369                 if (rc)
4370                         break;
4371
4372                 irq->requested = 1;
4373         }
4374         return rc;
4375 }
4376
4377 static void bnxt_del_napi(struct bnxt *bp)
4378 {
4379         int i;
4380
4381         if (!bp->bnapi)
4382                 return;
4383
4384         for (i = 0; i < bp->cp_nr_rings; i++) {
4385                 struct bnxt_napi *bnapi = bp->bnapi[i];
4386
4387                 napi_hash_del(&bnapi->napi);
4388                 netif_napi_del(&bnapi->napi);
4389         }
4390 }
4391
4392 static void bnxt_init_napi(struct bnxt *bp)
4393 {
4394         int i;
4395         struct bnxt_napi *bnapi;
4396
4397         if (bp->flags & BNXT_FLAG_USING_MSIX) {
4398                 for (i = 0; i < bp->cp_nr_rings; i++) {
4399                         bnapi = bp->bnapi[i];
4400                         netif_napi_add(bp->dev, &bnapi->napi,
4401                                        bnxt_poll, 64);
4402                 }
4403         } else {
4404                 bnapi = bp->bnapi[0];
4405                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
4406         }
4407 }
4408
4409 static void bnxt_disable_napi(struct bnxt *bp)
4410 {
4411         int i;
4412
4413         if (!bp->bnapi)
4414                 return;
4415
4416         for (i = 0; i < bp->cp_nr_rings; i++) {
4417                 napi_disable(&bp->bnapi[i]->napi);
4418                 bnxt_disable_poll(bp->bnapi[i]);
4419         }
4420 }
4421
4422 static void bnxt_enable_napi(struct bnxt *bp)
4423 {
4424         int i;
4425
4426         for (i = 0; i < bp->cp_nr_rings; i++) {
4427                 bnxt_enable_poll(bp->bnapi[i]);
4428                 napi_enable(&bp->bnapi[i]->napi);
4429         }
4430 }
4431
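     /* Mark every TX ring as closing under its TX queue lock so the xmit
      * path stops queueing, then disable the netdev TX queues and drop the
      * carrier.
      */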
4432 static void bnxt_tx_disable(struct bnxt *bp)
4433 {
4434         int i;
4435         struct bnxt_tx_ring_info *txr;
4436         struct netdev_queue *txq;
4437
4438         if (bp->tx_ring) {
4439                 for (i = 0; i < bp->tx_nr_rings; i++) {
4440                         txr = &bp->tx_ring[i];
4441                         txq = netdev_get_tx_queue(bp->dev, i);
4442                         __netif_tx_lock(txq, smp_processor_id());
4443                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
4444                         __netif_tx_unlock(txq);
4445                 }
4446         }
4447         /* Stop all TX queues */
4448         netif_tx_disable(bp->dev);
4449         netif_carrier_off(bp->dev);
4450 }
4451
4452 static void bnxt_tx_enable(struct bnxt *bp)
4453 {
4454         int i;
4455         struct bnxt_tx_ring_info *txr;
4456         struct netdev_queue *txq;
4457
4458         for (i = 0; i < bp->tx_nr_rings; i++) {
4459                 txr = &bp->tx_ring[i];
4460                 txq = netdev_get_tx_queue(bp->dev, i);
4461                 txr->dev_state = 0;
4462         }
4463         netif_tx_wake_all_queues(bp->dev);
4464         if (bp->link_info.link_up)
4465                 netif_carrier_on(bp->dev);
4466 }
4467
4468 static void bnxt_report_link(struct bnxt *bp)
4469 {
4470         if (bp->link_info.link_up) {
4471                 const char *duplex;
4472                 const char *flow_ctrl;
4473                 u16 speed;
4474
4475                 netif_carrier_on(bp->dev);
4476                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
4477                         duplex = "full";
4478                 else
4479                         duplex = "half";
4480                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
4481                         flow_ctrl = "ON - receive & transmit";
4482                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
4483                         flow_ctrl = "ON - transmit";
4484                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
4485                         flow_ctrl = "ON - receive";
4486                 else
4487                         flow_ctrl = "none";
4488                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
4489                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
4490                             speed, duplex, flow_ctrl);
4491                 if (bp->flags & BNXT_FLAG_EEE_CAP)
4492                         netdev_info(bp->dev, "EEE is %s\n",
4493                                     bp->eee.eee_active ? "active" :
4494                                                          "not active");
4495         } else {
4496                 netif_carrier_off(bp->dev);
4497                 netdev_err(bp->dev, "NIC Link is Down\n");
4498         }
4499 }
4500
4501 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
4502 {
4503         int rc = 0;
4504         struct hwrm_port_phy_qcaps_input req = {0};
4505         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4506
4507         if (bp->hwrm_spec_code < 0x10201)
4508                 return 0;
4509
4510         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
4511
4512         mutex_lock(&bp->hwrm_cmd_lock);
4513         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4514         if (rc)
4515                 goto hwrm_phy_qcaps_exit;
4516
4517         if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
4518                 struct ethtool_eee *eee = &bp->eee;
4519                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
4520
4521                 bp->flags |= BNXT_FLAG_EEE_CAP;
4522                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
4523                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
4524                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
4525                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
4526                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
4527         }
4528
4529 hwrm_phy_qcaps_exit:
4530         mutex_unlock(&bp->hwrm_cmd_lock);
4531         return rc;
4532 }
4533
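     /* Query the current PHY state (HWRM_PORT_PHY_QCFG) and cache the
      * results in bp->link_info.  When @chng_link_state is set, also update
      * link_up and report any change in link state.
      */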
4534 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
4535 {
4536         int rc = 0;
4537         struct bnxt_link_info *link_info = &bp->link_info;
4538         struct hwrm_port_phy_qcfg_input req = {0};
4539         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4540         u8 link_up = link_info->link_up;
4541
4542         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
4543
4544         mutex_lock(&bp->hwrm_cmd_lock);
4545         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4546         if (rc) {
4547                 mutex_unlock(&bp->hwrm_cmd_lock);
4548                 return rc;
4549         }
4550
4551         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
4552         link_info->phy_link_status = resp->link;
4553         link_info->duplex = resp->duplex;
4554         link_info->pause = resp->pause;
4555         link_info->auto_mode = resp->auto_mode;
4556         link_info->auto_pause_setting = resp->auto_pause;
4557         link_info->lp_pause = resp->link_partner_adv_pause;
4558         link_info->force_pause_setting = resp->force_pause;
4559         link_info->duplex_setting = resp->duplex;
4560         if (link_info->phy_link_status == BNXT_LINK_LINK)
4561                 link_info->link_speed = le16_to_cpu(resp->link_speed);
4562         else
4563                 link_info->link_speed = 0;
4564         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
4565         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
4566         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
4567         link_info->lp_auto_link_speeds =
4568                 le16_to_cpu(resp->link_partner_adv_speeds);
4569         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
4570         link_info->phy_ver[0] = resp->phy_maj;
4571         link_info->phy_ver[1] = resp->phy_min;
4572         link_info->phy_ver[2] = resp->phy_bld;
4573         link_info->media_type = resp->media_type;
4574         link_info->transceiver = resp->xcvr_pkg_type;
4575         link_info->phy_addr = resp->eee_config_phy_addr &
4576                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
4577
4578         if (bp->flags & BNXT_FLAG_EEE_CAP) {
4579                 struct ethtool_eee *eee = &bp->eee;
4580                 u16 fw_speeds;
4581
4582                 eee->eee_active = 0;
4583                 if (resp->eee_config_phy_addr &
4584                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
4585                         eee->eee_active = 1;
4586                         fw_speeds = le16_to_cpu(
4587                                 resp->link_partner_adv_eee_link_speed_mask);
4588                         eee->lp_advertised =
4589                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
4590                 }
4591
4592                 /* Pull initial EEE config */
4593                 if (!chng_link_state) {
4594                         if (resp->eee_config_phy_addr &
4595                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
4596                                 eee->eee_enabled = 1;
4597
4598                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
4599                         eee->advertised =
4600                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
4601
4602                         if (resp->eee_config_phy_addr &
4603                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
4604                                 __le32 tmr;
4605
4606                                 eee->tx_lpi_enabled = 1;
4607                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
4608                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
4609                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
4610                         }
4611                 }
4612         }
4613         /* TODO: need to add more logic to report VF link */
4614         if (chng_link_state) {
4615                 if (link_info->phy_link_status == BNXT_LINK_LINK)
4616                         link_info->link_up = 1;
4617                 else
4618                         link_info->link_up = 0;
4619                 if (link_up != link_info->link_up)
4620                         bnxt_report_link(bp);
4621         } else {
4622                 /* always link down if not required to update link state */
4623                 link_info->link_up = 0;
4624         }
4625         mutex_unlock(&bp->hwrm_cmd_lock);
4626         return 0;
4627 }
4628
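     /* Fill in the pause fields of a PORT_PHY_CFG request: advertise RX/TX
      * pause through autoneg when flow control autoneg is enabled, otherwise
      * force the requested pause settings (mirrored into auto_pause on HWRM
      * spec 0x10201 and later).
      */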
4629 static void
4630 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
4631 {
4632         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
4633                 if (bp->hwrm_spec_code >= 0x10201)
4634                         req->auto_pause =
4635                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
4636                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4637                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
4638                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4639                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
4640                 req->enables |=
4641                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4642         } else {
4643                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4644                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
4645                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4646                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
4647                 req->enables |=
4648                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
4649                 if (bp->hwrm_spec_code >= 0x10201) {
4650                         req->auto_pause = req->force_pause;
4651                         req->enables |= cpu_to_le32(
4652                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4653                 }
4654         }
4655 }
4656
4657 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
4658                                       struct hwrm_port_phy_cfg_input *req)
4659 {
4660         u8 autoneg = bp->link_info.autoneg;
4661         u16 fw_link_speed = bp->link_info.req_link_speed;
4662         u32 advertising = bp->link_info.advertising;
4663
4664         if (autoneg & BNXT_AUTONEG_SPEED) {
4665                 req->auto_mode |=
4666                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
4667
4668                 req->enables |= cpu_to_le32(
4669                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
4670                 req->auto_link_speed_mask = cpu_to_le16(advertising);
4671
4672                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
4673                 req->flags |=
4674                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
4675         } else {
4676                 req->force_link_speed = cpu_to_le16(fw_link_speed);
4677                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
4678         }
4679
4680         /* tell chimp that the setting takes effect immediately */
4681         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4682 }
4683
4684 int bnxt_hwrm_set_pause(struct bnxt *bp)
4685 {
4686         struct hwrm_port_phy_cfg_input req = {0};
4687         int rc;
4688
4689         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4690         bnxt_hwrm_set_pause_common(bp, &req);
4691
4692         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
4693             bp->link_info.force_link_chng)
4694                 bnxt_hwrm_set_link_common(bp, &req);
4695
4696         mutex_lock(&bp->hwrm_cmd_lock);
4697         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4698         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
4699                 /* since changing the pause setting doesn't trigger any link
4700                  * change event, the driver needs to update the current pause
4701                  * result upon successful return of the phy_cfg command
4702                  */
4703                 bp->link_info.pause =
4704                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
4705                 bp->link_info.auto_pause_setting = 0;
4706                 if (!bp->link_info.force_link_chng)
4707                         bnxt_report_link(bp);
4708         }
4709         bp->link_info.force_link_chng = false;
4710         mutex_unlock(&bp->hwrm_cmd_lock);
4711         return rc;
4712 }
4713
4714 static void bnxt_hwrm_set_eee(struct bnxt *bp,
4715                               struct hwrm_port_phy_cfg_input *req)
4716 {
4717         struct ethtool_eee *eee = &bp->eee;
4718
4719         if (eee->eee_enabled) {
4720                 u16 eee_speeds;
4721                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
4722
4723                 if (eee->tx_lpi_enabled)
4724                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
4725                 else
4726                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
4727
4728                 req->flags |= cpu_to_le32(flags);
4729                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
4730                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
4731                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
4732         } else {
4733                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
4734         }
4735 }
4736
4737 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
4738 {
4739         struct hwrm_port_phy_cfg_input req = {0};
4740
4741         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4742         if (set_pause)
4743                 bnxt_hwrm_set_pause_common(bp, &req);
4744
4745         bnxt_hwrm_set_link_common(bp, &req);
4746
4747         if (set_eee)
4748                 bnxt_hwrm_set_eee(bp, &req);
4749         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4750 }
4751
4752 static bool bnxt_eee_config_ok(struct bnxt *bp)
4753 {
4754         struct ethtool_eee *eee = &bp->eee;
4755         struct bnxt_link_info *link_info = &bp->link_info;
4756
4757         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
4758                 return true;
4759
4760         if (eee->eee_enabled) {
4761                 u32 advertising =
4762                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
4763
4764                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4765                         eee->eee_enabled = 0;
4766                         return false;
4767                 }
4768                 if (eee->advertised & ~advertising) {
4769                         eee->advertised = advertising & eee->supported;
4770                         return false;
4771                 }
4772         }
4773         return true;
4774 }
4775
4776 static int bnxt_update_phy_setting(struct bnxt *bp)
4777 {
4778         int rc;
4779         bool update_link = false;
4780         bool update_pause = false;
4781         bool update_eee = false;
4782         struct bnxt_link_info *link_info = &bp->link_info;
4783
4784         rc = bnxt_update_link(bp, true);
4785         if (rc) {
4786                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
4787                            rc);
4788                 return rc;
4789         }
4790         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4791             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
4792             link_info->req_flow_ctrl)
4793                 update_pause = true;
4794         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4795             link_info->force_pause_setting != link_info->req_flow_ctrl)
4796                 update_pause = true;
4797         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4798                 if (BNXT_AUTO_MODE(link_info->auto_mode))
4799                         update_link = true;
4800                 if (link_info->req_link_speed != link_info->force_link_speed)
4801                         update_link = true;
4802                 if (link_info->req_duplex != link_info->duplex_setting)
4803                         update_link = true;
4804         } else {
4805                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
4806                         update_link = true;
4807                 if (link_info->advertising != link_info->auto_link_speeds)
4808                         update_link = true;
4809         }
4810
4811         if (!bnxt_eee_config_ok(bp))
4812                 update_eee = true;
4813
4814         if (update_link)
4815                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
4816         else if (update_pause)
4817                 rc = bnxt_hwrm_set_pause(bp);
4818         if (rc) {
4819                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
4820                            rc);
4821                 return rc;
4822         }
4823
4824         return rc;
4825 }
4826
4827 /* Common routine to pre-map certain register block to different GRC window.
4828  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
4829  * in the PF and 3 windows in the VF can be customized to map in different
4830  * register blocks.
4831  */
4832 static void bnxt_preset_reg_win(struct bnxt *bp)
4833 {
4834         if (BNXT_PF(bp)) {
4835                 /* CAG registers map to GRC window #4 */
4836                 writel(BNXT_CAG_REG_BASE,
4837                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
4838         }
4839 }
4840
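     /* Bring the NIC up: set up the interrupt mode and IRQs (when
      * @irq_re_init), allocate rings and buffers, start NAPI, initialize the
      * hardware, optionally re-apply the PHY settings, then enable
      * interrupts and the TX queues.
      */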
4841 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4842 {
4843         int rc = 0;
4844
4845         bnxt_preset_reg_win(bp);
4846         netif_carrier_off(bp->dev);
4847         if (irq_re_init) {
4848                 rc = bnxt_setup_int_mode(bp);
4849                 if (rc) {
4850                         netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
4851                                    rc);
4852                         return rc;
4853                 }
4854         }
4855         if ((bp->flags & BNXT_FLAG_RFS) &&
4856             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
4857                 /* disable RFS if falling back to INTA */
4858                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
4859                 bp->flags &= ~BNXT_FLAG_RFS;
4860         }
4861
4862         rc = bnxt_alloc_mem(bp, irq_re_init);
4863         if (rc) {
4864                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
4865                 goto open_err_free_mem;
4866         }
4867
4868         if (irq_re_init) {
4869                 bnxt_init_napi(bp);
4870                 rc = bnxt_request_irq(bp);
4871                 if (rc) {
4872                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
4873                         goto open_err;
4874                 }
4875         }
4876
4877         bnxt_enable_napi(bp);
4878
4879         rc = bnxt_init_nic(bp, irq_re_init);
4880         if (rc) {
4881                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
4882                 goto open_err;
4883         }
4884
4885         if (link_re_init) {
4886                 rc = bnxt_update_phy_setting(bp);
4887                 if (rc)
4888                         netdev_warn(bp->dev, "failed to update phy settings\n");
4889         }
4890
4891         if (irq_re_init) {
4892 #if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
4893                 vxlan_get_rx_port(bp->dev);
4894 #endif
4895                 if (!bnxt_hwrm_tunnel_dst_port_alloc(
4896                                 bp, htons(0x17c1),
4897                                 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
4898                         bp->nge_port_cnt = 1;
4899         }
4900
4901         set_bit(BNXT_STATE_OPEN, &bp->state);
4902         bnxt_enable_int(bp);
4903         /* Enable TX queues */
4904         bnxt_tx_enable(bp);
4905         mod_timer(&bp->timer, jiffies + bp->current_interval);
4906         bnxt_update_link(bp, true);
4907
4908         return 0;
4909
4910 open_err:
4911         bnxt_disable_napi(bp);
4912         bnxt_del_napi(bp);
4913
4914 open_err_free_mem:
4915         bnxt_free_skbs(bp);
4916         bnxt_free_irq(bp);
4917         bnxt_free_mem(bp, true);
4918         return rc;
4919 }
4920
4921 /* rtnl_lock held */
4922 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4923 {
4924         int rc = 0;
4925
4926         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
4927         if (rc) {
4928                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
4929                 dev_close(bp->dev);
4930         }
4931         return rc;
4932 }
4933
4934 static int bnxt_open(struct net_device *dev)
4935 {
4936         struct bnxt *bp = netdev_priv(dev);
4937         int rc = 0;
4938
4939         rc = bnxt_hwrm_func_reset(bp);
4940         if (rc) {
4941                 netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
4942                            rc);
4943                 rc = -1;
4944                 return rc;
4945         }
4946         return __bnxt_open_nic(bp, true, true);
4947 }
4948
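     /* Bump intr_sem to flag that interrupts are being shut off, disable
      * chip interrupts, and wait for any handler still running on each
      * vector to finish.
      */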
4949 static void bnxt_disable_int_sync(struct bnxt *bp)
4950 {
4951         int i;
4952
4953         atomic_inc(&bp->intr_sem);
4954         if (!netif_running(bp->dev))
4955                 return;
4956
4957         bnxt_disable_int(bp);
4958         for (i = 0; i < bp->cp_nr_rings; i++)
4959                 synchronize_irq(bp->irq_tbl[i].vector);
4960 }
4961
4962 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4963 {
4964         int rc = 0;
4965
4966 #ifdef CONFIG_BNXT_SRIOV
4967         if (bp->sriov_cfg) {
4968                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
4969                                                       !bp->sriov_cfg,
4970                                                       BNXT_SRIOV_CFG_WAIT_TMO);
4971                 if (rc)
4972                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
4973         }
4974 #endif
4975         /* Change device state to avoid TX queue wake-ups */
4976         bnxt_tx_disable(bp);
4977
4978         clear_bit(BNXT_STATE_OPEN, &bp->state);
4979         smp_mb__after_atomic();
4980         while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
4981                 msleep(20);
4982
4983         /* Flush rings before disabling interrupts */
4984         bnxt_shutdown_nic(bp, irq_re_init);
4985
4986         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
4987
4988         bnxt_disable_napi(bp);
4989         bnxt_disable_int_sync(bp);
4990         del_timer_sync(&bp->timer);
4991         bnxt_free_skbs(bp);
4992
4993         if (irq_re_init) {
4994                 bnxt_free_irq(bp);
4995                 bnxt_del_napi(bp);
4996         }
4997         bnxt_free_mem(bp, irq_re_init);
4998         return rc;
4999 }
5000
5001 static int bnxt_close(struct net_device *dev)
5002 {
5003         struct bnxt *bp = netdev_priv(dev);
5004
5005         bnxt_close_nic(bp, true, true);
5006         return 0;
5007 }
5008
5009 /* rtnl_lock held */
5010 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5011 {
5012         switch (cmd) {
5013         case SIOCGMIIPHY:
5014                 /* fallthru */
5015         case SIOCGMIIREG: {
5016                 if (!netif_running(dev))
5017                         return -EAGAIN;
5018
5019                 return 0;
5020         }
5021
5022         case SIOCSMIIREG:
5023                 if (!netif_running(dev))
5024                         return -EAGAIN;
5025
5026                 return 0;
5027
5028         default:
5029                 /* do nothing */
5030                 break;
5031         }
5032         return -EOPNOTSUPP;
5033 }
5034
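     /* Aggregate the per-completion-ring hardware counters into the
      * standard rtnl_link_stats64, and fold in port-level error counters
      * when port statistics are enabled.
      */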
5035 static struct rtnl_link_stats64 *
5036 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5037 {
5038         u32 i;
5039         struct bnxt *bp = netdev_priv(dev);
5040
5041         memset(stats, 0, sizeof(struct rtnl_link_stats64));
5042
5043         if (!bp->bnapi)
5044                 return stats;
5045
5046         /* TODO check if we need to synchronize with bnxt_close path */
5047         for (i = 0; i < bp->cp_nr_rings; i++) {
5048                 struct bnxt_napi *bnapi = bp->bnapi[i];
5049                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5050                 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
5051
5052                 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
5053                 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
5054                 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
5055
5056                 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
5057                 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
5058                 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
5059
5060                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
5061                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
5062                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
5063
5064                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
5065                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
5066                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
5067
5068                 stats->rx_missed_errors +=
5069                         le64_to_cpu(hw_stats->rx_discard_pkts);
5070
5071                 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
5072
5073                 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
5074         }
5075
5076         if (bp->flags & BNXT_FLAG_PORT_STATS) {
5077                 struct rx_port_stats *rx = bp->hw_rx_port_stats;
5078                 struct tx_port_stats *tx = bp->hw_tx_port_stats;
5079
5080                 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
5081                 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
5082                 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
5083                                           le64_to_cpu(rx->rx_ovrsz_frames) +
5084                                           le64_to_cpu(rx->rx_runt_frames);
5085                 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
5086                                    le64_to_cpu(rx->rx_jbr_frames);
5087                 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
5088                 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
5089                 stats->tx_errors = le64_to_cpu(tx->tx_err);
5090         }
5091
5092         return stats;
5093 }
5094
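     /* Copy the device multicast list into the default VNIC and report
      * whether it changed.  If there are too many addresses, request
      * ALL_MCAST instead.
      */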
5095 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
5096 {
5097         struct net_device *dev = bp->dev;
5098         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5099         struct netdev_hw_addr *ha;
5100         u8 *haddr;
5101         int mc_count = 0;
5102         bool update = false;
5103         int off = 0;
5104
5105         netdev_for_each_mc_addr(ha, dev) {
5106                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
5107                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5108                         vnic->mc_list_count = 0;
5109                         return false;
5110                 }
5111                 haddr = ha->addr;
5112                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
5113                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
5114                         update = true;
5115                 }
5116                 off += ETH_ALEN;
5117                 mc_count++;
5118         }
5119         if (mc_count)
5120                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
5121
5122         if (mc_count != vnic->mc_list_count) {
5123                 vnic->mc_list_count = mc_count;
5124                 update = true;
5125         }
5126         return update;
5127 }
5128
5129 static bool bnxt_uc_list_updated(struct bnxt *bp)
5130 {
5131         struct net_device *dev = bp->dev;
5132         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5133         struct netdev_hw_addr *ha;
5134         int off = 0;
5135
5136         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
5137                 return true;
5138
5139         netdev_for_each_uc_addr(ha, dev) {
5140                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
5141                         return true;
5142
5143                 off += ETH_ALEN;
5144         }
5145         return false;
5146 }
5147
5148 static void bnxt_set_rx_mode(struct net_device *dev)
5149 {
5150         struct bnxt *bp = netdev_priv(dev);
5151         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5152         u32 mask = vnic->rx_mask;
5153         bool mc_update = false;
5154         bool uc_update;
5155
5156         if (!netif_running(dev))
5157                 return;
5158
5159         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
5160                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
5161                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
5162
5163         /* Only allow PF to be in promiscuous mode */
5164         if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
5165                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5166
5167         uc_update = bnxt_uc_list_updated(bp);
5168
5169         if (dev->flags & IFF_ALLMULTI) {
5170                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5171                 vnic->mc_list_count = 0;
5172         } else {
5173                 mc_update = bnxt_mc_list_updated(bp, &mask);
5174         }
5175
5176         if (mask != vnic->rx_mask || uc_update || mc_update) {
5177                 vnic->rx_mask = mask;
5178
5179                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
5180                 schedule_work(&bp->sp_task);
5181         }
5182 }
5183
5184 static int bnxt_cfg_rx_mode(struct bnxt *bp)
5185 {
5186         struct net_device *dev = bp->dev;
5187         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5188         struct netdev_hw_addr *ha;
5189         int i, off = 0, rc;
5190         bool uc_update;
5191
5192         netif_addr_lock_bh(dev);
5193         uc_update = bnxt_uc_list_updated(bp);
5194         netif_addr_unlock_bh(dev);
5195
5196         if (!uc_update)
5197                 goto skip_uc;
5198
5199         mutex_lock(&bp->hwrm_cmd_lock);
5200         for (i = 1; i < vnic->uc_filter_count; i++) {
5201                 struct hwrm_cfa_l2_filter_free_input req = {0};
5202
5203                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
5204                                        -1);
5205
5206                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
5207
5208                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5209                                         HWRM_CMD_TIMEOUT);
5210         }
5211         mutex_unlock(&bp->hwrm_cmd_lock);
5212
5213         vnic->uc_filter_count = 1;
5214
5215         netif_addr_lock_bh(dev);
5216         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
5217                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5218         } else {
5219                 netdev_for_each_uc_addr(ha, dev) {
5220                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
5221                         off += ETH_ALEN;
5222                         vnic->uc_filter_count++;
5223                 }
5224         }
5225         netif_addr_unlock_bh(dev);
5226
5227         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
5228                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
5229                 if (rc) {
5230                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
5231                                    rc);
5232                         vnic->uc_filter_count = i;
5233                         return rc;
5234                 }
5235         }
5236
5237 skip_uc:
5238         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
5239         if (rc)
5240                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
5241                            rc);
5242
5243         return rc;
5244 }
5245
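     /* aRFS needs one VNIC (and RSS context) per RX ring plus the default
      * VNIC, and is only usable on the PF with MSI-X capability.
      */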
5246 static bool bnxt_rfs_capable(struct bnxt *bp)
5247 {
5248 #ifdef CONFIG_RFS_ACCEL
5249         struct bnxt_pf_info *pf = &bp->pf;
5250         int vnics;
5251
5252         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
5253                 return false;
5254
5255         vnics = 1 + bp->rx_nr_rings;
5256         if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics)
5257                 return false;
5258
5259         return true;
5260 #else
5261         return false;
5262 #endif
5263 }
5264
5265 static netdev_features_t bnxt_fix_features(struct net_device *dev,
5266                                            netdev_features_t features)
5267 {
5268         struct bnxt *bp = netdev_priv(dev);
5269
5270         if (!bnxt_rfs_capable(bp))
5271                 features &= ~NETIF_F_NTUPLE;
5272         return features;
5273 }
5274
5275 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
5276 {
5277         struct bnxt *bp = netdev_priv(dev);
5278         u32 flags = bp->flags;
5279         u32 changes;
5280         int rc = 0;
5281         bool re_init = false;
5282         bool update_tpa = false;
5283
5284         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
5285         if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
5286                 flags |= BNXT_FLAG_GRO;
5287         if (features & NETIF_F_LRO)
5288                 flags |= BNXT_FLAG_LRO;
5289
5290         if (features & NETIF_F_HW_VLAN_CTAG_RX)
5291                 flags |= BNXT_FLAG_STRIP_VLAN;
5292
5293         if (features & NETIF_F_NTUPLE)
5294                 flags |= BNXT_FLAG_RFS;
5295
5296         changes = flags ^ bp->flags;
5297         if (changes & BNXT_FLAG_TPA) {
5298                 update_tpa = true;
5299                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
5300                     (flags & BNXT_FLAG_TPA) == 0)
5301                         re_init = true;
5302         }
5303
5304         if (changes & ~BNXT_FLAG_TPA)
5305                 re_init = true;
5306
5307         if (flags != bp->flags) {
5308                 u32 old_flags = bp->flags;
5309
5310                 bp->flags = flags;
5311
5312                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
5313                         if (update_tpa)
5314                                 bnxt_set_ring_params(bp);
5315                         return rc;
5316                 }
5317
5318                 if (re_init) {
5319                         bnxt_close_nic(bp, false, false);
5320                         if (update_tpa)
5321                                 bnxt_set_ring_params(bp);
5322
5323                         return bnxt_open_nic(bp, false, false);
5324                 }
5325                 if (update_tpa) {
5326                         rc = bnxt_set_tpa(bp,
5327                                           (flags & BNXT_FLAG_TPA) ?
5328                                           true : false);
5329                         if (rc)
5330                                 bp->flags = old_flags;
5331                 }
5332         }
5333         return rc;
5334 }
5335
5336 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
5337 {
5338         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
5339         int i = bnapi->index;
5340
5341         if (!txr)
5342                 return;
5343
5344         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
5345                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
5346                     txr->tx_cons);
5347 }
5348
5349 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
5350 {
5351         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
5352         int i = bnapi->index;
5353
5354         if (!rxr)
5355                 return;
5356
5357         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
5358                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
5359                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
5360                     rxr->rx_sw_agg_prod);
5361 }
5362
5363 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
5364 {
5365         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5366         int i = bnapi->index;
5367
5368         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
5369                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
5370 }
5371
5372 static void bnxt_dbg_dump_states(struct bnxt *bp)
5373 {
5374         int i;
5375         struct bnxt_napi *bnapi;
5376
5377         for (i = 0; i < bp->cp_nr_rings; i++) {
5378                 bnapi = bp->bnapi[i];
5379                 if (netif_msg_drv(bp)) {
5380                         bnxt_dump_tx_sw_state(bnapi);
5381                         bnxt_dump_rx_sw_state(bnapi);
5382                         bnxt_dump_cp_sw_state(bnapi);
5383                 }
5384         }
5385 }
5386
5387 static void bnxt_reset_task(struct bnxt *bp)
5388 {
5389         bnxt_dbg_dump_states(bp);
5390         if (netif_running(bp->dev)) {
5391                 bnxt_close_nic(bp, false, false);
5392                 bnxt_open_nic(bp, false, false);
5393         }
5394 }
5395
5396 static void bnxt_tx_timeout(struct net_device *dev)
5397 {
5398         struct bnxt *bp = netdev_priv(dev);
5399
5400         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
5401         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
5402         schedule_work(&bp->sp_task);
5403 }
5404
5405 #ifdef CONFIG_NET_POLL_CONTROLLER
5406 static void bnxt_poll_controller(struct net_device *dev)
5407 {
5408         struct bnxt *bp = netdev_priv(dev);
5409         int i;
5410
5411         for (i = 0; i < bp->cp_nr_rings; i++) {
5412                 struct bnxt_irq *irq = &bp->irq_tbl[i];
5413
5414                 disable_irq(irq->vector);
5415                 irq->handler(irq->vector, bp->bnapi[i]);
5416                 enable_irq(irq->vector);
5417         }
5418 }
5419 #endif
5420
5421 static void bnxt_timer(unsigned long data)
5422 {
5423         struct bnxt *bp = (struct bnxt *)data;
5424         struct net_device *dev = bp->dev;
5425
5426         if (!netif_running(dev))
5427                 return;
5428
5429         if (atomic_read(&bp->intr_sem) != 0)
5430                 goto bnxt_restart_timer;
5431
5432         if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
5433                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
5434                 schedule_work(&bp->sp_task);
5435         }
5436 bnxt_restart_timer:
5437         mod_timer(&bp->timer, jiffies + bp->current_interval);
5438 }
5439
5440 static void bnxt_cfg_ntp_filters(struct bnxt *);
5441
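     /* Deferred work handler: services whatever event bits were set (RX
      * mode, n-tuple filters, link changes, tunnel ports, reset, port
      * stats) while holding BNXT_STATE_IN_SP_TASK so that bnxt_close_nic()
      * can wait for it to finish.
      */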
5442 static void bnxt_sp_task(struct work_struct *work)
5443 {
5444         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
5445         int rc;
5446
5447         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5448         smp_mb__after_atomic();
5449         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
5450                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5451                 return;
5452         }
5453
5454         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
5455                 bnxt_cfg_rx_mode(bp);
5456
5457         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
5458                 bnxt_cfg_ntp_filters(bp);
5459         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
5460                 rc = bnxt_update_link(bp, true);
5461                 if (rc)
5462                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
5463                                    rc);
5464         }
5465         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
5466                 bnxt_hwrm_exec_fwd_req(bp);
5467         if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
5468                 bnxt_hwrm_tunnel_dst_port_alloc(
5469                         bp, bp->vxlan_port,
5470                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5471         }
5472         if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
5473                 bnxt_hwrm_tunnel_dst_port_free(
5474                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5475         }
5476         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
5477                 /* bnxt_reset_task() calls bnxt_close_nic() which waits
5478                  * for BNXT_STATE_IN_SP_TASK to clear.
5479                  */
5480                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5481                 rtnl_lock();
5482                 bnxt_reset_task(bp);
5483                 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5484                 rtnl_unlock();
5485         }
5486
5487         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
5488                 bnxt_hwrm_port_qstats(bp);
5489
5490         smp_mb__before_atomic();
5491         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5492 }
5493
5494 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
5495 {
5496         int rc;
5497         struct bnxt *bp = netdev_priv(dev);
5498
5499         SET_NETDEV_DEV(dev, &pdev->dev);
5500
5501         /* enable device (incl. PCI PM wakeup) and bus-mastering */
5502         rc = pci_enable_device(pdev);
5503         if (rc) {
5504                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
5505                 goto init_err;
5506         }
5507
5508         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5509                 dev_err(&pdev->dev,
5510                         "Cannot find PCI device base address, aborting\n");
5511                 rc = -ENODEV;
5512                 goto init_err_disable;
5513         }
5514
5515         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5516         if (rc) {
5517                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
5518                 goto init_err_disable;
5519         }
5520
5521         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
5522             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
5523                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                     rc = -EIO;
5524                 goto init_err_release;
5525         }
5526
5527         pci_set_master(pdev);
5528
5529         bp->dev = dev;
5530         bp->pdev = pdev;
5531
5532         bp->bar0 = pci_ioremap_bar(pdev, 0);
5533         if (!bp->bar0) {
5534                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5535                 rc = -ENOMEM;
5536                 goto init_err_release;
5537         }
5538
5539         bp->bar1 = pci_ioremap_bar(pdev, 2);
5540         if (!bp->bar1) {
5541                 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
5542                 rc = -ENOMEM;
5543                 goto init_err_release;
5544         }
5545
5546         bp->bar2 = pci_ioremap_bar(pdev, 4);
5547         if (!bp->bar2) {
5548                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
5549                 rc = -ENOMEM;
5550                 goto init_err_release;
5551         }
5552
5553         pci_enable_pcie_error_reporting(pdev);
5554
5555         INIT_WORK(&bp->sp_task, bnxt_sp_task);
5556
5557         spin_lock_init(&bp->ntp_fltr_lock);
5558
5559         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
5560         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
5561
5562         /* coalescing tick values in microseconds */
5563         bp->rx_coal_ticks = 12;
5564         bp->rx_coal_bufs = 30;
5565         bp->rx_coal_ticks_irq = 1;
5566         bp->rx_coal_bufs_irq = 2;
5567
5568         bp->tx_coal_ticks = 25;
5569         bp->tx_coal_bufs = 30;
5570         bp->tx_coal_ticks_irq = 2;
5571         bp->tx_coal_bufs_irq = 2;
5572
5573         init_timer(&bp->timer);
5574         bp->timer.data = (unsigned long)bp;
5575         bp->timer.function = bnxt_timer;
5576         bp->current_interval = BNXT_TIMER_INTERVAL;
5577
5578         clear_bit(BNXT_STATE_OPEN, &bp->state);
5579
5580         return 0;
5581
5582 init_err_release:
5583         if (bp->bar2) {
5584                 pci_iounmap(pdev, bp->bar2);
5585                 bp->bar2 = NULL;
5586         }
5587
5588         if (bp->bar1) {
5589                 pci_iounmap(pdev, bp->bar1);
5590                 bp->bar1 = NULL;
5591         }
5592
5593         if (bp->bar0) {
5594                 pci_iounmap(pdev, bp->bar0);
5595                 bp->bar0 = NULL;
5596         }
5597
5598         pci_release_regions(pdev);
5599
5600 init_err_disable:
5601         pci_disable_device(pdev);
5602
5603 init_err:
5604         return rc;
5605 }
5606
5607 /* rtnl_lock held */
5608 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
5609 {
5610         struct sockaddr *addr = p;
5611         struct bnxt *bp = netdev_priv(dev);
5612         int rc = 0;
5613
5614         if (!is_valid_ether_addr(addr->sa_data))
5615                 return -EADDRNOTAVAIL;
5616
5617 #ifdef CONFIG_BNXT_SRIOV
5618         if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
5619                 return -EADDRNOTAVAIL;
5620 #endif
5621
5622         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
5623                 return 0;
5624
5625         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5626         if (netif_running(dev)) {
5627                 bnxt_close_nic(bp, false, false);
5628                 rc = bnxt_open_nic(bp, false, false);
5629         }
5630
5631         return rc;
5632 }
5633
5634 /* rtnl_lock held */
5635 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
5636 {
5637         struct bnxt *bp = netdev_priv(dev);
5638
5639         if (new_mtu < 60 || new_mtu > 9000)
5640                 return -EINVAL;
5641
5642         if (netif_running(dev))
5643                 bnxt_close_nic(bp, false, false);
5644
5645         dev->mtu = new_mtu;
5646         bnxt_set_ring_params(bp);
5647
5648         if (netif_running(dev))
5649                 return bnxt_open_nic(bp, false, false);
5650
5651         return 0;
5652 }
5653
5654 static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
5655                          struct tc_to_netdev *ntc)
5656 {
5657         struct bnxt *bp = netdev_priv(dev);
5658         u8 tc;
5659
5660         if (ntc->type != TC_SETUP_MQPRIO)
5661                 return -EINVAL;
5662
5663         tc = ntc->tc;
5664
5665         if (tc > bp->max_tc) {
5666                 netdev_err(dev, "too many traffic classes requested: %d. Max supported is %d\n",
5667                            tc, bp->max_tc);
5668                 return -EINVAL;
5669         }
5670
5671         if (netdev_get_num_tc(dev) == tc)
5672                 return 0;
5673
5674         if (tc) {
5675                 int max_rx_rings, max_tx_rings, rc;
5676                 bool sh = false;
5677
5678                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5679                         sh = true;
5680
5681                 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
5682                 if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
5683                         return -ENOMEM;
5684         }
5685
5686         /* Need to close the device and do hw resource re-allocations */
5687         if (netif_running(bp->dev))
5688                 bnxt_close_nic(bp, true, false);
5689
5690         if (tc) {
5691                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
5692                 netdev_set_num_tc(dev, tc);
5693         } else {
5694                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
5695                 netdev_reset_tc(dev);
5696         }
5697         bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
5698         bp->num_stat_ctxs = bp->cp_nr_rings;
5699
5700         if (netif_running(bp->dev))
5701                 return bnxt_open_nic(bp, true, false);
5702
5703         return 0;
5704 }
5705
5706 #ifdef CONFIG_RFS_ACCEL
5707 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
5708                             struct bnxt_ntuple_filter *f2)
5709 {
5710         struct flow_keys *keys1 = &f1->fkeys;
5711         struct flow_keys *keys2 = &f2->fkeys;
5712
5713         if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
5714             keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
5715             keys1->ports.ports == keys2->ports.ports &&
5716             keys1->basic.ip_proto == keys2->basic.ip_proto &&
5717             keys1->basic.n_proto == keys2->basic.n_proto &&
5718             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
5719                 return true;
5720
5721         return false;
5722 }
5723
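     /* ndo_rx_flow_steer() handler for aRFS: dissect the flow, bail out if a
      * matching filter already exists, otherwise allocate an n-tuple filter
      * entry and let bnxt_sp_task() program it into the hardware.
      */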
5724 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
5725                               u16 rxq_index, u32 flow_id)
5726 {
5727         struct bnxt *bp = netdev_priv(dev);
5728         struct bnxt_ntuple_filter *fltr, *new_fltr;
5729         struct flow_keys *fkeys;
5730         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
5731         int rc = 0, idx, bit_id;
5732         struct hlist_head *head;
5733
5734         if (skb->encapsulation)
5735                 return -EPROTONOSUPPORT;
5736
5737         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
5738         if (!new_fltr)
5739                 return -ENOMEM;
5740
5741         fkeys = &new_fltr->fkeys;
5742         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
5743                 rc = -EPROTONOSUPPORT;
5744                 goto err_free;
5745         }
5746
5747         if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
5748             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
5749              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
5750                 rc = -EPROTONOSUPPORT;
5751                 goto err_free;
5752         }
5753
5754         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
5755
5756         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
5757         head = &bp->ntp_fltr_hash_tbl[idx];
5758         rcu_read_lock();
5759         hlist_for_each_entry_rcu(fltr, head, hash) {
5760                 if (bnxt_fltr_match(fltr, new_fltr)) {
5761                         rcu_read_unlock();
5762                         rc = 0;
5763                         goto err_free;
5764                 }
5765         }
5766         rcu_read_unlock();
5767
5768         spin_lock_bh(&bp->ntp_fltr_lock);
5769         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5770                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
5771         if (bit_id < 0) {
5772                 spin_unlock_bh(&bp->ntp_fltr_lock);
5773                 rc = -ENOMEM;
5774                 goto err_free;
5775         }
5776
5777         new_fltr->sw_id = (u16)bit_id;
5778         new_fltr->flow_id = flow_id;
5779         new_fltr->rxq = rxq_index;
5780         hlist_add_head_rcu(&new_fltr->hash, head);
5781         bp->ntp_fltr_count++;
5782         spin_unlock_bh(&bp->ntp_fltr_lock);
5783
5784         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
5785         schedule_work(&bp->sp_task);
5786
5787         return new_fltr->sw_id;
5788
5789 err_free:
5790         kfree(new_fltr);
5791         return rc;
5792 }
5793
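/* Called from sp_task: walk the ntuple filter hash table, program filters
 * that have not yet been committed to hardware, and free filters whose
 * flows the RPS core reports as expired.
 */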
5794 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5795 {
5796         int i;
5797
5798         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5799                 struct hlist_head *head;
5800                 struct hlist_node *tmp;
5801                 struct bnxt_ntuple_filter *fltr;
5802                 int rc;
5803
5804                 head = &bp->ntp_fltr_hash_tbl[i];
5805                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
5806                         bool del = false;
5807
5808                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
5809                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
5810                                                         fltr->flow_id,
5811                                                         fltr->sw_id)) {
5812                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
5813                                                                          fltr);
5814                                         del = true;
5815                                 }
5816                         } else {
5817                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
5818                                                                        fltr);
5819                                 if (rc)
5820                                         del = true;
5821                                 else
5822                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
5823                         }
5824
5825                         if (del) {
5826                                 spin_lock_bh(&bp->ntp_fltr_lock);
5827                                 hlist_del_rcu(&fltr->hash);
5828                                 bp->ntp_fltr_count--;
5829                                 spin_unlock_bh(&bp->ntp_fltr_lock);
5830                                 synchronize_rcu();
5831                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5832                                 kfree(fltr);
5833                         }
5834                 }
5835         }
5836         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
5837                 netdev_info(bp->dev, "Received PF driver unload event!\n");
5838 }
5839
5840 #else
5841
5842 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5843 {
5844 }
5845
5846 #endif /* CONFIG_RFS_ACCEL */
5847
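/* VXLAN offload notifications from the stack.  The driver tracks a single
 * VXLAN UDP port, so only the first port added (and the last reference
 * removed) triggers the deferred HWRM tunnel configuration in sp_task.
 */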
5848 static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5849                                 __be16 port)
5850 {
5851         struct bnxt *bp = netdev_priv(dev);
5852
5853         if (!netif_running(dev))
5854                 return;
5855
5856         if (sa_family != AF_INET6 && sa_family != AF_INET)
5857                 return;
5858
5859         if (bp->vxlan_port_cnt && bp->vxlan_port != port)
5860                 return;
5861
5862         bp->vxlan_port_cnt++;
5863         if (bp->vxlan_port_cnt == 1) {
5864                 bp->vxlan_port = port;
5865                 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
5866                 schedule_work(&bp->sp_task);
5867         }
5868 }
5869
5870 static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5871                                 __be16 port)
5872 {
5873         struct bnxt *bp = netdev_priv(dev);
5874
5875         if (!netif_running(dev))
5876                 return;
5877
5878         if (sa_family != AF_INET6 && sa_family != AF_INET)
5879                 return;
5880
5881         if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
5882                 bp->vxlan_port_cnt--;
5883
5884                 if (bp->vxlan_port_cnt == 0) {
5885                         set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
5886                         schedule_work(&bp->sp_task);
5887                 }
5888         }
5889 }
5890
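/* Net device entry points.  The SR-IOV, netpoll, aRFS and busy-poll
 * callbacks are compiled in only when the corresponding Kconfig options
 * are enabled.
 */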
5891 static const struct net_device_ops bnxt_netdev_ops = {
5892         .ndo_open               = bnxt_open,
5893         .ndo_start_xmit         = bnxt_start_xmit,
5894         .ndo_stop               = bnxt_close,
5895         .ndo_get_stats64        = bnxt_get_stats64,
5896         .ndo_set_rx_mode        = bnxt_set_rx_mode,
5897         .ndo_do_ioctl           = bnxt_ioctl,
5898         .ndo_validate_addr      = eth_validate_addr,
5899         .ndo_set_mac_address    = bnxt_change_mac_addr,
5900         .ndo_change_mtu         = bnxt_change_mtu,
5901         .ndo_fix_features       = bnxt_fix_features,
5902         .ndo_set_features       = bnxt_set_features,
5903         .ndo_tx_timeout         = bnxt_tx_timeout,
5904 #ifdef CONFIG_BNXT_SRIOV
5905         .ndo_get_vf_config      = bnxt_get_vf_config,
5906         .ndo_set_vf_mac         = bnxt_set_vf_mac,
5907         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
5908         .ndo_set_vf_rate        = bnxt_set_vf_bw,
5909         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
5910         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
5911 #endif
5912 #ifdef CONFIG_NET_POLL_CONTROLLER
5913         .ndo_poll_controller    = bnxt_poll_controller,
5914 #endif
5915         .ndo_setup_tc           = bnxt_setup_tc,
5916 #ifdef CONFIG_RFS_ACCEL
5917         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
5918 #endif
5919         .ndo_add_vxlan_port     = bnxt_add_vxlan_port,
5920         .ndo_del_vxlan_port     = bnxt_del_vxlan_port,
5921 #ifdef CONFIG_NET_RX_BUSY_POLL
5922         .ndo_busy_poll          = bnxt_busy_poll,
5923 #endif
5924 };
5925
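/* Device teardown, roughly the reverse of bnxt_init_one(): disable SR-IOV
 * (PF only), unregister the netdev, cancel deferred work, then release the
 * HWRM resources, BAR mappings and the PCI device itself.
 */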
5926 static void bnxt_remove_one(struct pci_dev *pdev)
5927 {
5928         struct net_device *dev = pci_get_drvdata(pdev);
5929         struct bnxt *bp = netdev_priv(dev);
5930
5931         if (BNXT_PF(bp))
5932                 bnxt_sriov_disable(bp);
5933
5934         pci_disable_pcie_error_reporting(pdev);
5935         unregister_netdev(dev);
5936         cancel_work_sync(&bp->sp_task);
5937         bp->sp_event = 0;
5938
5939         bnxt_hwrm_func_drv_unrgtr(bp);
5940         bnxt_free_hwrm_resources(bp);
5941         pci_iounmap(pdev, bp->bar2);
5942         pci_iounmap(pdev, bp->bar1);
5943         pci_iounmap(pdev, bp->bar0);
5944         free_netdev(dev);
5945
5946         pci_release_regions(pdev);
5947         pci_disable_device(pdev);
5948 }
5949
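/* Query PHY capabilities and the current link state, then seed the
 * ethtool-visible link settings (autoneg, advertised speeds, flow control)
 * from the NVM-configured defaults.
 */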
5950 static int bnxt_probe_phy(struct bnxt *bp)
5951 {
5952         int rc = 0;
5953         struct bnxt_link_info *link_info = &bp->link_info;
5954
5955         rc = bnxt_hwrm_phy_qcaps(bp);
5956         if (rc) {
5957                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
5958                            rc);
5959                 return rc;
5960         }
5961
5962         rc = bnxt_update_link(bp, false);
5963         if (rc) {
5964                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
5965                            rc);
5966                 return rc;
5967         }
5968
5969         /* Initialize the ethtool settings copy with NVM settings */
5970         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
5971                 link_info->autoneg = BNXT_AUTONEG_SPEED;
5972                 if (bp->hwrm_spec_code >= 0x10201) {
5973                         if (link_info->auto_pause_setting &
5974                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
5975                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
5976                 } else {
5977                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
5978                 }
5979                 link_info->advertising = link_info->auto_link_speeds;
5980         } else {
5981                 link_info->req_link_speed = link_info->force_link_speed;
5982                 link_info->req_duplex = link_info->duplex_setting;
5983         }
5984         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
5985                 link_info->req_flow_ctrl =
5986                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
5987         else
5988                 link_info->req_flow_ctrl = link_info->force_pause_setting;
5989         return rc;
5990 }
5991
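/* Upper bound on usable IRQs: the MSI-X table size read from PCI config
 * space (the QSIZE field is zero-based, hence the +1), or 1 when MSI-X is
 * not supported.
 */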
5992 static int bnxt_get_max_irq(struct pci_dev *pdev)
5993 {
5994         u16 ctrl;
5995
5996         if (!pdev->msix_cap)
5997                 return 1;
5998
5999         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
6000         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
6001 }
6002
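/* Report the ring limits granted by firmware for this function (VF or PF).
 * Completion rings are additionally capped by the IRQ and stats context
 * limits, RX rings by the number of hardware ring groups, and RX is halved
 * when aggregation rings are enabled since each RX ring is then paired
 * with an aggregation ring.
 */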
6003 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
6004                                 int *max_cp)
6005 {
6006         int max_ring_grps = 0;
6007
6008 #ifdef CONFIG_BNXT_SRIOV
6009         if (!BNXT_PF(bp)) {
6010                 *max_tx = bp->vf.max_tx_rings;
6011                 *max_rx = bp->vf.max_rx_rings;
6012                 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
6013                 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
6014                 max_ring_grps = bp->vf.max_hw_ring_grps;
6015         } else
6016 #endif
6017         {
6018                 *max_tx = bp->pf.max_tx_rings;
6019                 *max_rx = bp->pf.max_rx_rings;
6020                 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
6021                 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
6022                 max_ring_grps = bp->pf.max_hw_ring_grps;
6023         }
6024
6025         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6026                 *max_rx >>= 1;
6027         *max_rx = min_t(int, *max_rx, max_ring_grps);
6028 }
6029
6030 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
6031 {
6032         int rx, tx, cp;
6033
6034         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
6035         if (!rx || !tx || !cp)
6036                 return -ENOMEM;
6037
6038         *max_rx = rx;
6039         *max_tx = tx;
6040         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
6041 }
6042
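/* Choose the default ring counts at probe time: completion rings are
 * shared between RX and TX, and the RX/TX ring counts are the smaller of
 * the firmware maximum and the kernel's default RSS queue count.
 */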
6043 static int bnxt_set_dflt_rings(struct bnxt *bp)
6044 {
6045         int dflt_rings, max_rx_rings, max_tx_rings, rc;
6046         bool sh = true;
6047
6048         if (sh)
6049                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
6050         dflt_rings = netif_get_num_default_rss_queues();
6051         rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6052         if (rc)
6053                 return rc;
6054         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
6055         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
6056         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
6057         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6058                                bp->tx_nr_rings + bp->rx_nr_rings;
6059         bp->num_stat_ctxs = bp->cp_nr_rings;
6060         return rc;
6061 }
6062
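/* PCI probe: allocate the netdev, map the BARs, bring up the HWRM channel,
 * register with and query the firmware for capabilities, pick default
 * ring and feature settings, probe the PHY and finally register the net
 * device.  Any failure unwinds through the init_err labels.
 */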
6063 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6064 {
6065         static int version_printed;
6066         struct net_device *dev;
6067         struct bnxt *bp;
6068         int rc, max_irqs;
6069
6070         if (version_printed++ == 0)
6071                 pr_info("%s", version);
6072
6073         max_irqs = bnxt_get_max_irq(pdev);
6074         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
6075         if (!dev)
6076                 return -ENOMEM;
6077
6078         bp = netdev_priv(dev);
6079
6080         if (bnxt_vf_pciid(ent->driver_data))
6081                 bp->flags |= BNXT_FLAG_VF;
6082
6083         if (pdev->msix_cap)
6084                 bp->flags |= BNXT_FLAG_MSIX_CAP;
6085
6086         rc = bnxt_init_board(pdev, dev);
6087         if (rc < 0)
6088                 goto init_err_free;
6089
6090         dev->netdev_ops = &bnxt_netdev_ops;
6091         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
6092         dev->ethtool_ops = &bnxt_ethtool_ops;
6093
6094         pci_set_drvdata(pdev, dev);
6095
6096         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
6097                            NETIF_F_TSO | NETIF_F_TSO6 |
6098                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
6099                            NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
6100                            NETIF_F_RXHASH |
6101                            NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
6102
6103         dev->hw_enc_features =
6104                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
6105                         NETIF_F_TSO | NETIF_F_TSO6 |
6106                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
6107                         NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
6108         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
6109         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
6110                             NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
6111         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
6112         dev->priv_flags |= IFF_UNICAST_FLT;
6113
6114 #ifdef CONFIG_BNXT_SRIOV
6115         init_waitqueue_head(&bp->sriov_cfg_wait);
6116 #endif
6117         rc = bnxt_alloc_hwrm_resources(bp);
6118         if (rc)
6119                 goto init_err;
6120
6121         mutex_init(&bp->hwrm_cmd_lock);
6122         bnxt_hwrm_ver_get(bp);
6123
6124         rc = bnxt_hwrm_func_drv_rgtr(bp);
6125         if (rc)
6126                 goto init_err;
6127
6128         /* Get the MAX capabilities for this function */
6129         rc = bnxt_hwrm_func_qcaps(bp);
6130         if (rc) {
6131                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
6132                            rc);
6133                 rc = -1;
6134                 goto init_err;
6135         }
6136
6137         rc = bnxt_hwrm_queue_qportcfg(bp);
6138         if (rc) {
6139                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
6140                            rc);
6141                 rc = -1;
6142                 goto init_err;
6143         }
6144
6145         bnxt_set_tpa_flags(bp);
6146         bnxt_set_ring_params(bp);
6147         if (BNXT_PF(bp))
6148                 bp->pf.max_irqs = max_irqs;
6149 #if defined(CONFIG_BNXT_SRIOV)
6150         else
6151                 bp->vf.max_irqs = max_irqs;
6152 #endif
6153         bnxt_set_dflt_rings(bp);
6154
6155         if (BNXT_PF(bp)) {
6156                 dev->hw_features |= NETIF_F_NTUPLE;
6157                 if (bnxt_rfs_capable(bp)) {
6158                         bp->flags |= BNXT_FLAG_RFS;
6159                         dev->features |= NETIF_F_NTUPLE;
6160                 }
6161         }
6162
6163         if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
6164                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
6165
6166         rc = bnxt_probe_phy(bp);
6167         if (rc)
6168                 goto init_err;
6169
6170         rc = register_netdev(dev);
6171         if (rc)
6172                 goto init_err;
6173
6174         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
6175                     board_info[ent->driver_data].name,
6176                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
6177
6178         return 0;
6179
6180 init_err:
6181         pci_iounmap(pdev, bp->bar0);
6182         pci_release_regions(pdev);
6183         pci_disable_device(pdev);
6184
6185 init_err_free:
6186         free_netdev(dev);
6187         return rc;
6188 }
6189
6190 /**
6191  * bnxt_io_error_detected - called when PCI error is detected
6192  * @pdev: Pointer to PCI device
6193  * @state: The current pci connection state
6194  *
6195  * This function is called after a PCI bus error affecting
6196  * this device has been detected.
6197  */
6198 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
6199                                                pci_channel_state_t state)
6200 {
6201         struct net_device *netdev = pci_get_drvdata(pdev);
6202
6203         netdev_info(netdev, "PCI I/O error detected\n");
6204
6205         rtnl_lock();
6206         netif_device_detach(netdev);
6207
6208         if (state == pci_channel_io_perm_failure) {
6209                 rtnl_unlock();
6210                 return PCI_ERS_RESULT_DISCONNECT;
6211         }
6212
6213         if (netif_running(netdev))
6214                 bnxt_close(netdev);
6215
6216         pci_disable_device(pdev);
6217         rtnl_unlock();
6218
6219         /* Request a slot reset. */
6220         return PCI_ERS_RESULT_NEED_RESET;
6221 }
6222
6223 /**
6224  * bnxt_io_slot_reset - called after the pci bus has been reset.
6225  * @pdev: Pointer to PCI device
6226  *
6227  * Restart the card from scratch, as if from a cold-boot.
6228  * At this point, the card has experienced a hard reset,
6229  * followed by fixups by BIOS, and has its config space
6230  * set up identically to what it was at cold boot.
6231  */
6232 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
6233 {
6234         struct net_device *netdev = pci_get_drvdata(pdev);
6235         struct bnxt *bp = netdev_priv(netdev);
6236         int err = 0;
6237         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
6238
6239         netdev_info(bp->dev, "PCI Slot Reset\n");
6240
6241         rtnl_lock();
6242
6243         if (pci_enable_device(pdev)) {
6244                 dev_err(&pdev->dev,
6245                         "Cannot re-enable PCI device after reset.\n");
6246         } else {
6247                 pci_set_master(pdev);
6248
6249                 if (netif_running(netdev))
6250                         err = bnxt_open(netdev);
6251
6252                 if (!err)
6253                         result = PCI_ERS_RESULT_RECOVERED;
6254         }
6255
6256         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
6257                 dev_close(netdev);
6258
6259         rtnl_unlock();
6260
6261         err = pci_cleanup_aer_uncorrect_error_status(pdev);
6262         if (err) {
6263                 dev_err(&pdev->dev,
6264                         "pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
6265                          err); /* non-fatal, continue */
6266         }
6267
6268         return result;
6269 }
6270
6271 /**
6272  * bnxt_io_resume - called when traffic can start flowing again.
6273  * @pdev: Pointer to PCI device
6274  *
6275  * This callback is called when the error recovery driver tells
6276  * us that it's OK to resume normal operation.
6277  */
6278 static void bnxt_io_resume(struct pci_dev *pdev)
6279 {
6280         struct net_device *netdev = pci_get_drvdata(pdev);
6281
6282         rtnl_lock();
6283
6284         netif_device_attach(netdev);
6285
6286         rtnl_unlock();
6287 }
6288
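/* AER plumbing: detach and close the device when an error is reported,
 * re-enable and reopen it after the slot reset, and re-attach it once
 * traffic may resume.
 */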
6289 static const struct pci_error_handlers bnxt_err_handler = {
6290         .error_detected = bnxt_io_error_detected,
6291         .slot_reset     = bnxt_io_slot_reset,
6292         .resume         = bnxt_io_resume
6293 };
6294
6295 static struct pci_driver bnxt_pci_driver = {
6296         .name           = DRV_MODULE_NAME,
6297         .id_table       = bnxt_pci_tbl,
6298         .probe          = bnxt_init_one,
6299         .remove         = bnxt_remove_one,
6300         .err_handler    = &bnxt_err_handler,
6301 #if defined(CONFIG_BNXT_SRIOV)
6302         .sriov_configure = bnxt_sriov_configure,
6303 #endif
6304 };
6305
6306 module_pci_driver(bnxt_pci_driver);