be2net: add dma_mapping_error() check for dma_map_page()
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

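/* Enable/disable host interrupts by toggling the HOSTINTR bit in the
 * membar control register in PCI config space. Used as a fallback when
 * the FW-cmd based method (be_cmd_intr_set) fails.
 */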
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On lancer interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

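/* Notify HW of newly posted RX buffers: the queue-id goes in the low bits
 * of the doorbell word and the count of posted entries in the NUM_POSTED
 * field. The wmb() ensures the descriptors are visible in memory before
 * the doorbell is rung.
 */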
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

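/* Ring the event-queue doorbell: optionally re-arm the EQ and/or clear the
 * interrupt, and acknowledge 'num_popped' processed event entries.
 */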
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

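/* ndo_set_mac_address handler: programs the new MAC via PMAC_ADD and then
 * verifies with the FW that the new MAC actually took effect before
 * updating netdev->dev_addr.
 */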
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. That failure is OK only if the PF has
         * programmed the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide whether the new MAC was successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege,
         * or because the PF did not pre-provision it.
         */
        if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

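/* Accumulate a 16-bit HW counter that can wrap past 65535 into a 32-bit
 * software counter; a new value smaller than the low word of the
 * accumulator indicates the HW counter wrapped once since the last read.
 */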
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   ((x) & 0xFFFF)
#define hi(x)                   ((x) & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        /* for BE3 */
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

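/* Fill one TX work-request block: the 64-bit DMA address is split into
 * hi/lo 32-bit words and the fragment length is masked down to the width
 * the hardware supports.
 */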
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

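/* Undo the DMA mapping of one TX WRB. The WRB is converted back to CPU
 * byte-order to recover the DMA address; 'unmap_single' indicates whether
 * the fragment was mapped with dma_map_single() (skb head) or as a page
 * fragment.
 */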
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

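/* Map the skb head (dma_map_single) and each page fragment
 * (skb_frag_dma_map, a dma_map_page wrapper) and post one WRB per mapping.
 * Every mapping is checked with dma_mapping_error(); on failure all WRBs
 * mapped so far are unwound via unmap_tx_frag() and 0 is returned so the
 * caller can drop the skb.
 */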
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

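/* Insert the VLAN tag (and the outer QnQ tag, if one is configured) into
 * the packet itself instead of letting the HW do it. May return NULL if
 * the skb cannot be shared/expanded.
 */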
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* As a f/w workaround, set skip_hw_vlan = 1, which informs
                 * the F/W to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

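/* Apply the HW/FW bug workarounds below to the skb before transmission.
 * Returns the (possibly modified) skb, or NULL if the packet had to be
 * dropped.
 */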
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes
         * or less may cause a transmit stall on that port. So the
         * work-around is to pad short packets (<= 32 bytes) to a 36-byte
         * length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
         * For padded packets, Lancer computes incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

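/* ndo_start_xmit handler: builds the WRBs for the skb, stops the queue if
 * it may not have room for a max-fragment skb, and rings the TX doorbell.
 */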
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 0);

        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled. */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                                dev_info(&adapter->pdev->dev,
                                         "Re-Enabling HW VLAN filtering\n");
                        }
                }
        }

        return status;

set_vlan_promisc:
        dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else {
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= be_max_vlans(adapter))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

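/* ndo_set_rx_mode handler: programs promiscuous, multicast and unicast
 * filters; falls back to (multicast) promiscuous mode when the HW filter
 * tables are exhausted.
 */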
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        vf_cfg->vlan_tag = vlan;
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                vf_cfg->vlan_tag = 0;
                vlan = vf_cfg->def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                               vf_cfg->if_handle, 0);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
{
        aic->rx_pkts_prev = rx_pkts;
        aic->tx_reqs_prev = tx_pkts;
        aic->jiffies = now;
}

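/* Adaptive interrupt coalescing: derive an EQ-delay value from the
 * observed RX+TX packet rate since the last sample, and program it into
 * the EQs whose delay has changed.
 */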
1354 static void be_eqd_update(struct be_adapter *adapter)
1355 {
1356         struct be_set_eqd set_eqd[MAX_EVT_QS];
1357         int eqd, i, num = 0, start;
1358         struct be_aic_obj *aic;
1359         struct be_eq_obj *eqo;
1360         struct be_rx_obj *rxo;
1361         struct be_tx_obj *txo;
1362         u64 rx_pkts, tx_pkts;
1363         ulong now;
1364         u32 pps, delta;
1365
1366         for_all_evt_queues(adapter, eqo, i) {
1367                 aic = &adapter->aic_obj[eqo->idx];
1368                 if (!aic->enable) {
1369                         if (aic->jiffies)
1370                                 aic->jiffies = 0;
1371                         eqd = aic->et_eqd;
1372                         goto modify_eqd;
1373                 }
1374
1375                 rxo = &adapter->rx_obj[eqo->idx];
1376                 do {
1377                         start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1378                         rx_pkts = rxo->stats.rx_pkts;
1379                 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1380
1381                 txo = &adapter->tx_obj[eqo->idx];
1382                 do {
1383                         start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1384                         tx_pkts = txo->stats.tx_reqs;
1385                 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1386
1388                 /* Skip, if wrapped around or first calculation */
1389                 now = jiffies;
1390                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1391                     rx_pkts < aic->rx_pkts_prev ||
1392                     tx_pkts < aic->tx_reqs_prev) {
1393                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1394                         continue;
1395                 }
1396
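                     /* Packets/sec over the sample window; the heuristic
                      * below grants roughly 4 EQD units per 15K pps and
                      * clamps the result to [min_eqd, max_eqd].
                      */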
1397                 delta = jiffies_to_msecs(now - aic->jiffies);
1398                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1399                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1400                 eqd = (pps / 15000) << 2;
1401
1402                 if (eqd < 8)
1403                         eqd = 0;
1404                 eqd = min_t(u32, eqd, aic->max_eqd);
1405                 eqd = max_t(u32, eqd, aic->min_eqd);
1406
1407                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1408 modify_eqd:
1409                 if (eqd != aic->prev_eqd) {
1410                         set_eqd[num].delay_multiplier = (eqd * 65) / 100;
1411                         set_eqd[num].eq_id = eqo->q.id;
1412                         aic->prev_eqd = eqd;
1413                         num++;
1414                 }
1415         }
1416
1417         if (num)
1418                 be_cmd_modify_eqd(adapter, set_eqd, num);
1419 }
1420
1421 static void be_rx_stats_update(struct be_rx_obj *rxo,
1422                 struct be_rx_compl_info *rxcp)
1423 {
1424         struct be_rx_stats *stats = rx_stats(rxo);
1425
1426         u64_stats_update_begin(&stats->sync);
1427         stats->rx_compl++;
1428         stats->rx_bytes += rxcp->pkt_size;
1429         stats->rx_pkts++;
1430         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1431                 stats->rx_mcast_pkts++;
1432         if (rxcp->err)
1433                 stats->rx_compl_err++;
1434         u64_stats_update_end(&stats->sync);
1435 }
1436
1437 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1438 {
1439         /* L4 checksum is not reliable for non-TCP/UDP packets.
1440          * Also ignore ipcksm for IPv6 packets. */
1441         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1442                                 (rxcp->ip_csum || rxcp->ipv6);
1443 }
1444
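     /* Rx fragments are carved out of one large DMA-mapped page; the page
      * is unmapped only when its last fragment (flagged last_page_user at
      * post time) is consumed.
      */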
1445 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1446                                                 u16 frag_idx)
1447 {
1448         struct be_adapter *adapter = rxo->adapter;
1449         struct be_rx_page_info *rx_page_info;
1450         struct be_queue_info *rxq = &rxo->q;
1451
1452         rx_page_info = &rxo->page_info_tbl[frag_idx];
1453         BUG_ON(!rx_page_info->page);
1454
1455         if (rx_page_info->last_page_user) {
1456                 dma_unmap_page(&adapter->pdev->dev,
1457                                dma_unmap_addr(rx_page_info, bus),
1458                                adapter->big_page_size, DMA_FROM_DEVICE);
1459                 rx_page_info->last_page_user = false;
1460         }
1461
1462         atomic_dec(&rxq->used);
1463         return rx_page_info;
1464 }
1465
1466 /* Throw away the data in the Rx completion */
1467 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1468                                 struct be_rx_compl_info *rxcp)
1469 {
1470         struct be_queue_info *rxq = &rxo->q;
1471         struct be_rx_page_info *page_info;
1472         u16 i, num_rcvd = rxcp->num_rcvd;
1473
1474         for (i = 0; i < num_rcvd; i++) {
1475                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1476                 put_page(page_info->page);
1477                 memset(page_info, 0, sizeof(*page_info));
1478                 index_inc(&rxcp->rxq_idx, rxq->len);
1479         }
1480 }
1481
1482 /*
1483  * skb_fill_rx_data forms a complete skb for an ether frame
1484  * indicated by rxcp.
1485  */
1486 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1487                              struct be_rx_compl_info *rxcp)
1488 {
1489         struct be_queue_info *rxq = &rxo->q;
1490         struct be_rx_page_info *page_info;
1491         u16 i, j;
1492         u16 hdr_len, curr_frag_len, remaining;
1493         u8 *start;
1494
1495         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1496         start = page_address(page_info->page) + page_info->page_offset;
1497         prefetch(start);
1498
1499         /* Copy data in the first descriptor of this completion */
1500         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1501
1502         skb->len = curr_frag_len;
1503         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1504                 memcpy(skb->data, start, curr_frag_len);
1505                 /* Complete packet has now been moved to data */
1506                 put_page(page_info->page);
1507                 skb->data_len = 0;
1508                 skb->tail += curr_frag_len;
1509         } else {
1510                 hdr_len = ETH_HLEN;
1511                 memcpy(skb->data, start, hdr_len);
1512                 skb_shinfo(skb)->nr_frags = 1;
1513                 skb_frag_set_page(skb, 0, page_info->page);
1514                 skb_shinfo(skb)->frags[0].page_offset =
1515                                         page_info->page_offset + hdr_len;
1516                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1517                 skb->data_len = curr_frag_len - hdr_len;
1518                 skb->truesize += rx_frag_size;
1519                 skb->tail += hdr_len;
1520         }
1521         page_info->page = NULL;
1522
1523         if (rxcp->pkt_size <= rx_frag_size) {
1524                 BUG_ON(rxcp->num_rcvd != 1);
1525                 return;
1526         }
1527
1528         /* More frags present for this completion */
1529         index_inc(&rxcp->rxq_idx, rxq->len);
1530         remaining = rxcp->pkt_size - curr_frag_len;
1531         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1532                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1533                 curr_frag_len = min(remaining, rx_frag_size);
1534
1535                 /* Coalesce all frags from the same physical page in one slot */
1536                 if (page_info->page_offset == 0) {
1537                         /* Fresh page */
1538                         j++;
1539                         skb_frag_set_page(skb, j, page_info->page);
1540                         skb_shinfo(skb)->frags[j].page_offset =
1541                                                         page_info->page_offset;
1542                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1543                         skb_shinfo(skb)->nr_frags++;
1544                 } else {
1545                         put_page(page_info->page);
1546                 }
1547
1548                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1549                 skb->len += curr_frag_len;
1550                 skb->data_len += curr_frag_len;
1551                 skb->truesize += rx_frag_size;
1552                 remaining -= curr_frag_len;
1553                 index_inc(&rxcp->rxq_idx, rxq->len);
1554                 page_info->page = NULL;
1555         }
1556         BUG_ON(j > MAX_SKB_FRAGS);
1557 }
1558
1559 /* Process the RX completion indicated by rxcp when GRO is disabled */
1560 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1561                                 struct be_rx_compl_info *rxcp)
1562 {
1563         struct be_adapter *adapter = rxo->adapter;
1564         struct net_device *netdev = adapter->netdev;
1565         struct sk_buff *skb;
1566
1567         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1568         if (unlikely(!skb)) {
1569                 rx_stats(rxo)->rx_drops_no_skbs++;
1570                 be_rx_compl_discard(rxo, rxcp);
1571                 return;
1572         }
1573
1574         skb_fill_rx_data(rxo, skb, rxcp);
1575
1576         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1577                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1578         else
1579                 skb_checksum_none_assert(skb);
1580
1581         skb->protocol = eth_type_trans(skb, netdev);
1582         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1583         if (netdev->features & NETIF_F_RXHASH)
1584                 skb->rxhash = rxcp->rss_hash;
1585         skb_mark_napi_id(skb, napi);
1586
1587         if (rxcp->vlanf)
1588                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1589
1590         netif_receive_skb(skb);
1591 }
1592
1593 /* Process the RX completion indicated by rxcp when GRO is enabled */
1594 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1595                                     struct napi_struct *napi,
1596                                     struct be_rx_compl_info *rxcp)
1597 {
1598         struct be_adapter *adapter = rxo->adapter;
1599         struct be_rx_page_info *page_info;
1600         struct sk_buff *skb = NULL;
1601         struct be_queue_info *rxq = &rxo->q;
1602         u16 remaining, curr_frag_len;
1603         u16 i, j;
1604
1605         skb = napi_get_frags(napi);
1606         if (!skb) {
1607                 be_rx_compl_discard(rxo, rxcp);
1608                 return;
1609         }
1610
1611         remaining = rxcp->pkt_size;
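             /* j is u16, so initializing it to -1 relies on wrap-around:
              * the first fragment always takes the "fresh page" branch and
              * increments j to 0 before frags[j] is touched.
              */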
1612         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1613                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1614
1615                 curr_frag_len = min(remaining, rx_frag_size);
1616
1617                 /* Coalesce all frags from the same physical page in one slot */
1618                 if (i == 0 || page_info->page_offset == 0) {
1619                         /* First frag or Fresh page */
1620                         j++;
1621                         skb_frag_set_page(skb, j, page_info->page);
1622                         skb_shinfo(skb)->frags[j].page_offset =
1623                                                         page_info->page_offset;
1624                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1625                 } else {
1626                         put_page(page_info->page);
1627                 }
1628                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1629                 skb->truesize += rx_frag_size;
1630                 remaining -= curr_frag_len;
1631                 index_inc(&rxcp->rxq_idx, rxq->len);
1632                 memset(page_info, 0, sizeof(*page_info));
1633         }
1634         BUG_ON(j > MAX_SKB_FRAGS);
1635
1636         skb_shinfo(skb)->nr_frags = j + 1;
1637         skb->len = rxcp->pkt_size;
1638         skb->data_len = rxcp->pkt_size;
1639         skb->ip_summed = CHECKSUM_UNNECESSARY;
1640         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1641         if (adapter->netdev->features & NETIF_F_RXHASH)
1642                 skb->rxhash = rxcp->rss_hash;
1643         skb_mark_napi_id(skb, napi);
1644
1645         if (rxcp->vlanf)
1646                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1647
1648         napi_gro_frags(napi);
1649 }
1650
1651 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1652                                  struct be_rx_compl_info *rxcp)
1653 {
1654         rxcp->pkt_size =
1655                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1656         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1657         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1658         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1659         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1660         rxcp->ip_csum =
1661                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1662         rxcp->l4_csum =
1663                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1664         rxcp->ipv6 =
1665                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1666         rxcp->rxq_idx =
1667                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1668         rxcp->num_rcvd =
1669                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1670         rxcp->pkt_type =
1671                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1672         rxcp->rss_hash =
1673                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1674         if (rxcp->vlanf) {
1675                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1676                                           compl);
1677                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1678                                                compl);
1679         }
1680         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1681 }
1682
1683 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1684                                  struct be_rx_compl_info *rxcp)
1685 {
1686         rxcp->pkt_size =
1687                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1688         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1689         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1690         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1691         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1692         rxcp->ip_csum =
1693                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1694         rxcp->l4_csum =
1695                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1696         rxcp->ipv6 =
1697                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1698         rxcp->rxq_idx =
1699                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1700         rxcp->num_rcvd =
1701                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1702         rxcp->pkt_type =
1703                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1704         rxcp->rss_hash =
1705                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1706         if (rxcp->vlanf) {
1707                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1708                                           compl);
1709                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1710                                                compl);
1711         }
1712         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1713         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1714                                       ip_frag, compl);
1715 }
1716
1717 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1718 {
1719         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1720         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1721         struct be_adapter *adapter = rxo->adapter;
1722
1723         /* For checking the valid bit it is OK to use either definition as the
1724          * valid bit is at the same position in both v0 and v1 Rx compl */
1725         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1726                 return NULL;
1727
1728         rmb();
1729         be_dws_le_to_cpu(compl, sizeof(*compl));
1730
1731         if (adapter->be3_native)
1732                 be_parse_rx_compl_v1(compl, rxcp);
1733         else
1734                 be_parse_rx_compl_v0(compl, rxcp);
1735
1736         if (rxcp->ip_frag)
1737                 rxcp->l4_csum = 0;
1738
1739         if (rxcp->vlanf) {
1740                 /* vlanf could be wrongly set in some cards.
1741                  * Ignore it if vtm is not set */
1742                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1743                         rxcp->vlanf = 0;
1744
1745                 if (!lancer_chip(adapter))
1746                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1747
1748                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1749                     !adapter->vlan_tag[rxcp->vlan_tag])
1750                         rxcp->vlanf = 0;
1751         }
1752
1753         /* As the compl has been parsed, reset it; we won't touch it again */
1754         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1755
1756         queue_tail_inc(&rxo->cq);
1757         return rxcp;
1758 }
1759
1760 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1761 {
1762         u32 order = get_order(size);
1763
1764         if (order > 0)
1765                 gfp |= __GFP_COMP;
1766         return  alloc_pages(gfp, order);
1767 }
1768
1769 /*
1770  * Allocate a page, split it to fragments of size rx_frag_size and post as
1771  * receive buffers to BE
1772  */
1773 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1774 {
1775         struct be_adapter *adapter = rxo->adapter;
1776         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1777         struct be_queue_info *rxq = &rxo->q;
1778         struct page *pagep = NULL;
1779         struct device *dev = &adapter->pdev->dev;
1780         struct be_eth_rx_d *rxd;
1781         u64 page_dmaaddr = 0, frag_dmaaddr;
1782         u32 posted, page_offset = 0;
1783
1784         page_info = &rxo->page_info_tbl[rxq->head];
1785         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1786                 if (!pagep) {
1787                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1788                         if (unlikely(!pagep)) {
1789                                 rx_stats(rxo)->rx_post_fail++;
1790                                 break;
1791                         }
1792                         page_dmaaddr = dma_map_page(dev, pagep, 0,
1793                                                     adapter->big_page_size,
1794                                                     DMA_FROM_DEVICE);
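                             /* dma_map_page() can fail; on failure drop the
                              * page and stop posting. If nothing gets posted
                              * the rx_post_starved path below defers the
                              * replenish to be_worker.
                              */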
1795                         if (dma_mapping_error(dev, page_dmaaddr)) {
1796                                 put_page(pagep);
1797                                 pagep = NULL;
1798                                 rx_stats(rxo)->rx_post_fail++;
1799                                 break;
1800                         }
1801                         page_info->page_offset = 0;
1802                 } else {
1803                         get_page(pagep);
1804                         page_info->page_offset = page_offset + rx_frag_size;
1805                 }
1806                 page_offset = page_info->page_offset;
1807                 page_info->page = pagep;
1808                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1809                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1810
1811                 rxd = queue_head_node(rxq);
1812                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1813                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1814
1815                 /* Any space left in the current big page for another frag? */
1816                 if ((page_offset + rx_frag_size + rx_frag_size) >
1817                                         adapter->big_page_size) {
1818                         pagep = NULL;
1819                         page_info->last_page_user = true;
1820                 }
1821
1822                 prev_page_info = page_info;
1823                 queue_head_inc(rxq);
1824                 page_info = &rxo->page_info_tbl[rxq->head];
1825         }
1826         if (pagep)
1827                 prev_page_info->last_page_user = true;
1828
1829         if (posted) {
1830                 atomic_add(posted, &rxq->used);
1831                 if (rxo->rx_post_starved)
1832                         rxo->rx_post_starved = false;
1833                 be_rxq_notify(adapter, rxq->id, posted);
1834         } else if (atomic_read(&rxq->used) == 0) {
1835                 /* Let be_worker replenish when memory is available */
1836                 rxo->rx_post_starved = true;
1837         }
1838 }
1839
1840 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1841 {
1842         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1843
1844         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1845                 return NULL;
1846
1847         rmb();
1848         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1849
1850         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1851
1852         queue_tail_inc(tx_cq);
1853         return txcp;
1854 }
1855
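     /* Walk the TX ring from the header wrb up to last_index, unmapping
      * each fragment exactly once, free the skb and return the number of
      * wrbs (header wrb included) that can be reclaimed.
      */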
1856 static u16 be_tx_compl_process(struct be_adapter *adapter,
1857                 struct be_tx_obj *txo, u16 last_index)
1858 {
1859         struct be_queue_info *txq = &txo->q;
1860         struct be_eth_wrb *wrb;
1861         struct sk_buff **sent_skbs = txo->sent_skb_list;
1862         struct sk_buff *sent_skb;
1863         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1864         bool unmap_skb_hdr = true;
1865
1866         sent_skb = sent_skbs[txq->tail];
1867         BUG_ON(!sent_skb);
1868         sent_skbs[txq->tail] = NULL;
1869
1870         /* skip header wrb */
1871         queue_tail_inc(txq);
1872
1873         do {
1874                 cur_index = txq->tail;
1875                 wrb = queue_tail_node(txq);
1876                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1877                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1878                 unmap_skb_hdr = false;
1879
1880                 num_wrbs++;
1881                 queue_tail_inc(txq);
1882         } while (cur_index != last_index);
1883
1884         kfree_skb(sent_skb);
1885         return num_wrbs;
1886 }
1887
1888 /* Return the number of events in the event queue */
1889 static inline int events_get(struct be_eq_obj *eqo)
1890 {
1891         struct be_eq_entry *eqe;
1892         int num = 0;
1893
1894         do {
1895                 eqe = queue_tail_node(&eqo->q);
1896                 if (eqe->evt == 0)
1897                         break;
1898
1899                 rmb();
1900                 eqe->evt = 0;
1901                 num++;
1902                 queue_tail_inc(&eqo->q);
1903         } while (true);
1904
1905         return num;
1906 }
1907
1908 /* Leaves the EQ in a disarmed state */
1909 static void be_eq_clean(struct be_eq_obj *eqo)
1910 {
1911         int num = events_get(eqo);
1912
1913         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1914 }
1915
1916 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1917 {
1918         struct be_rx_page_info *page_info;
1919         struct be_queue_info *rxq = &rxo->q;
1920         struct be_queue_info *rx_cq = &rxo->cq;
1921         struct be_rx_compl_info *rxcp;
1922         struct be_adapter *adapter = rxo->adapter;
1923         int flush_wait = 0;
1924         u16 tail;
1925
1926         /* Consume pending rx completions.
1927          * Wait for the flush completion (identified by zero num_rcvd)
1928          * to arrive. Notify CQ even when there are no more CQ entries
1929          * for HW to flush partially coalesced CQ entries.
1930          * In Lancer, there is no need to wait for flush compl.
1931          */
1932         for (;;) {
1933                 rxcp = be_rx_compl_get(rxo);
1934                 if (rxcp == NULL) {
1935                         if (lancer_chip(adapter))
1936                                 break;
1937
1938                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1939                                 dev_warn(&adapter->pdev->dev,
1940                                          "did not receive flush compl\n");
1941                                 break;
1942                         }
1943                         be_cq_notify(adapter, rx_cq->id, true, 0);
1944                         mdelay(1);
1945                 } else {
1946                         be_rx_compl_discard(rxo, rxcp);
1947                         be_cq_notify(adapter, rx_cq->id, false, 1);
1948                         if (rxcp->num_rcvd == 0)
1949                                 break;
1950                 }
1951         }
1952
1953         /* After cleanup, leave the CQ in unarmed state */
1954         be_cq_notify(adapter, rx_cq->id, false, 0);
1955
1956         /* Then free posted rx buffers that were not used */
1957         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1958         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1959                 page_info = get_rx_page_info(rxo, tail);
1960                 put_page(page_info->page);
1961                 memset(page_info, 0, sizeof(*page_info));
1962         }
1963         BUG_ON(atomic_read(&rxq->used));
1964         rxq->tail = rxq->head = 0;
1965 }
1966
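     /* Drain TX completions for up to 200ms; any wrbs still outstanding
      * after that are force-freed so posted skbs are not leaked on close.
      */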
1967 static void be_tx_compl_clean(struct be_adapter *adapter)
1968 {
1969         struct be_tx_obj *txo;
1970         struct be_queue_info *txq;
1971         struct be_eth_tx_compl *txcp;
1972         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1973         struct sk_buff *sent_skb;
1974         bool dummy_wrb;
1975         int i, pending_txqs;
1976
1977         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1978         do {
1979                 pending_txqs = adapter->num_tx_qs;
1980
1981                 for_all_tx_queues(adapter, txo, i) {
1982                         txq = &txo->q;
1983                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1984                                 end_idx =
1985                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1986                                                       wrb_index, txcp);
1987                                 num_wrbs += be_tx_compl_process(adapter, txo,
1988                                                                 end_idx);
1989                                 cmpl++;
1990                         }
1991                         if (cmpl) {
1992                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1993                                 atomic_sub(num_wrbs, &txq->used);
1994                                 cmpl = 0;
1995                                 num_wrbs = 0;
1996                         }
1997                         if (atomic_read(&txq->used) == 0)
1998                                 pending_txqs--;
1999                 }
2000
2001                 if (pending_txqs == 0 || ++timeo > 200)
2002                         break;
2003
2004                 mdelay(1);
2005         } while (true);
2006
2007         for_all_tx_queues(adapter, txo, i) {
2008                 txq = &txo->q;
2009                 if (atomic_read(&txq->used))
2010                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2011                                 atomic_read(&txq->used));
2012
2013                 /* free posted tx for which compls will never arrive */
2014                 while (atomic_read(&txq->used)) {
2015                         sent_skb = txo->sent_skb_list[txq->tail];
2016                         end_idx = txq->tail;
2017                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2018                                                    &dummy_wrb);
2019                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2020                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2021                         atomic_sub(num_wrbs, &txq->used);
2022                 }
2023         }
2024 }
2025
2026 static void be_evt_queues_destroy(struct be_adapter *adapter)
2027 {
2028         struct be_eq_obj *eqo;
2029         int i;
2030
2031         for_all_evt_queues(adapter, eqo, i) {
2032                 if (eqo->q.created) {
2033                         be_eq_clean(eqo);
2034                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2035                         napi_hash_del(&eqo->napi);
2036                         netif_napi_del(&eqo->napi);
2037                 }
2038                 be_queue_free(adapter, &eqo->q);
2039         }
2040 }
2041
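     /* One EQ (and one NAPI context) is created per interrupt vector,
      * capped by the configured queue count.
      */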
2042 static int be_evt_queues_create(struct be_adapter *adapter)
2043 {
2044         struct be_queue_info *eq;
2045         struct be_eq_obj *eqo;
2046         struct be_aic_obj *aic;
2047         int i, rc;
2048
2049         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2050                                     adapter->cfg_num_qs);
2051
2052         for_all_evt_queues(adapter, eqo, i) {
2053                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2054                                BE_NAPI_WEIGHT);
2055                 napi_hash_add(&eqo->napi);
2056                 aic = &adapter->aic_obj[i];
2057                 eqo->adapter = adapter;
2058                 eqo->tx_budget = BE_TX_BUDGET;
2059                 eqo->idx = i;
2060                 aic->max_eqd = BE_MAX_EQD;
2061                 aic->enable = true;
2062
2063                 eq = &eqo->q;
2064                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2065                                         sizeof(struct be_eq_entry));
2066                 if (rc)
2067                         return rc;
2068
2069                 rc = be_cmd_eq_create(adapter, eqo);
2070                 if (rc)
2071                         return rc;
2072         }
2073         return 0;
2074 }
2075
2076 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2077 {
2078         struct be_queue_info *q;
2079
2080         q = &adapter->mcc_obj.q;
2081         if (q->created)
2082                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2083         be_queue_free(adapter, q);
2084
2085         q = &adapter->mcc_obj.cq;
2086         if (q->created)
2087                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2088         be_queue_free(adapter, q);
2089 }
2090
2091 /* Must be called only after TX qs are created as MCC shares TX EQ */
2092 static int be_mcc_queues_create(struct be_adapter *adapter)
2093 {
2094         struct be_queue_info *q, *cq;
2095
2096         cq = &adapter->mcc_obj.cq;
2097         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2098                         sizeof(struct be_mcc_compl)))
2099                 goto err;
2100
2101         /* Use the default EQ for MCC completions */
2102         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2103                 goto mcc_cq_free;
2104
2105         q = &adapter->mcc_obj.q;
2106         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2107                 goto mcc_cq_destroy;
2108
2109         if (be_cmd_mccq_create(adapter, q, cq))
2110                 goto mcc_q_free;
2111
2112         return 0;
2113
2114 mcc_q_free:
2115         be_queue_free(adapter, q);
2116 mcc_cq_destroy:
2117         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2118 mcc_cq_free:
2119         be_queue_free(adapter, cq);
2120 err:
2121         return -1;
2122 }
2123
2124 static void be_tx_queues_destroy(struct be_adapter *adapter)
2125 {
2126         struct be_queue_info *q;
2127         struct be_tx_obj *txo;
2128         u8 i;
2129
2130         for_all_tx_queues(adapter, txo, i) {
2131                 q = &txo->q;
2132                 if (q->created)
2133                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2134                 be_queue_free(adapter, q);
2135
2136                 q = &txo->cq;
2137                 if (q->created)
2138                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2139                 be_queue_free(adapter, q);
2140         }
2141 }
2142
2143 static int be_tx_qs_create(struct be_adapter *adapter)
2144 {
2145         struct be_queue_info *cq, *eq;
2146         struct be_tx_obj *txo;
2147         int status, i;
2148
2149         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2150
2151         for_all_tx_queues(adapter, txo, i) {
2152                 cq = &txo->cq;
2153                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2154                                         sizeof(struct be_eth_tx_compl));
2155                 if (status)
2156                         return status;
2157
2158                 u64_stats_init(&txo->stats.sync);
2159                 u64_stats_init(&txo->stats.sync_compl);
2160
2161                 /* If num_evt_qs is less than num_tx_qs, then more than
2162                  * one txq shares an EQ
2163                  */
2164                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2165                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2166                 if (status)
2167                         return status;
2168
2169                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2170                                         sizeof(struct be_eth_wrb));
2171                 if (status)
2172                         return status;
2173
2174                 status = be_cmd_txq_create(adapter, txo);
2175                 if (status)
2176                         return status;
2177         }
2178
2179         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2180                  adapter->num_tx_qs);
2181         return 0;
2182 }
2183
2184 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2185 {
2186         struct be_queue_info *q;
2187         struct be_rx_obj *rxo;
2188         int i;
2189
2190         for_all_rx_queues(adapter, rxo, i) {
2191                 q = &rxo->cq;
2192                 if (q->created)
2193                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2194                 be_queue_free(adapter, q);
2195         }
2196 }
2197
2198 static int be_rx_cqs_create(struct be_adapter *adapter)
2199 {
2200         struct be_queue_info *eq, *cq;
2201         struct be_rx_obj *rxo;
2202         int rc, i;
2203
2204         /* We can create as many RSS rings as there are EQs. */
2205         adapter->num_rx_qs = adapter->num_evt_qs;
2206
2207         /* We'll use RSS only if at least 2 RSS rings are supported.
2208          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2209          */
2210         if (adapter->num_rx_qs > 1)
2211                 adapter->num_rx_qs++;
2212
2213         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2214         for_all_rx_queues(adapter, rxo, i) {
2215                 rxo->adapter = adapter;
2216                 cq = &rxo->cq;
2217                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2218                                 sizeof(struct be_eth_rx_compl));
2219                 if (rc)
2220                         return rc;
2221
2222                 u64_stats_init(&rxo->stats.sync);
2223                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2224                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2225                 if (rc)
2226                         return rc;
2227         }
2228
2229         dev_info(&adapter->pdev->dev,
2230                  "created %d RSS queue(s) and 1 default RX queue\n",
2231                  adapter->num_rx_qs - 1);
2232         return 0;
2233 }
2234
2235 static irqreturn_t be_intx(int irq, void *dev)
2236 {
2237         struct be_eq_obj *eqo = dev;
2238         struct be_adapter *adapter = eqo->adapter;
2239         int num_evts = 0;
2240
2241         /* IRQ is not expected when NAPI is scheduled as the EQ
2242          * will not be armed.
2243          * But, this can happen on Lancer INTx where it takes
2244          * a while to de-assert INTx or in BE2 where occasionally
2245          * an interrupt may be raised even when EQ is unarmed.
2246          * If NAPI is already scheduled, then counting & notifying
2247          * events will orphan them.
2248          */
2249         if (napi_schedule_prep(&eqo->napi)) {
2250                 num_evts = events_get(eqo);
2251                 __napi_schedule(&eqo->napi);
2252                 if (num_evts)
2253                         eqo->spurious_intr = 0;
2254         }
2255         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2256
2257         /* Return IRQ_HANDLED only for the first spurious intr
2258          * after a valid intr to stop the kernel from branding
2259          * this irq as a bad one!
2260          */
2261         if (num_evts || eqo->spurious_intr++ == 0)
2262                 return IRQ_HANDLED;
2263         else
2264                 return IRQ_NONE;
2265 }
2266
2267 static irqreturn_t be_msix(int irq, void *dev)
2268 {
2269         struct be_eq_obj *eqo = dev;
2270
2271         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2272         napi_schedule(&eqo->napi);
2273         return IRQ_HANDLED;
2274 }
2275
2276 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2277 {
2278         return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2279 }
2280
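     /* Consume up to budget RX completions. The polling argument tells
      * NAPI and busy-poll contexts apart; GRO is skipped while
      * busy-polling.
      */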
2281 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2282                         int budget, int polling)
2283 {
2284         struct be_adapter *adapter = rxo->adapter;
2285         struct be_queue_info *rx_cq = &rxo->cq;
2286         struct be_rx_compl_info *rxcp;
2287         u32 work_done;
2288
2289         for (work_done = 0; work_done < budget; work_done++) {
2290                 rxcp = be_rx_compl_get(rxo);
2291                 if (!rxcp)
2292                         break;
2293
2294                 /* Is it a flush compl that has no data */
2295                 if (unlikely(rxcp->num_rcvd == 0))
2296                         goto loop_continue;
2297
2298                 /* Discard compls with partial DMA on Lancer B0 */
2299                 if (unlikely(!rxcp->pkt_size)) {
2300                         be_rx_compl_discard(rxo, rxcp);
2301                         goto loop_continue;
2302                 }
2303
2304                 /* On BE drop pkts that arrive due to imperfect filtering in
2305                  * promiscuous mode on some SKUs
2306                  */
2307                 if (unlikely(rxcp->port != adapter->port_num &&
2308                                 !lancer_chip(adapter))) {
2309                         be_rx_compl_discard(rxo, rxcp);
2310                         goto loop_continue;
2311                 }
2312
2313                 /* Don't do gro when we're busy_polling */
2314                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2315                         be_rx_compl_process_gro(rxo, napi, rxcp);
2316                 else
2317                         be_rx_compl_process(rxo, napi, rxcp);
2318
2319 loop_continue:
2320                 be_rx_stats_update(rxo, rxcp);
2321         }
2322
2323         if (work_done) {
2324                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2325
2326                 /* When an rx-obj gets into post_starved state, just
2327                  * let be_worker do the posting.
2328                  */
2329                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2330                     !rxo->rx_post_starved)
2331                         be_post_rx_frags(rxo, GFP_ATOMIC);
2332         }
2333
2334         return work_done;
2335 }
2336
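     /* Reap up to budget TX completions on this queue; returns true when
      * the completion queue was fully drained within the budget.
      */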
2337 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2338                           int budget, int idx)
2339 {
2340         struct be_eth_tx_compl *txcp;
2341         int num_wrbs = 0, work_done;
2342
2343         for (work_done = 0; work_done < budget; work_done++) {
2344                 txcp = be_tx_compl_get(&txo->cq);
2345                 if (!txcp)
2346                         break;
2347                 num_wrbs += be_tx_compl_process(adapter, txo,
2348                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2349                                         wrb_index, txcp));
2350         }
2351
2352         if (work_done) {
2353                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2354                 atomic_sub(num_wrbs, &txo->q.used);
2355
2356                 /* As Tx wrbs have been freed up, wake up netdev queue
2357                  * if it was stopped due to lack of tx wrbs.  */
2358                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2359                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2360                         netif_wake_subqueue(adapter->netdev, idx);
2361                 }
2362
2363                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2364                 tx_stats(txo)->tx_compl += work_done;
2365                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2366         }
2367         return (work_done < budget); /* Done */
2368 }
2369
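     /* NAPI handler shared by the TX, RX and MCC work mapped to this EQ.
      * Returning the full budget keeps us in polling mode when a TXQ
      * exhausted its budget or the RX processing lock was contended by
      * busy-poll.
      */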
2370 int be_poll(struct napi_struct *napi, int budget)
2371 {
2372         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2373         struct be_adapter *adapter = eqo->adapter;
2374         int max_work = 0, work, i, num_evts;
2375         struct be_rx_obj *rxo;
2376         bool tx_done;
2377
2378         num_evts = events_get(eqo);
2379
2380         /* Process all TXQs serviced by this EQ */
2381         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2382                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2383                                         eqo->tx_budget, i);
2384                 if (!tx_done)
2385                         max_work = budget;
2386         }
2387
2388         if (be_lock_napi(eqo)) {
2389                 /* This loop will iterate twice for EQ0 in which
2390                  * completions of the last RXQ (default one) are also processed.
2391                  * For other EQs the loop iterates only once.
2392                  */
2393                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2394                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2395                         max_work = max(work, max_work);
2396                 }
2397                 be_unlock_napi(eqo);
2398         } else {
2399                 max_work = budget;
2400         }
2401
2402         if (is_mcc_eqo(eqo))
2403                 be_process_mcc(adapter);
2404
2405         if (max_work < budget) {
2406                 napi_complete(napi);
2407                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2408         } else {
2409                 /* As we'll continue in polling mode, count and clear events */
2410                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2411         }
2412         return max_work;
2413 }
2414
2415 #ifdef CONFIG_NET_RX_BUSY_POLL
2416 static int be_busy_poll(struct napi_struct *napi)
2417 {
2418         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2419         struct be_adapter *adapter = eqo->adapter;
2420         struct be_rx_obj *rxo;
2421         int i, work = 0;
2422
2423         if (!be_lock_busy_poll(eqo))
2424                 return LL_FLUSH_BUSY;
2425
2426         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2427                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2428                 if (work)
2429                         break;
2430         }
2431
2432         be_unlock_busy_poll(eqo);
2433         return work;
2434 }
2435 #endif
2436
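     /* Poll for unrecoverable errors: SLIPORT status registers on Lancer,
      * masked UE status registers on BE; decode and log any bits found.
      */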
2437 void be_detect_error(struct be_adapter *adapter)
2438 {
2439         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2440         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2441         u32 i;
2442
2443         if (be_hw_error(adapter))
2444                 return;
2445
2446         if (lancer_chip(adapter)) {
2447                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2448                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2449                         sliport_err1 = ioread32(adapter->db +
2450                                         SLIPORT_ERROR1_OFFSET);
2451                         sliport_err2 = ioread32(adapter->db +
2452                                         SLIPORT_ERROR2_OFFSET);
2453                 }
2454         } else {
2455                 pci_read_config_dword(adapter->pdev,
2456                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2457                 pci_read_config_dword(adapter->pdev,
2458                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2459                 pci_read_config_dword(adapter->pdev,
2460                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2461                 pci_read_config_dword(adapter->pdev,
2462                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2463
2464                 ue_lo = (ue_lo & ~ue_lo_mask);
2465                 ue_hi = (ue_hi & ~ue_hi_mask);
2466         }
2467
2468         /* On certain platforms BE hardware can indicate spurious UEs.
2469          * A real UE will make the h/w stop working completely anyway,
2470          * so hw_error is not set for UE detection.
2471          */
2472         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2473                 adapter->hw_error = true;
2474                 /* Do not log error messages if it's a FW reset */
2475                 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2476                     sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2477                         dev_info(&adapter->pdev->dev,
2478                                  "Firmware update in progress\n");
2479                         return;
2480                 } else {
2481                         dev_err(&adapter->pdev->dev,
2482                                 "Error detected in the card\n");
2483                 }
2484         }
2485
2486         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2487                 dev_err(&adapter->pdev->dev,
2488                         "ERR: sliport status 0x%x\n", sliport_status);
2489                 dev_err(&adapter->pdev->dev,
2490                         "ERR: sliport error1 0x%x\n", sliport_err1);
2491                 dev_err(&adapter->pdev->dev,
2492                         "ERR: sliport error2 0x%x\n", sliport_err2);
2493         }
2494
2495         if (ue_lo) {
2496                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2497                         if (ue_lo & 1)
2498                                 dev_err(&adapter->pdev->dev,
2499                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2500                 }
2501         }
2502
2503         if (ue_hi) {
2504                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2505                         if (ue_hi & 1)
2506                                 dev_err(&adapter->pdev->dev,
2507                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2508                 }
2509         }
2511 }
2512
2513 static void be_msix_disable(struct be_adapter *adapter)
2514 {
2515         if (msix_enabled(adapter)) {
2516                 pci_disable_msix(adapter->pdev);
2517                 adapter->num_msix_vec = 0;
2518                 adapter->num_msix_roce_vec = 0;
2519         }
2520 }
2521
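     /* Try to enable the preferred number of MSI-x vectors; if the HW
      * offers fewer, retry with the count it reports, and fall back to
      * INTx (PFs only) when that also fails.
      */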
2522 static int be_msix_enable(struct be_adapter *adapter)
2523 {
2524         int i, status, num_vec;
2525         struct device *dev = &adapter->pdev->dev;
2526
2527         /* If RoCE is supported, program the max number of NIC vectors that
2528          * may be configured via set-channels, along with vectors needed for
2529          * RoCE. Else, just program the number we'll use initially.
2530          */
2531         if (be_roce_supported(adapter))
2532                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2533                                 2 * num_online_cpus());
2534         else
2535                 num_vec = adapter->cfg_num_qs;
2536
2537         for (i = 0; i < num_vec; i++)
2538                 adapter->msix_entries[i].entry = i;
2539
2540         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2541         if (status == 0) {
2542                 goto done;
2543         } else if (status >= MIN_MSIX_VECTORS) {
2544                 num_vec = status;
2545                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2546                                          num_vec);
2547                 if (!status)
2548                         goto done;
2549         }
2550
2551         dev_warn(dev, "MSIx enable failed\n");
2552
2553         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2554         if (!be_physfn(adapter))
2555                 return status;
2556         return 0;
2557 done:
2558         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2559                 adapter->num_msix_roce_vec = num_vec / 2;
2560                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2561                          adapter->num_msix_roce_vec);
2562         }
2563
2564         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2565
2566         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2567                  adapter->num_msix_vec);
2568         return 0;
2569 }
2570
2571 static inline int be_msix_vec_get(struct be_adapter *adapter,
2572                                 struct be_eq_obj *eqo)
2573 {
2574         return adapter->msix_entries[eqo->msix_idx].vector;
2575 }
2576
2577 static int be_msix_register(struct be_adapter *adapter)
2578 {
2579         struct net_device *netdev = adapter->netdev;
2580         struct be_eq_obj *eqo;
2581         int status, i, vec;
2582
2583         for_all_evt_queues(adapter, eqo, i) {
2584                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2585                 vec = be_msix_vec_get(adapter, eqo);
2586                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2587                 if (status)
2588                         goto err_msix;
2589         }
2590
2591         return 0;
2592 err_msix:
2593         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2594                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2595         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2596                 status);
2597         be_msix_disable(adapter);
2598         return status;
2599 }
2600
2601 static int be_irq_register(struct be_adapter *adapter)
2602 {
2603         struct net_device *netdev = adapter->netdev;
2604         int status;
2605
2606         if (msix_enabled(adapter)) {
2607                 status = be_msix_register(adapter);
2608                 if (status == 0)
2609                         goto done;
2610                 /* INTx is not supported for VF */
2611                 if (!be_physfn(adapter))
2612                         return status;
2613         }
2614
2615         /* INTx: only the first EQ is used */
2616         netdev->irq = adapter->pdev->irq;
2617         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2618                              &adapter->eq_obj[0]);
2619         if (status) {
2620                 dev_err(&adapter->pdev->dev,
2621                         "INTx request IRQ failed - err %d\n", status);
2622                 return status;
2623         }
2624 done:
2625         adapter->isr_registered = true;
2626         return 0;
2627 }
2628
2629 static void be_irq_unregister(struct be_adapter *adapter)
2630 {
2631         struct net_device *netdev = adapter->netdev;
2632         struct be_eq_obj *eqo;
2633         int i;
2634
2635         if (!adapter->isr_registered)
2636                 return;
2637
2638         /* INTx */
2639         if (!msix_enabled(adapter)) {
2640                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2641                 goto done;
2642         }
2643
2644         /* MSIx */
2645         for_all_evt_queues(adapter, eqo, i)
2646                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2647
2648 done:
2649         adapter->isr_registered = false;
2650 }
2651
2652 static void be_rx_qs_destroy(struct be_adapter *adapter)
2653 {
2654         struct be_queue_info *q;
2655         struct be_rx_obj *rxo;
2656         int i;
2657
2658         for_all_rx_queues(adapter, rxo, i) {
2659                 q = &rxo->q;
2660                 if (q->created) {
2661                         be_cmd_rxq_destroy(adapter, q);
2662                         be_rx_cq_clean(rxo);
2663                 }
2664                 be_queue_free(adapter, q);
2665         }
2666 }
2667
2668 static int be_close(struct net_device *netdev)
2669 {
2670         struct be_adapter *adapter = netdev_priv(netdev);
2671         struct be_eq_obj *eqo;
2672         int i;
2673
2674         be_roce_dev_close(adapter);
2675
2676         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2677                 for_all_evt_queues(adapter, eqo, i) {
2678                         napi_disable(&eqo->napi);
2679                         be_disable_busy_poll(eqo);
2680                 }
2681                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2682         }
2683
2684         be_async_mcc_disable(adapter);
2685
2686         /* Wait for all pending tx completions to arrive so that
2687          * all tx skbs are freed.
2688          */
2689         netif_tx_disable(netdev);
2690         be_tx_compl_clean(adapter);
2691
2692         be_rx_qs_destroy(adapter);
2693
2694         for (i = 1; i < (adapter->uc_macs + 1); i++)
2695                 be_cmd_pmac_del(adapter, adapter->if_handle,
2696                                 adapter->pmac_id[i], 0);
2697         adapter->uc_macs = 0;
2698
2699         for_all_evt_queues(adapter, eqo, i) {
2700                 if (msix_enabled(adapter))
2701                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2702                 else
2703                         synchronize_irq(netdev->irq);
2704                 be_eq_clean(eqo);
2705         }
2706
2707         be_irq_unregister(adapter);
2708
2709         return 0;
2710 }
2711
2712 static int be_rx_qs_create(struct be_adapter *adapter)
2713 {
2714         struct be_rx_obj *rxo;
2715         int rc, i, j;
2716         u8 rsstable[128];
2717
2718         for_all_rx_queues(adapter, rxo, i) {
2719                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2720                                     sizeof(struct be_eth_rx_d));
2721                 if (rc)
2722                         return rc;
2723         }
2724
2725         /* The FW would like the default RXQ to be created first */
2726         rxo = default_rxo(adapter);
2727         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2728                                adapter->if_handle, false, &rxo->rss_id);
2729         if (rc)
2730                 return rc;
2731
2732         for_all_rss_queues(adapter, rxo, i) {
2733                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2734                                        rx_frag_size, adapter->if_handle,
2735                                        true, &rxo->rss_id);
2736                 if (rc)
2737                         return rc;
2738         }
2739
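             /* Stripe the 128-entry RSS indirection table across all RSS
              * queues so that flows are spread evenly.
              */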
2740         if (be_multi_rxq(adapter)) {
2741                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2742                         for_all_rss_queues(adapter, rxo, i) {
2743                                 if ((j + i) >= 128)
2744                                         break;
2745                                 rsstable[j + i] = rxo->rss_id;
2746                         }
2747                 }
2748                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2749                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2750
2751                 if (!BEx_chip(adapter))
2752                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2753                                                 RSS_ENABLE_UDP_IPV6;
2754         } else {
2755                 /* Disable RSS if only the default RX Q is created */
2756                 adapter->rss_flags = RSS_ENABLE_NONE;
2757         }
2758
2759         rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2760                                128);
2761         if (rc) {
2762                 adapter->rss_flags = RSS_ENABLE_NONE;
2763                 return rc;
2764         }
2765
2766         /* First time posting */
2767         for_all_rx_queues(adapter, rxo, i)
2768                 be_post_rx_frags(rxo, GFP_KERNEL);
2769         return 0;
2770 }
2771
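/* ndo_open handler. RX queues are created here rather than in be_setup(),
 * presumably so that RX buffers are only held while the interface is up;
 * be_close() destroys them again. CQs are re-armed before NAPI is enabled.
 */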
2772 static int be_open(struct net_device *netdev)
2773 {
2774         struct be_adapter *adapter = netdev_priv(netdev);
2775         struct be_eq_obj *eqo;
2776         struct be_rx_obj *rxo;
2777         struct be_tx_obj *txo;
2778         u8 link_status;
2779         int status, i;
2780
2781         status = be_rx_qs_create(adapter);
2782         if (status)
2783                 goto err;
2784
2785         status = be_irq_register(adapter);
2786         if (status)
2787                 goto err;
2788
2789         for_all_rx_queues(adapter, rxo, i)
2790                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2791
2792         for_all_tx_queues(adapter, txo, i)
2793                 be_cq_notify(adapter, txo->cq.id, true, 0);
2794
2795         be_async_mcc_enable(adapter);
2796
2797         for_all_evt_queues(adapter, eqo, i) {
2798                 napi_enable(&eqo->napi);
2799                 be_enable_busy_poll(eqo);
2800                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2801         }
2802         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2803
2804         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2805         if (!status)
2806                 be_link_status_update(adapter, link_status);
2807
2808         netif_tx_start_all_queues(netdev);
2809         be_roce_dev_open(adapter);
2810         return 0;
2811 err:
2812         be_close(adapter->netdev);
2813         return -EIO;
2814 }
2815
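/* Arms or disarms magic-packet Wake-on-LAN in the FW. Disarming passes an
 * all-zero MAC; pci_enable_wake() is toggled for D3hot and D3cold to
 * match. The command payload must live in DMA-coherent memory.
 */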
2816 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2817 {
2818         struct be_dma_mem cmd;
2819         int status = 0;
2820         u8 mac[ETH_ALEN];
2821
2822         memset(mac, 0, ETH_ALEN);
2823
2824         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2825         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2826                                      GFP_KERNEL);
2827         if (!cmd.va)
2828                 return -ENOMEM;
2829
2830         if (enable) {
2831                 status = pci_write_config_dword(adapter->pdev,
2832                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2833                 if (status) {
2834                         dev_err(&adapter->pdev->dev,
2835                                 "Could not enable Wake-on-lan\n");
2836                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2837                                           cmd.dma);
2838                         return status;
2839                 }
2840                 status = be_cmd_enable_magic_wol(adapter,
2841                                 adapter->netdev->dev_addr, &cmd);
2842                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2843                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2844         } else {
2845                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2846                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2847                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2848         }
2849
2850         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2851         return status;
2852 }
2853
2854 /*
2855  * Generate a seed MAC address from the PF MAC address using jhash.
2856  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2857  * These addresses are programmed in the ASIC by the PF and the VF driver
2858  * queries for the MAC address during its probe.
2859  */
2860 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2861 {
2862         u32 vf;
2863         int status = 0;
2864         u8 mac[ETH_ALEN];
2865         struct be_vf_cfg *vf_cfg;
2866
2867         be_vf_eth_addr_generate(adapter, mac);
2868
2869         for_all_vfs(adapter, vf_cfg, vf) {
2870                 if (BEx_chip(adapter))
2871                         status = be_cmd_pmac_add(adapter, mac,
2872                                                  vf_cfg->if_handle,
2873                                                  &vf_cfg->pmac_id, vf + 1);
2874                 else
2875                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2876                                                 vf + 1);
2877
2878                 if (status)
2879                         dev_err(&adapter->pdev->dev,
2880                         "Mac address assignment failed for VF %d\n", vf);
2881                 else
2882                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2883
2884                 mac[5] += 1;
2885         }
2886         return status;
2887 }
2888
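/* Used when VFs are already enabled (e.g. across a PF driver reload):
 * read back each VF's currently provisioned MAC address from the FW
 * rather than generating fresh ones.
 */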
2889 static int be_vfs_mac_query(struct be_adapter *adapter)
2890 {
2891         int status, vf;
2892         u8 mac[ETH_ALEN];
2893         struct be_vf_cfg *vf_cfg;
2894         bool active = false;
2895
2896         for_all_vfs(adapter, vf_cfg, vf) {
2897                 be_cmd_get_mac_from_list(adapter, mac, &active,
2898                                          &vf_cfg->pmac_id, 0);
2899
2900                 status = be_cmd_mac_addr_query(adapter, mac, false,
2901                                                vf_cfg->if_handle, 0);
2902                 if (status)
2903                         return status;
2904                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2905         }
2906         return 0;
2907 }
2908
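/* Tears down all per-VF state. If any VF is still assigned to a guest,
 * SR-IOV is deliberately left enabled and only the host-side bookkeeping
 * is released.
 */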
2909 static void be_vf_clear(struct be_adapter *adapter)
2910 {
2911         struct be_vf_cfg *vf_cfg;
2912         u32 vf;
2913
2914         if (pci_vfs_assigned(adapter->pdev)) {
2915                 dev_warn(&adapter->pdev->dev,
2916                          "VFs are assigned to VMs: not disabling VFs\n");
2917                 goto done;
2918         }
2919
2920         pci_disable_sriov(adapter->pdev);
2921
2922         for_all_vfs(adapter, vf_cfg, vf) {
2923                 if (BEx_chip(adapter))
2924                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2925                                         vf_cfg->pmac_id, vf + 1);
2926                 else
2927                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2928                                        vf + 1);
2929
2930                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2931         }
2932 done:
2933         kfree(adapter->vf_cfg);
2934         adapter->num_vfs = 0;
2935 }
2936
2937 static void be_clear_queues(struct be_adapter *adapter)
2938 {
2939         be_mcc_queues_destroy(adapter);
2940         be_rx_cqs_destroy(adapter);
2941         be_tx_queues_destroy(adapter);
2942         be_evt_queues_destroy(adapter);
2943 }
2944
2945 static void be_cancel_worker(struct be_adapter *adapter)
2946 {
2947         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2948                 cancel_delayed_work_sync(&adapter->work);
2949                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2950         }
2951 }
2952
2953 static void be_mac_clear(struct be_adapter *adapter)
2954 {
2955         int i;
2956
2957         if (adapter->pmac_id) {
2958                 for (i = 0; i < (adapter->uc_macs + 1); i++)
2959                         be_cmd_pmac_del(adapter, adapter->if_handle,
2960                                         adapter->pmac_id[i], 0);
2961                 adapter->uc_macs = 0;
2962
2963                 kfree(adapter->pmac_id);
2964                 adapter->pmac_id = NULL;
2965         }
2966 }
2967
2968 static int be_clear(struct be_adapter *adapter)
2969 {
2970         be_cancel_worker(adapter);
2971
2972         if (sriov_enabled(adapter))
2973                 be_vf_clear(adapter);
2974
2975         /* delete the primary mac along with the uc-mac list */
2976         be_mac_clear(adapter);
2977
2978         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2979
2980         be_clear_queues(adapter);
2981
2982         be_msix_disable(adapter);
2983         return 0;
2984 }
2985
2986 static int be_vfs_if_create(struct be_adapter *adapter)
2987 {
2988         struct be_resources res = {0};
2989         struct be_vf_cfg *vf_cfg;
2990         u32 cap_flags, en_flags, vf;
2991         int status = 0;
2992
2993         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2994                     BE_IF_FLAGS_MULTICAST;
2995
2996         for_all_vfs(adapter, vf_cfg, vf) {
2997                 if (!BE3_chip(adapter)) {
2998                         status = be_cmd_get_profile_config(adapter, &res,
2999                                                            vf + 1);
3000                         if (!status)
3001                                 cap_flags = res.if_cap_flags;
3002                 }
3003
3004                 /* If a FW profile exists, then cap_flags are updated */
3005                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3006                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3007                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3008                                           &vf_cfg->if_handle, vf + 1);
3009                 if (status)
3010                         goto err;
3011         }
3012 err:
3013         return status;
3014 }
3015
3016 static int be_vf_setup_init(struct be_adapter *adapter)
3017 {
3018         struct be_vf_cfg *vf_cfg;
3019         int vf;
3020
3021         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3022                                   GFP_KERNEL);
3023         if (!adapter->vf_cfg)
3024                 return -ENOMEM;
3025
3026         for_all_vfs(adapter, vf_cfg, vf) {
3027                 vf_cfg->if_handle = -1;
3028                 vf_cfg->pmac_id = -1;
3029         }
3030         return 0;
3031 }
3032
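/* Main SR-IOV setup. If the FW still has VFs enabled from a previous
 * driver instance, their interfaces and MACs are re-queried and re-used;
 * otherwise VF interfaces are created, MACs are assigned from the jhash
 * seed, and pci_enable_sriov() is called as the final step.
 */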
3033 static int be_vf_setup(struct be_adapter *adapter)
3034 {
3035         struct be_vf_cfg *vf_cfg;
3036         u16 def_vlan, lnk_speed;
3037         int status, old_vfs, vf;
3038         struct device *dev = &adapter->pdev->dev;
3039         u32 privileges;
3040
3041         old_vfs = pci_num_vf(adapter->pdev);
3042         if (old_vfs) {
3043                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3044                 if (old_vfs != num_vfs)
3045                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3046                 adapter->num_vfs = old_vfs;
3047         } else {
3048                 if (num_vfs > be_max_vfs(adapter))
3049                         dev_info(dev, "Device supports %d VFs and not %d\n",
3050                                  be_max_vfs(adapter), num_vfs);
3051                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3052                 if (!adapter->num_vfs)
3053                         return 0;
3054         }
3055
3056         status = be_vf_setup_init(adapter);
3057         if (status)
3058                 goto err;
3059
3060         if (old_vfs) {
3061                 for_all_vfs(adapter, vf_cfg, vf) {
3062                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3063                         if (status)
3064                                 goto err;
3065                 }
3066         } else {
3067                 status = be_vfs_if_create(adapter);
3068                 if (status)
3069                         goto err;
3070         }
3071
3072         if (old_vfs) {
3073                 status = be_vfs_mac_query(adapter);
3074                 if (status)
3075                         goto err;
3076         } else {
3077                 status = be_vf_eth_addr_config(adapter);
3078                 if (status)
3079                         goto err;
3080         }
3081
3082         for_all_vfs(adapter, vf_cfg, vf) {
3083                 /* Allow VFs to program MAC/VLAN filters */
3084                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3085                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3086                         status = be_cmd_set_fn_privileges(adapter,
3087                                                           privileges |
3088                                                           BE_PRIV_FILTMGMT,
3089                                                           vf + 1);
3090                         if (!status)
3091                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3092                                          vf);
3093                 }
3094
3095                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
3096                  * Allow the full available bandwidth.
3097                  */
3098                 if (BE3_chip(adapter) && !old_vfs)
3099                         be_cmd_set_qos(adapter, 1000, vf + 1);
3100
3101                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3102                                                   NULL, vf + 1);
3103                 if (!status)
3104                         vf_cfg->tx_rate = lnk_speed;
3105
3106                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
3107                                                vf + 1, vf_cfg->if_handle, NULL);
3108                 if (status)
3109                         goto err;
3110                 vf_cfg->def_vid = def_vlan;
3111
3112                 if (!old_vfs)
3113                         be_cmd_enable_vf(adapter, vf + 1);
3114         }
3115
3116         if (!old_vfs) {
3117                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3118                 if (status) {
3119                         dev_err(dev, "SRIOV enable failed\n");
3120                         adapter->num_vfs = 0;
3121                         goto err;
3122                 }
3123         }
3124         return 0;
3125 err:
3126         dev_err(dev, "VF setup failed\n");
3127         be_vf_clear(adapter);
3128         return status;
3129 }
3130
3131 /* On BE2/BE3 the FW does not report the supported resource limits */
3132 static void BEx_get_resources(struct be_adapter *adapter,
3133                               struct be_resources *res)
3134 {
3135         struct pci_dev *pdev = adapter->pdev;
3136         bool use_sriov = false;
3137         int max_vfs;
3138
3139         max_vfs = pci_sriov_get_totalvfs(pdev);
3140
3141         if (BE3_chip(adapter) && sriov_want(adapter)) {
3142                 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3143                 use_sriov = res->max_vfs;
3144         }
3145
3146         if (be_physfn(adapter))
3147                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3148         else
3149                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3150
3151         if (adapter->function_mode & FLEX10_MODE)
3152                 res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
3153         else if (adapter->function_mode & UMC_ENABLED)
3154                 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
3155         else
3156                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3157         res->max_mcast_mac = BE_MAX_MC;
3158
3159         /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3160         if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3161             !be_physfn(adapter) || (adapter->port_num > 1))
3162                 res->max_tx_qs = 1;
3163         else
3164                 res->max_tx_qs = BE3_MAX_TX_QS;
3165
3166         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3167             !use_sriov && be_physfn(adapter))
3168                 res->max_rss_qs = (adapter->be3_native) ?
3169                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3170         res->max_rx_qs = res->max_rss_qs + 1;
3171
3172         if (be_physfn(adapter))
3173                 res->max_evt_qs = (max_vfs > 0) ?
3174                                         BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3175         else
3176                 res->max_evt_qs = 1;
3177
3178         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3179         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3180                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3181 }
3182
3183 static void be_setup_init(struct be_adapter *adapter)
3184 {
3185         adapter->vlan_prio_bmap = 0xff;
3186         adapter->phy.link_speed = -1;
3187         adapter->if_handle = -1;
3188         adapter->be3_native = false;
3189         adapter->promiscuous = false;
3190         if (be_physfn(adapter))
3191                 adapter->cmd_privileges = MAX_PRIVILEGES;
3192         else
3193                 adapter->cmd_privileges = MIN_PRIVILEGES;
3194 }
3195
3196 static int be_get_resources(struct be_adapter *adapter)
3197 {
3198         struct device *dev = &adapter->pdev->dev;
3199         struct be_resources res = {0};
3200         int status;
3201
3202         if (BEx_chip(adapter)) {
3203                 BEx_get_resources(adapter, &res);
3204                 adapter->res = res;
3205         }
3206
3207         /* For Lancer, SH etc. read per-function resource limits from FW.
3208          * GET_FUNC_CONFIG returns per-function guaranteed limits.
3209          * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
3210          */
3211         if (!BEx_chip(adapter)) {
3212                 status = be_cmd_get_func_config(adapter, &res);
3213                 if (status)
3214                         return status;
3215
3216                 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3217                 if (be_roce_supported(adapter))
3218                         res.max_evt_qs /= 2;
3219                 adapter->res = res;
3220
3221                 if (be_physfn(adapter)) {
3222                         status = be_cmd_get_profile_config(adapter, &res, 0);
3223                         if (status)
3224                                 return status;
3225                         adapter->res.max_vfs = res.max_vfs;
3226                 }
3227
3228                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3229                          be_max_txqs(adapter), be_max_rxqs(adapter),
3230                          be_max_rss(adapter), be_max_eqs(adapter),
3231                          be_max_vfs(adapter));
3232                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3233                          be_max_uc(adapter), be_max_mc(adapter),
3234                          be_max_vlans(adapter));
3235         }
3236
3237         return 0;
3238 }
3239
3240 /* Routine to query per function resource limits */
3241 static int be_get_config(struct be_adapter *adapter)
3242 {
3243         int status;
3244
3245         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3246                                      &adapter->function_mode,
3247                                      &adapter->function_caps,
3248                                      &adapter->asic_rev);
3249         if (status)
3250                 return status;
3251
3252         status = be_get_resources(adapter);
3253         if (status)
3254                 return status;
3255
3256         /* primary mac needs 1 pmac entry */
3257         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3258                                    GFP_KERNEL);
3259         if (!adapter->pmac_id)
3260                 return -ENOMEM;
3261
3262         /* Sanitize cfg_num_qs based on HW and platform limits */
3263         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3264
3265         return 0;
3266 }
3267
3268 static int be_mac_setup(struct be_adapter *adapter)
3269 {
3270         u8 mac[ETH_ALEN];
3271         int status;
3272
3273         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3274                 status = be_cmd_get_perm_mac(adapter, mac);
3275                 if (status)
3276                         return status;
3277
3278                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3279                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3280         } else {
3281                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3282                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3283         }
3284
3285         /* For BE3-R VFs, the PF programs the initial MAC address */
3286         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3287                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3288                                 &adapter->pmac_id[0], 0);
3289         return 0;
3290 }
3291
3292 static void be_schedule_worker(struct be_adapter *adapter)
3293 {
3294         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3295         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3296 }
3297
3298 static int be_setup_queues(struct be_adapter *adapter)
3299 {
3300         struct net_device *netdev = adapter->netdev;
3301         int status;
3302
3303         status = be_evt_queues_create(adapter);
3304         if (status)
3305                 goto err;
3306
3307         status = be_tx_qs_create(adapter);
3308         if (status)
3309                 goto err;
3310
3311         status = be_rx_cqs_create(adapter);
3312         if (status)
3313                 goto err;
3314
3315         status = be_mcc_queues_create(adapter);
3316         if (status)
3317                 goto err;
3318
3319         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3320         if (status)
3321                 goto err;
3322
3323         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3324         if (status)
3325                 goto err;
3326
3327         return 0;
3328 err:
3329         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3330         return status;
3331 }
3332
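/* Re-creates all queues after a ring-count change. The netdev is closed
 * across the operation, and the MSI-X table is re-programmed only when no
 * vectors are shared with RoCE.
 */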
3333 int be_update_queues(struct be_adapter *adapter)
3334 {
3335         struct net_device *netdev = adapter->netdev;
3336         int status;
3337
3338         if (netif_running(netdev))
3339                 be_close(netdev);
3340
3341         be_cancel_worker(adapter);
3342
3343         /* If any vectors have been shared with RoCE we cannot re-program
3344          * the MSI-X table.
3345          */
3346         if (!adapter->num_msix_roce_vec)
3347                 be_msix_disable(adapter);
3348
3349         be_clear_queues(adapter);
3350
3351         if (!msix_enabled(adapter)) {
3352                 status = be_msix_enable(adapter);
3353                 if (status)
3354                         return status;
3355         }
3356
3357         status = be_setup_queues(adapter);
3358         if (status)
3359                 return status;
3360
3361         be_schedule_worker(adapter);
3362
3363         if (netif_running(netdev))
3364                 status = be_open(netdev);
3365
3366         return status;
3367 }
3368
3369 static int be_setup(struct be_adapter *adapter)
3370 {
3371         struct device *dev = &adapter->pdev->dev;
3372         u32 tx_fc, rx_fc, en_flags;
3373         int status;
3374
3375         be_setup_init(adapter);
3376
3377         if (!lancer_chip(adapter))
3378                 be_cmd_req_native_mode(adapter);
3379
3380         status = be_get_config(adapter);
3381         if (status)
3382                 goto err;
3383
3384         status = be_msix_enable(adapter);
3385         if (status)
3386                 goto err;
3387
3388         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3389                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3390         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3391                 en_flags |= BE_IF_FLAGS_RSS;
3392         en_flags = en_flags & be_if_cap_flags(adapter);
3393         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3394                                   &adapter->if_handle, 0);
3395         if (status)
3396                 goto err;
3397
3398         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3399         rtnl_lock();
3400         status = be_setup_queues(adapter);
3401         rtnl_unlock();
3402         if (status)
3403                 goto err;
3404
3405         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3406         /* In UMC mode the FW does not return the right privileges.
3407          * Override with privileges equivalent to the PF.
3408          */
3409         if (be_is_mc(adapter))
3410                 adapter->cmd_privileges = MAX_PRIVILEGES;
3411
3412         status = be_mac_setup(adapter);
3413         if (status)
3414                 goto err;
3415
3416         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3417
3418         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3419                 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3420                         adapter->fw_ver);
3421                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3422         }
3423
3424         if (adapter->vlans_added)
3425                 be_vid_config(adapter);
3426
3427         be_set_rx_mode(adapter->netdev);
3428
3429         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3430
3431         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3432                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3433                                         adapter->rx_fc);
3434
3435         if (sriov_want(adapter)) {
3436                 if (be_max_vfs(adapter))
3437                         be_vf_setup(adapter);
3438                 else
3439                         dev_warn(dev, "device doesn't support SRIOV\n");
3440         }
3441
3442         status = be_cmd_get_phy_info(adapter);
3443         if (!status && be_pause_supported(adapter))
3444                 adapter->phy.fc_autoneg = 1;
3445
3446         be_schedule_worker(adapter);
3447         return 0;
3448 err:
3449         be_clear(adapter);
3450         return status;
3451 }
3452
3453 #ifdef CONFIG_NET_POLL_CONTROLLER
3454 static void be_netpoll(struct net_device *netdev)
3455 {
3456         struct be_adapter *adapter = netdev_priv(netdev);
3457         struct be_eq_obj *eqo;
3458         int i;
3459
3460         for_all_evt_queues(adapter, eqo, i) {
3461                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3462                 napi_schedule(&eqo->napi);
3463         }
3466 }
3467 #endif
3468
3469 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3470 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3471
3472 static bool be_flash_redboot(struct be_adapter *adapter,
3473                         const u8 *p, u32 img_start, int image_size,
3474                         int hdr_size)
3475 {
3476         u32 crc_offset;
3477         u8 flashed_crc[4];
3478         int status;
3479
3480         crc_offset = hdr_size + img_start + image_size - 4;
3481
3482         p += crc_offset;
3483
3484         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3485                         (image_size - 4));
3486         if (status) {
3487                 dev_err(&adapter->pdev->dev,
3488                 "could not get crc from flash, not flashing redboot\n");
3489                 return false;
3490         }
3491
3492         /* Update redboot only if the CRC does not match */
3493         return memcmp(flashed_crc, p, 4) != 0;
3497 }
3498
3499 static bool phy_flashing_required(struct be_adapter *adapter)
3500 {
3501         return (adapter->phy.phy_type == TN_8022 &&
3502                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3503 }
3504
3505 static bool is_comp_in_ufi(struct be_adapter *adapter,
3506                            struct flash_section_info *fsec, int type)
3507 {
3508         int i = 0, img_type = 0;
3509         struct flash_section_info_g2 *fsec_g2 = NULL;
3510
3511         if (BE2_chip(adapter))
3512                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3513
3514         for (i = 0; i < MAX_FLASH_COMP; i++) {
3515                 if (fsec_g2)
3516                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3517                 else
3518                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3519
3520                 if (img_type == type)
3521                         return true;
3522         }
3523         return false;
3525 }
3526
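/* Walk the UFI image past the file header, 32 bytes at a time, looking
 * for the flash section directory identified by the
 * "*** SE FLASH DIRECTORY ***" cookie. Returns NULL if the cookie is not
 * found (corrupted UFI).
 */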
3527 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3528                                          int header_size,
3529                                          const struct firmware *fw)
3530 {
3531         struct flash_section_info *fsec = NULL;
3532         const u8 *p = fw->data;
3533
3534         p += header_size;
3535         while (p < (fw->data + fw->size)) {
3536                 fsec = (struct flash_section_info *)p;
3537                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3538                         return fsec;
3539                 p += 32;
3540         }
3541         return NULL;
3542 }
3543
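/* Writes one firmware component to flash in 32KB chunks. Intermediate
 * chunks use a SAVE operation; the final chunk uses a FLASH operation,
 * which presumably triggers the actual commit in the FW.
 */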
3544 static int be_flash(struct be_adapter *adapter, const u8 *img,
3545                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3546 {
3547         u32 total_bytes = 0, flash_op, num_bytes = 0;
3548         int status = 0;
3549         struct be_cmd_write_flashrom *req = flash_cmd->va;
3550
3551         total_bytes = img_size;
3552         while (total_bytes) {
3553                 num_bytes = min_t(u32, 32*1024, total_bytes);
3554
3555                 total_bytes -= num_bytes;
3556
3557                 if (!total_bytes) {
3558                         if (optype == OPTYPE_PHY_FW)
3559                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3560                         else
3561                                 flash_op = FLASHROM_OPER_FLASH;
3562                 } else {
3563                         if (optype == OPTYPE_PHY_FW)
3564                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3565                         else
3566                                 flash_op = FLASHROM_OPER_SAVE;
3567                 }
3568
3569                 memcpy(req->data_buf, img, num_bytes);
3570                 img += num_bytes;
3571                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3572                                                 flash_op, num_bytes);
3573                 if (status) {
3574                         if (status == ILLEGAL_IOCTL_REQ &&
3575                             optype == OPTYPE_PHY_FW)
3576                                 break;
3577                         dev_err(&adapter->pdev->dev,
3578                                 "cmd to write to flash rom failed.\n");
3579                         return status;
3580                 }
3581         }
3582         return 0;
3583 }
3584
3585 /* For BE2, BE3 and BE3-R */
3586 static int be_flash_BEx(struct be_adapter *adapter,
3587                          const struct firmware *fw,
3588                          struct be_dma_mem *flash_cmd,
3589                          int num_of_images)
3591 {
3592         int status = 0, i, filehdr_size = 0;
3593         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3594         const u8 *p = fw->data;
3595         const struct flash_comp *pflashcomp;
3596         int num_comp, redboot;
3597         struct flash_section_info *fsec = NULL;
3598
3599         struct flash_comp gen3_flash_types[] = {
3600                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3601                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3602                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3603                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3604                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3605                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3606                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3607                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3608                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3609                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3610                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3611                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3612                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3613                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3614                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3615                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3616                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3617                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3618                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3619                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3620         };
3621
3622         struct flash_comp gen2_flash_types[] = {
3623                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3624                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3625                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3626                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3627                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3628                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3629                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3630                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3631                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3632                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3633                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3634                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3635                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3636                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3637                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3638                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3639         };
3640
3641         if (BE3_chip(adapter)) {
3642                 pflashcomp = gen3_flash_types;
3643                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3644                 num_comp = ARRAY_SIZE(gen3_flash_types);
3645         } else {
3646                 pflashcomp = gen2_flash_types;
3647                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3648                 num_comp = ARRAY_SIZE(gen2_flash_types);
3649         }
3650
3651         /* Get flash section info */
3652         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3653         if (!fsec) {
3654                 dev_err(&adapter->pdev->dev,
3655                         "Invalid Cookie. UFI corrupted ?\n");
3656                 return -1;
3657         }
3658         for (i = 0; i < num_comp; i++) {
3659                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3660                         continue;
3661
3662                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3663                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3664                         continue;
3665
3666                 if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
3667                     !phy_flashing_required(adapter))
3668                                 continue;
3669
3670                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3671                         redboot = be_flash_redboot(adapter, fw->data,
3672                                 pflashcomp[i].offset, pflashcomp[i].size,
3673                                 filehdr_size + img_hdrs_size);
3674                         if (!redboot)
3675                                 continue;
3676                 }
3677
3678                 p = fw->data;
3679                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3680                 if (p + pflashcomp[i].size > fw->data + fw->size)
3681                         return -1;
3682
3683                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3684                                         pflashcomp[i].size);
3685                 if (status) {
3686                         dev_err(&adapter->pdev->dev,
3687                                 "Flashing section type %d failed.\n",
3688                                 pflashcomp[i].img_type);
3689                         return status;
3690                 }
3691         }
3692         return 0;
3693 }
3694
3695 static int be_flash_skyhawk(struct be_adapter *adapter,
3696                 const struct firmware *fw,
3697                 struct be_dma_mem *flash_cmd, int num_of_images)
3698 {
3699         int status = 0, i, filehdr_size = 0;
3700         int img_offset, img_size, img_optype, redboot;
3701         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3702         const u8 *p = fw->data;
3703         struct flash_section_info *fsec = NULL;
3704
3705         filehdr_size = sizeof(struct flash_file_hdr_g3);
3706         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3707         if (!fsec) {
3708                 dev_err(&adapter->pdev->dev,
3709                         "Invalid Cookie. UFI corrupted ?\n");
3710                 return -1;
3711         }
3712
3713         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3714                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3715                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3716
3717                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3718                 case IMAGE_FIRMWARE_iSCSI:
3719                         img_optype = OPTYPE_ISCSI_ACTIVE;
3720                         break;
3721                 case IMAGE_BOOT_CODE:
3722                         img_optype = OPTYPE_REDBOOT;
3723                         break;
3724                 case IMAGE_OPTION_ROM_ISCSI:
3725                         img_optype = OPTYPE_BIOS;
3726                         break;
3727                 case IMAGE_OPTION_ROM_PXE:
3728                         img_optype = OPTYPE_PXE_BIOS;
3729                         break;
3730                 case IMAGE_OPTION_ROM_FCoE:
3731                         img_optype = OPTYPE_FCOE_BIOS;
3732                         break;
3733                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3734                         img_optype = OPTYPE_ISCSI_BACKUP;
3735                         break;
3736                 case IMAGE_NCSI:
3737                         img_optype = OPTYPE_NCSI_FW;
3738                         break;
3739                 default:
3740                         continue;
3741                 }
3742
3743                 if (img_optype == OPTYPE_REDBOOT) {
3744                         redboot = be_flash_redboot(adapter, fw->data,
3745                                         img_offset, img_size,
3746                                         filehdr_size + img_hdrs_size);
3747                         if (!redboot)
3748                                 continue;
3749                 }
3750
3751                 p = fw->data;
3752                 p += filehdr_size + img_offset + img_hdrs_size;
3753                 if (p + img_size > fw->data + fw->size)
3754                         return -1;
3755
3756                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3757                 if (status) {
3758                         dev_err(&adapter->pdev->dev,
3759                                 "Flashing section type %d failed.\n",
3760                                 le32_to_cpu(fsec->fsec_entry[i].type));
3761                         return status;
3762                 }
3763         }
3764         return 0;
3765 }
3766
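/* Lancer firmware download: stream the image to the FW's "/prg" object in
 * 32KB chunks, then issue a zero-length write to commit it. Depending on
 * change_status, activating the new FW may need an adapter reset or a
 * full system reboot.
 */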
3767 static int lancer_fw_download(struct be_adapter *adapter,
3768                                 const struct firmware *fw)
3769 {
3770 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3771 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3772         struct be_dma_mem flash_cmd;
3773         const u8 *data_ptr = NULL;
3774         u8 *dest_image_ptr = NULL;
3775         size_t image_size = 0;
3776         u32 chunk_size = 0;
3777         u32 data_written = 0;
3778         u32 offset = 0;
3779         int status = 0;
3780         u8 add_status = 0;
3781         u8 change_status;
3782
3783         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3784                 dev_err(&adapter->pdev->dev,
3785                         "FW Image not properly aligned. "
3786                         "Length must be 4 byte aligned.\n");
3787                 status = -EINVAL;
3788                 goto lancer_fw_exit;
3789         }
3790
3791         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3792                                 + LANCER_FW_DOWNLOAD_CHUNK;
3793         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3794                                           &flash_cmd.dma, GFP_KERNEL);
3795         if (!flash_cmd.va) {
3796                 status = -ENOMEM;
3797                 goto lancer_fw_exit;
3798         }
3799
3800         dest_image_ptr = flash_cmd.va +
3801                                 sizeof(struct lancer_cmd_req_write_object);
3802         image_size = fw->size;
3803         data_ptr = fw->data;
3804
3805         while (image_size) {
3806                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3807
3808                 /* Copy the image chunk content. */
3809                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3810
3811                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3812                                                  chunk_size, offset,
3813                                                  LANCER_FW_DOWNLOAD_LOCATION,
3814                                                  &data_written, &change_status,
3815                                                  &add_status);
3816                 if (status)
3817                         break;
3818
3819                 offset += data_written;
3820                 data_ptr += data_written;
3821                 image_size -= data_written;
3822         }
3823
3824         if (!status) {
3825                 /* Commit the FW written */
3826                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3827                                                  0, offset,
3828                                                  LANCER_FW_DOWNLOAD_LOCATION,
3829                                                  &data_written, &change_status,
3830                                                  &add_status);
3831         }
3832
3833         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3834                                 flash_cmd.dma);
3835         if (status) {
3836                 dev_err(&adapter->pdev->dev,
3837                         "Firmware load error. "
3838                         "Status code: 0x%x Additional Status: 0x%x\n",
3839                         status, add_status);
3840                 goto lancer_fw_exit;
3841         }
3842
3843         if (change_status == LANCER_FW_RESET_NEEDED) {
3844                 dev_info(&adapter->pdev->dev,
3845                          "Resetting adapter to activate new FW\n");
3846                 status = lancer_physdev_ctrl(adapter,
3847                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3848                 if (status) {
3849                         dev_err(&adapter->pdev->dev,
3850                                 "Adapter busy for FW reset.\n"
3851                                 "New FW will not be active.\n");
3852                         goto lancer_fw_exit;
3853                 }
3854         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3855                         dev_err(&adapter->pdev->dev,
3856                                 "System reboot required for new FW"
3857                                 " to be active\n");
3858         }
3859
3860         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3861 lancer_fw_exit:
3862         return status;
3863 }
3864
3865 #define UFI_TYPE2               2
3866 #define UFI_TYPE3               3
3867 #define UFI_TYPE3R              10
3868 #define UFI_TYPE4               4
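/* Matches the UFI image against the adapter: the first char of the build
 * string encodes the ASIC generation ('2'=BE2, '3'=BE3, '4'=Skyhawk), and
 * asic_type_rev 0x10 distinguishes BE3-R images from plain BE3 ones.
 */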
3869 static int be_get_ufi_type(struct be_adapter *adapter,
3870                            struct flash_file_hdr_g3 *fhdr)
3871 {
3872         if (fhdr == NULL)
3873                 goto be_get_ufi_exit;
3874
3875         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3876                 return UFI_TYPE4;
3877         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3878                 if (fhdr->asic_type_rev == 0x10)
3879                         return UFI_TYPE3R;
3880                 else
3881                         return UFI_TYPE3;
3882         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3883                 return UFI_TYPE2;
3884
3885 be_get_ufi_exit:
3886         dev_err(&adapter->pdev->dev,
3887                 "UFI and Interface are not compatible for flashing\n");
3888         return -1;
3889 }
3890
3891 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3892 {
3893         struct flash_file_hdr_g3 *fhdr3;
3894         struct image_hdr *img_hdr_ptr = NULL;
3895         struct be_dma_mem flash_cmd;
3896         const u8 *p;
3897         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3898
3899         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3900         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3901                                           &flash_cmd.dma, GFP_KERNEL);
3902         if (!flash_cmd.va) {
3903                 status = -ENOMEM;
3904                 goto be_fw_exit;
3905         }
3906
3907         p = fw->data;
3908         fhdr3 = (struct flash_file_hdr_g3 *)p;
3909
3910         ufi_type = be_get_ufi_type(adapter, fhdr3);
3911
3912         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3913         for (i = 0; i < num_imgs; i++) {
3914                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3915                                 (sizeof(struct flash_file_hdr_g3) +
3916                                  i * sizeof(struct image_hdr)));
3917                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3918                         switch (ufi_type) {
3919                         case UFI_TYPE4:
3920                                 status = be_flash_skyhawk(adapter, fw,
3921                                                         &flash_cmd, num_imgs);
3922                                 break;
3923                         case UFI_TYPE3R:
3924                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3925                                                       num_imgs);
3926                                 break;
3927                         case UFI_TYPE3:
3928                                 /* Do not flash this UFI on BE3-R cards */
3929                                 if (adapter->asic_rev < 0x10) {
3930                                         status = be_flash_BEx(adapter, fw,
3931                                                               &flash_cmd,
3932                                                               num_imgs);
3933                                 } else {
3934                                         status = -1;
3935                                         dev_err(&adapter->pdev->dev,
3936                                                 "Can't load BE3 UFI on BE3R\n");
3937                                 }
3938                         }
3939                 }
3940         }
3941
3942         if (ufi_type == UFI_TYPE2)
3943                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3944         else if (ufi_type == -1)
3945                 status = -1;
3946
3947         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3948                           flash_cmd.dma);
3949         if (status) {
3950                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3951                 goto be_fw_exit;
3952         }
3953
3954         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3955
3956 be_fw_exit:
3957         return status;
3958 }
3959
3960 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3961 {
3962         const struct firmware *fw;
3963         int status;
3964
3965         if (!netif_running(adapter->netdev)) {
3966                 dev_err(&adapter->pdev->dev,
3967                         "Firmware load not allowed (interface is down)\n");
3968                 return -ENETDOWN;
3969         }
3970
3971         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3972         if (status)
3973                 goto fw_exit;
3974
3975         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3976
3977         if (lancer_chip(adapter))
3978                 status = lancer_fw_download(adapter, fw);
3979         else
3980                 status = be_fw_download(adapter, fw);
3981
3982         if (!status)
3983                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3984                                   adapter->fw_on_flash);
3985
3986 fw_exit:
3987         release_firmware(fw);
3988         return status;
3989 }
3990
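/* Set the port's HW-switch forwarding mode (VEB or VEPA) from the
 * IFLA_BRIDGE_MODE attribute. Only meaningful with SR-IOV enabled, as the
 * mode controls how traffic is switched between the PF and its VFs.
 */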
3991 static int be_ndo_bridge_setlink(struct net_device *dev,
3992                                     struct nlmsghdr *nlh)
3993 {
3994         struct be_adapter *adapter = netdev_priv(dev);
3995         struct nlattr *attr, *br_spec;
3996         int rem;
3997         int status = 0;
3998         u16 mode = 0;
3999
4000         if (!sriov_enabled(adapter))
4001                 return -EOPNOTSUPP;
4002
4003         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
        if (!br_spec)
                return -EINVAL;
4004
4005         nla_for_each_nested(attr, br_spec, rem) {
4006                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4007                         continue;
4008
4009                 mode = nla_get_u16(attr);
4010                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4011                         return -EINVAL;
4012
4013                 status = be_cmd_set_hsw_config(adapter, 0, 0,
4014                                                adapter->if_handle,
4015                                                mode == BRIDGE_MODE_VEPA ?
4016                                                PORT_FWD_TYPE_VEPA :
4017                                                PORT_FWD_TYPE_VEB);
4018                 if (status)
4019                         goto err;
4020
4021                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4022                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4023
4024                 return status;
4025         }
4026 err:
4027         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4028                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4029
4030         return status;
4031 }
4032
4033 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4034                                     struct net_device *dev,
4035                                     u32 filter_mask)
4036 {
4037         struct be_adapter *adapter = netdev_priv(dev);
4038         int status = 0;
4039         u8 hsw_mode;
4040
4041         if (!sriov_enabled(adapter))
4042                 return 0;
4043
4044         /* BE and Lancer chips support VEB mode only */
4045         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4046                 hsw_mode = PORT_FWD_TYPE_VEB;
4047         } else {
4048                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4049                                                adapter->if_handle, &hsw_mode);
4050                 if (status)
4051                         return 0;
4052         }
4053
4054         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4055                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4056                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4057 }
4058
4059 static const struct net_device_ops be_netdev_ops = {
4060         .ndo_open               = be_open,
4061         .ndo_stop               = be_close,
4062         .ndo_start_xmit         = be_xmit,
4063         .ndo_set_rx_mode        = be_set_rx_mode,
4064         .ndo_set_mac_address    = be_mac_addr_set,
4065         .ndo_change_mtu         = be_change_mtu,
4066         .ndo_get_stats64        = be_get_stats64,
4067         .ndo_validate_addr      = eth_validate_addr,
4068         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
4069         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
4070         .ndo_set_vf_mac         = be_set_vf_mac,
4071         .ndo_set_vf_vlan        = be_set_vf_vlan,
4072         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
4073         .ndo_get_vf_config      = be_get_vf_config,
4074 #ifdef CONFIG_NET_POLL_CONTROLLER
4075         .ndo_poll_controller    = be_netpoll,
4076 #endif
4077         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
4078         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
4079 #ifdef CONFIG_NET_RX_BUSY_POLL
4080         .ndo_busy_poll          = be_busy_poll
4081 #endif
4082 };
4083
4084 static void be_netdev_init(struct net_device *netdev)
4085 {
4086         struct be_adapter *adapter = netdev_priv(netdev);
4087
4088         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4089                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4090                 NETIF_F_HW_VLAN_CTAG_TX;
4091         if (be_multi_rxq(adapter))
4092                 netdev->hw_features |= NETIF_F_RXHASH;
4093
4094         netdev->features |= netdev->hw_features |
4095                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4096
4097         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4098                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4099
4100         netdev->priv_flags |= IFF_UNICAST_FLT;
4101
4102         netdev->flags |= IFF_MULTICAST;
4103
4104         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4105
4106         netdev->netdev_ops = &be_netdev_ops;
4107
4108         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4109 }
4110
4111 static void be_unmap_pci_bars(struct be_adapter *adapter)
4112 {
4113         if (adapter->csr)
4114                 pci_iounmap(adapter->pdev, adapter->csr);
4115         if (adapter->db)
4116                 pci_iounmap(adapter->pdev, adapter->db);
4117 }
4118
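/* Doorbell BAR: Lancer and VFs expose their doorbells in BAR 0, while BE
 * PFs use BAR 4.
 */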
4119 static int db_bar(struct be_adapter *adapter)
4120 {
4121         if (lancer_chip(adapter) || !be_physfn(adapter))
4122                 return 0;
4123         else
4124                 return 4;
4125 }
4126
4127 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4128 {
4129         if (skyhawk_chip(adapter)) {
4130                 adapter->roce_db.size = 4096;
4131                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4132                                                               db_bar(adapter));
4133                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4134                                                                db_bar(adapter));
4135         }
4136         return 0;
4137 }
4138
4139 static int be_map_pci_bars(struct be_adapter *adapter)
4140 {
4141         u8 __iomem *addr;
4142
4143         if (BEx_chip(adapter) && be_physfn(adapter)) {
4144                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4145                 if (adapter->csr == NULL)
4146                         return -ENOMEM;
4147         }
4148
4149         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4150         if (addr == NULL)
4151                 goto pci_map_err;
4152         adapter->db = addr;
4153
4154         be_roce_map_pci_bars(adapter);
4155         return 0;
4156
4157 pci_map_err:
4158         be_unmap_pci_bars(adapter);
4159         return -ENOMEM;
4160 }
4161
4162 static void be_ctrl_cleanup(struct be_adapter *adapter)
4163 {
4164         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4165
4166         be_unmap_pci_bars(adapter);
4167
4168         if (mem->va)
4169                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4170                                   mem->dma);
4171
4172         mem = &adapter->rx_filter;
4173         if (mem->va)
4174                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4175                                   mem->dma);
4176 }
4177
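/* Maps the PCI BARs and allocates the DMA memory the control path needs:
 * the mailbox (allocated 16 bytes oversize so both its VA and bus address
 * can be aligned with PTR_ALIGN) and the rx_filter command buffer.
 */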
4178 static int be_ctrl_init(struct be_adapter *adapter)
4179 {
4180         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4181         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4182         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4183         u32 sli_intf;
4184         int status;
4185
4186         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4187         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4188                                  SLI_INTF_FAMILY_SHIFT;
4189         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4190
4191         status = be_map_pci_bars(adapter);
4192         if (status)
4193                 goto done;
4194
4195         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4196         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4197                                                 mbox_mem_alloc->size,
4198                                                 &mbox_mem_alloc->dma,
4199                                                 GFP_KERNEL);
4200         if (!mbox_mem_alloc->va) {
4201                 status = -ENOMEM;
4202                 goto unmap_pci_bars;
4203         }
4204         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4205         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4206         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4207         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4208
4209         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4210         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4211                                             rx_filter->size, &rx_filter->dma,
4212                                             GFP_KERNEL);
4213         if (!rx_filter->va) {
4214                 status = -ENOMEM;
4215                 goto free_mbox;
4216         }
4217
4218         mutex_init(&adapter->mbox_lock);
4219         spin_lock_init(&adapter->mcc_lock);
4220         spin_lock_init(&adapter->mcc_cq_lock);
4221
4222         init_completion(&adapter->et_cmd_compl);
4223         pci_save_state(adapter->pdev);
4224         return 0;
4225
4226 free_mbox:
4227         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4228                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4229
4230 unmap_pci_bars:
4231         be_unmap_pci_bars(adapter);
4232
4233 done:
4234         return status;
4235 }
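/* A worked example of the mailbox alignment in be_ctrl_init(): the
 * buffer is over-allocated by 16 bytes so that PTR_ALIGN() can round
 * both the CPU and bus addresses up to the next 16-byte boundary
 * without overrunning the allocation (at most 15 bytes of padding are
 * consumed). Hypothetical check, not driver code:
 */
static inline bool be_mbox_fits(void *alloc_va, size_t alloc_size)
{
        void *aligned_va = PTR_ALIGN(alloc_va, 16);

        return aligned_va + sizeof(struct be_mcc_mailbox) <=
               alloc_va + alloc_size;
}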
4236
4237 static void be_stats_cleanup(struct be_adapter *adapter)
4238 {
4239         struct be_dma_mem *cmd = &adapter->stats_cmd;
4240
4241         if (cmd->va)
4242                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4243                                   cmd->va, cmd->dma);
4244 }
4245
4246 static int be_stats_init(struct be_adapter *adapter)
4247 {
4248         struct be_dma_mem *cmd = &adapter->stats_cmd;
4249
4250         if (lancer_chip(adapter))
4251                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4252         else if (BE2_chip(adapter))
4253                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4254         else if (BE3_chip(adapter))
4255                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4256         else
4257                 /* ALL non-BE ASICs */
4258                 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4259
4260         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4261                                       GFP_KERNEL);
4262         if (!cmd->va)
4263                 return -ENOMEM;
4264         return 0;
4265 }
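/* For reference: dma_zalloc_coherent() used above is dma_alloc_coherent()
 * plus zeroing of the returned buffer, roughly equivalent to the
 * open-coded form below (illustrative):
 */
static inline void *example_zalloc_coherent(struct device *dev, size_t size,
                                            dma_addr_t *dma)
{
        void *va = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

        if (va)
                memset(va, 0, size);
        return va;
}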
4266
4267 static void be_remove(struct pci_dev *pdev)
4268 {
4269         struct be_adapter *adapter = pci_get_drvdata(pdev);
4270
4271         if (!adapter)
4272                 return;
4273
4274         be_roce_dev_remove(adapter);
4275         be_intr_set(adapter, false);
4276
4277         cancel_delayed_work_sync(&adapter->func_recovery_work);
4278
4279         unregister_netdev(adapter->netdev);
4280
4281         be_clear(adapter);
4282
4283         /* tell fw we're done with firing cmds */
4284         be_cmd_fw_clean(adapter);
4285
4286         be_stats_cleanup(adapter);
4287
4288         be_ctrl_cleanup(adapter);
4289
4290         pci_disable_pcie_error_reporting(pdev);
4291
4292         pci_release_regions(pdev);
4293         pci_disable_device(pdev);
4294
4295         free_netdev(adapter->netdev);
4296 }
4297
4298 bool be_is_wol_supported(struct be_adapter *adapter)
4299 {
4300         return (adapter->wol_cap & BE_WOL_CAP) &&
4301                 !be_is_wol_excluded(adapter);
4302 }
4303
4304 u32 be_get_fw_log_level(struct be_adapter *adapter)
4305 {
4306         struct be_dma_mem extfat_cmd;
4307         struct be_fat_conf_params *cfgs;
4308         int status;
4309         u32 level = 0;
4310         int j;
4311
4312         if (lancer_chip(adapter))
4313                 return 0;
4314
4315         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4316         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4317         extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, extfat_cmd.size,
4318                                            &extfat_cmd.dma, GFP_ATOMIC);
4319
4320         if (!extfat_cmd.va) {
4321                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4322                         __func__);
4323                 goto err;
4324         }
4325
4326         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4327         if (!status) {
4328                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4329                                                 sizeof(struct be_cmd_resp_hdr));
4330                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4331                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4332                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4333                 }
4334         }
4335         dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
4336                           extfat_cmd.dma);
4337 err:
4338         return level;
4339 }
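/* For reference: the dma_alloc_coherent()/dma_free_coherent() pair used
 * above matches what the legacy pci_alloc_consistent()/pci_free_consistent()
 * wrappers expand to, keeping this file on a single DMA API; roughly
 * (illustrative reimplementation):
 */
static inline void *example_pci_alloc_consistent(struct pci_dev *hwdev,
                                                 size_t size,
                                                 dma_addr_t *dma_handle)
{
        return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}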
4340
4341 static int be_get_initial_config(struct be_adapter *adapter)
4342 {
4343         int status;
4344         u32 level;
4345
4346         status = be_cmd_get_cntl_attributes(adapter);
4347         if (status)
4348                 return status;
4349
4350         status = be_cmd_get_acpi_wol_cap(adapter);
4351         if (status) {
4352                 /* in case of a failure to get WOL capabilities,
4353                  * check the exclusion list to determine WOL capability */
4354                 if (!be_is_wol_excluded(adapter))
4355                         adapter->wol_cap |= BE_WOL_CAP;
4356         }
4357
4358         if (be_is_wol_supported(adapter))
4359                 adapter->wol = true;
4360
4361         /* Must be a power of 2 or else MODULO will BUG_ON */
4362         adapter->be_get_temp_freq = 64;
4363
4364         level = be_get_fw_log_level(adapter);
4365         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4366
4367         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4368         return 0;
4369 }
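/* Why be_get_temp_freq must be a power of two: a MODULO()-style helper
 * can then reduce with a mask instead of a divide, since
 *
 *      x % 64  ==  x & (64 - 1)        (divisor a power of two)
 *
 * A hypothetical mask-based form:
 */
static inline u32 example_modulo_pow2(u32 x, u32 pow2)
{
        return x & (pow2 - 1);  /* valid only for power-of-two divisors */
}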
4370
4371 static int lancer_recover_func(struct be_adapter *adapter)
4372 {
4373         struct device *dev = &adapter->pdev->dev;
4374         int status;
4375
4376         status = lancer_test_and_set_rdy_state(adapter);
4377         if (status)
4378                 goto err;
4379
4380         if (netif_running(adapter->netdev))
4381                 be_close(adapter->netdev);
4382
4383         be_clear(adapter);
4384
4385         be_clear_all_error(adapter);
4386
4387         status = be_setup(adapter);
4388         if (status)
4389                 goto err;
4390
4391         if (netif_running(adapter->netdev)) {
4392                 status = be_open(adapter->netdev);
4393                 if (status)
4394                         goto err;
4395         }
4396
4397         dev_info(dev, "Adapter recovery successful\n");
4398         return 0;
4399 err:
4400         if (status == -EAGAIN)
4401                 dev_err(dev, "Waiting for resource provisioning\n");
4402         else
4403                 dev_err(dev, "Adapter recovery failed\n");
4404
4405         return status;
4406 }
4407
4408 static void be_func_recovery_task(struct work_struct *work)
4409 {
4410         struct be_adapter *adapter =
4411                 container_of(work, struct be_adapter, func_recovery_work.work);
4412         int status = 0;
4413
4414         be_detect_error(adapter);
4415
4416         if (adapter->hw_error && lancer_chip(adapter)) {
4418                 rtnl_lock();
4419                 netif_device_detach(adapter->netdev);
4420                 rtnl_unlock();
4421
4422                 status = lancer_recover_func(adapter);
4423                 if (!status)
4424                         netif_device_attach(adapter->netdev);
4425         }
4426
4427         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4428          * no need to attempt further recovery.
4429          */
4430         if (!status || status == -EAGAIN)
4431                 schedule_delayed_work(&adapter->func_recovery_work,
4432                                       msecs_to_jiffies(1000));
4433 }
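/* be_func_recovery_task() above and be_worker() below both follow the
 * self-arming delayed-work pattern sketched here; because the handler
 * re-queues itself, teardown paths (be_remove(), be_suspend()) must stop
 * it with cancel_delayed_work_sync(). Illustrative only:
 */
static void example_self_arming_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);

        /* ... periodic housekeeping would go here ... */

        schedule_delayed_work(dwork, msecs_to_jiffies(1000));
}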
4434
4435 static void be_worker(struct work_struct *work)
4436 {
4437         struct be_adapter *adapter =
4438                 container_of(work, struct be_adapter, work.work);
4439         struct be_rx_obj *rxo;
4440         int i;
4441
4442         /* when interrupts are not yet enabled, just reap any pending
4443          * mcc completions */
4444         if (!netif_running(adapter->netdev)) {
4445                 local_bh_disable();
4446                 be_process_mcc(adapter);
4447                 local_bh_enable();
4448                 goto reschedule;
4449         }
4450
4451         if (!adapter->stats_cmd_sent) {
4452                 if (lancer_chip(adapter))
4453                         lancer_cmd_get_pport_stats(adapter,
4454                                                 &adapter->stats_cmd);
4455                 else
4456                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4457         }
4458
4459         if (be_physfn(adapter) &&
4460             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4461                 be_cmd_get_die_temperature(adapter);
4462
4463         for_all_rx_queues(adapter, rxo, i) {
4464                 /* Replenish RX-queues starved due to memory
4465                  * allocation failures.
4466                  */
4467                 if (rxo->rx_post_starved)
4468                         be_post_rx_frags(rxo, GFP_KERNEL);
4469         }
4470
4471         be_eqd_update(adapter);
4472
4473 reschedule:
4474         adapter->work_counter++;
4475         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4476 }
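/* The RX replenish above relies on be_post_rx_frags() mapping page
 * fragments for the NIC. A minimal sketch of the map-and-check pattern
 * at issue in this patch: dma_map_page() can fail, so its result must be
 * vetted with dma_mapping_error() before the address is posted. Helper
 * name and shape are illustrative, not the driver's:
 */
static inline int example_map_rx_frag(struct device *dev, struct page *page,
                                      u32 offset, dma_addr_t *busaddr)
{
        dma_addr_t addr;

        addr = dma_map_page(dev, page, offset, rx_frag_size,
                            DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM; /* caller must not post this fragment */

        *busaddr = addr;
        return 0;
}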
4477
4478 /* If any VFs are already enabled don't FLR the PF */
4479 static bool be_reset_required(struct be_adapter *adapter)
4480 {
4481         return !pci_num_vf(adapter->pdev);
4482 }
4483
4484 static char *mc_name(struct be_adapter *adapter)
4485 {
4486         if (adapter->function_mode & FLEX10_MODE)
4487                 return "FLEX10";
4488         else if (adapter->function_mode & VNIC_MODE)
4489                 return "vNIC";
4490         else if (adapter->function_mode & UMC_ENABLED)
4491                 return "UMC";
4492         else
4493                 return "";
4494 }
4495
4496 static inline char *func_name(struct be_adapter *adapter)
4497 {
4498         return be_physfn(adapter) ? "PF" : "VF";
4499 }
4500
4501 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4502 {
4503         int status = 0;
4504         struct be_adapter *adapter;
4505         struct net_device *netdev;
4506         char port_name;
4507
4508         status = pci_enable_device(pdev);
4509         if (status)
4510                 goto do_none;
4511
4512         status = pci_request_regions(pdev, DRV_NAME);
4513         if (status)
4514                 goto disable_dev;
4515         pci_set_master(pdev);
4516
4517         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4518         if (!netdev) {
4519                 status = -ENOMEM;
4520                 goto rel_reg;
4521         }
4522         adapter = netdev_priv(netdev);
4523         adapter->pdev = pdev;
4524         pci_set_drvdata(pdev, adapter);
4525         adapter->netdev = netdev;
4526         SET_NETDEV_DEV(netdev, &pdev->dev);
4527
4528         status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4529         if (!status) {
4530                 netdev->features |= NETIF_F_HIGHDMA;
4531         } else {
4532                 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4533                 if (status) {
4534                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4535                         goto free_netdev;
4536                 }
4537         }
4538
4539         if (be_physfn(adapter)) {
4540                 status = pci_enable_pcie_error_reporting(pdev);
4541                 if (!status)
4542                         dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4543         }
4544
4545         status = be_ctrl_init(adapter);
4546         if (status)
4547                 goto free_netdev;
4548
4549         /* sync up with fw's ready state */
4550         if (be_physfn(adapter)) {
4551                 status = be_fw_wait_ready(adapter);
4552                 if (status)
4553                         goto ctrl_clean;
4554         }
4555
4556         if (be_reset_required(adapter)) {
4557                 status = be_cmd_reset_function(adapter);
4558                 if (status)
4559                         goto ctrl_clean;
4560
4561                 /* Wait for interrupts to quiesce after an FLR */
4562                 msleep(100);
4563         }
4564
4565         /* Allow interrupts for other ULPs running on NIC function */
4566         be_intr_set(adapter, true);
4567
4568         /* tell fw we're ready to fire cmds */
4569         status = be_cmd_fw_init(adapter);
4570         if (status)
4571                 goto ctrl_clean;
4572
4573         status = be_stats_init(adapter);
4574         if (status)
4575                 goto ctrl_clean;
4576
4577         status = be_get_initial_config(adapter);
4578         if (status)
4579                 goto stats_clean;
4580
4581         INIT_DELAYED_WORK(&adapter->work, be_worker);
4582         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4583         adapter->rx_fc = adapter->tx_fc = true;
4584
4585         status = be_setup(adapter);
4586         if (status)
4587                 goto stats_clean;
4588
4589         be_netdev_init(netdev);
4590         status = register_netdev(netdev);
4591         if (status)
4592                 goto unsetup;
4593
4594         be_roce_dev_add(adapter);
4595
4596         schedule_delayed_work(&adapter->func_recovery_work,
4597                               msecs_to_jiffies(1000));
4598
4599         be_cmd_query_port_name(adapter, &port_name);
4600
4601         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4602                  func_name(adapter), mc_name(adapter), port_name);
4603
4604         return 0;
4605
4606 unsetup:
4607         be_clear(adapter);
4608 stats_clean:
4609         be_stats_cleanup(adapter);
4610 ctrl_clean:
4611         be_ctrl_cleanup(adapter);
4612 free_netdev:
4613         free_netdev(netdev);
4614 rel_reg:
4615         pci_release_regions(pdev);
4616 disable_dev:
4617         pci_disable_device(pdev);
4618 do_none:
4619         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4620         return status;
4621 }
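/* Condensed form of the DMA-mask fallback in be_probe(): prefer a 64-bit
 * mask (and advertise NETIF_F_HIGHDMA), fall back to 32 bits, and fail
 * the probe only when both are refused. Hypothetical helper:
 */
static inline int example_set_dma_masks(struct pci_dev *pdev)
{
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                return 0;       /* 64-bit DMA usable */

        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}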
4622
4623 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4624 {
4625         struct be_adapter *adapter = pci_get_drvdata(pdev);
4626         struct net_device *netdev = adapter->netdev;
4627
4628         if (adapter->wol)
4629                 be_setup_wol(adapter, true);
4630
4631         be_intr_set(adapter, false);
4632         cancel_delayed_work_sync(&adapter->func_recovery_work);
4633
4634         netif_device_detach(netdev);
4635         if (netif_running(netdev)) {
4636                 rtnl_lock();
4637                 be_close(netdev);
4638                 rtnl_unlock();
4639         }
4640         be_clear(adapter);
4641
4642         pci_save_state(pdev);
4643         pci_disable_device(pdev);
4644         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4645         return 0;
4646 }
4647
4648 static int be_resume(struct pci_dev *pdev)
4649 {
4650         int status = 0;
4651         struct be_adapter *adapter = pci_get_drvdata(pdev);
4652         struct net_device *netdev = adapter->netdev;
4653
4654         netif_device_detach(netdev);
4655
4656         status = pci_enable_device(pdev);
4657         if (status)
4658                 return status;
4659
4660         pci_set_power_state(pdev, PCI_D0);
4661         pci_restore_state(pdev);
4662
4663         status = be_fw_wait_ready(adapter);
4664         if (status)
4665                 return status;
4666
4667         be_intr_set(adapter, true);
4668         /* tell fw we're ready to fire cmds */
4669         status = be_cmd_fw_init(adapter);
4670         if (status)
4671                 return status;
4672
4673         status = be_setup(adapter);
             if (status)
                     return status;
4674         if (netif_running(netdev)) {
4675                 rtnl_lock();
4676                 be_open(netdev);
4677                 rtnl_unlock();
4678         }
4679
4680         schedule_delayed_work(&adapter->func_recovery_work,
4681                               msecs_to_jiffies(1000));
4682         netif_device_attach(netdev);
4683
4684         if (adapter->wol)
4685                 be_setup_wol(adapter, false);
4686
4687         return 0;
4688 }
4689
4690 /*
4691  * An FLR will stop BE from DMAing any data.
4692  */
4693 static void be_shutdown(struct pci_dev *pdev)
4694 {
4695         struct be_adapter *adapter = pci_get_drvdata(pdev);
4696
4697         if (!adapter)
4698                 return;
4699
4700         cancel_delayed_work_sync(&adapter->work);
4701         cancel_delayed_work_sync(&adapter->func_recovery_work);
4702
4703         netif_device_detach(adapter->netdev);
4704
4705         be_cmd_reset_function(adapter);
4706
4707         pci_disable_device(pdev);
4708 }
4709
4710 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4711                                 pci_channel_state_t state)
4712 {
4713         struct be_adapter *adapter = pci_get_drvdata(pdev);
4714         struct net_device *netdev = adapter->netdev;
4715
4716         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4717
4718         if (!adapter->eeh_error) {
4719                 adapter->eeh_error = true;
4720
4721                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4722
4723                 rtnl_lock();
4724                 netif_device_detach(netdev);
4725                 if (netif_running(netdev))
4726                         be_close(netdev);
4727                 rtnl_unlock();
4728
4729                 be_clear(adapter);
4730         }
4731
4732         if (state == pci_channel_io_perm_failure)
4733                 return PCI_ERS_RESULT_DISCONNECT;
4734
4735         pci_disable_device(pdev);
4736
4737         /* The error could cause the FW to trigger a flash debug dump.
4738          * Resetting the card while flash dump is in progress
4739          * can cause it not to recover; wait for it to finish.
4740          * Wait only for first function as it is needed only once per
4741          * adapter.
4742          */
4743         if (pdev->devfn == 0)
4744                 ssleep(30);
4745
4746         return PCI_ERS_RESULT_NEED_RESET;
4747 }
4748
4749 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4750 {
4751         struct be_adapter *adapter = pci_get_drvdata(pdev);
4752         int status;
4753
4754         dev_info(&adapter->pdev->dev, "EEH reset\n");
4755
4756         status = pci_enable_device(pdev);
4757         if (status)
4758                 return PCI_ERS_RESULT_DISCONNECT;
4759
4760         pci_set_master(pdev);
4761         pci_set_power_state(pdev, PCI_D0);
4762         pci_restore_state(pdev);
4763
4764         /* Check if card is ok and fw is ready */
4765         dev_info(&adapter->pdev->dev,
4766                  "Waiting for FW to be ready after EEH reset\n");
4767         status = be_fw_wait_ready(adapter);
4768         if (status)
4769                 return PCI_ERS_RESULT_DISCONNECT;
4770
4771         pci_cleanup_aer_uncorrect_error_status(pdev);
4772         be_clear_all_error(adapter);
4773         return PCI_ERS_RESULT_RECOVERED;
4774 }
4775
4776 static void be_eeh_resume(struct pci_dev *pdev)
4777 {
4778         int status = 0;
4779         struct be_adapter *adapter = pci_get_drvdata(pdev);
4780         struct net_device *netdev = adapter->netdev;
4781
4782         dev_info(&adapter->pdev->dev, "EEH resume\n");
4783
4784         pci_save_state(pdev);
4785
4786         status = be_cmd_reset_function(adapter);
4787         if (status)
4788                 goto err;
4789
4790         /* tell fw we're ready to fire cmds */
4791         status = be_cmd_fw_init(adapter);
4792         if (status)
4793                 goto err;
4794
4795         status = be_setup(adapter);
4796         if (status)
4797                 goto err;
4798
4799         if (netif_running(netdev)) {
4800                 status = be_open(netdev);
4801                 if (status)
4802                         goto err;
4803         }
4804
4805         schedule_delayed_work(&adapter->func_recovery_work,
4806                               msecs_to_jiffies(1000));
4807         netif_device_attach(netdev);
4808         return;
4809 err:
4810         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4811 }
4812
4813 static const struct pci_error_handlers be_eeh_handlers = {
4814         .error_detected = be_eeh_err_detected,
4815         .slot_reset = be_eeh_reset,
4816         .resume = be_eeh_resume,
4817 };
4818
4819 static struct pci_driver be_driver = {
4820         .name = DRV_NAME,
4821         .id_table = be_dev_ids,
4822         .probe = be_probe,
4823         .remove = be_remove,
4824         .suspend = be_suspend,
4825         .resume = be_resume,
4826         .shutdown = be_shutdown,
4827         .err_handler = &be_eeh_handlers
4828 };
4829
4830 static int __init be_init_module(void)
4831 {
4832         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4833             rx_frag_size != 2048) {
4834                 pr_warn(DRV_NAME
4835                         " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
4837                 rx_frag_size = 2048;
4838         }
4839
4840         return pci_register_driver(&be_driver);
4841 }
4842 module_init(be_init_module);
4843
4844 static void __exit be_exit_module(void)
4845 {
4846         pci_unregister_driver(&be_driver);
4847 }
4848 module_exit(be_exit_module);