be2net: take care of __vlan_put_tag return value
[cascardo/linux.git] drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

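/* Enable or disable forwarding of interrupts to the host by toggling
 * the HOSTINTR bit in the membar control register; a no-op while an
 * EEH error is pending or when the bit already matches 'enable'.
 */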
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_error)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

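/* Doorbell helpers: each 32-bit write tells the HW how many entries
 * were posted to (or popped from) the given queue. The wmb() ensures
 * the queue entries are visible to the device before the doorbell rings.
 */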
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For a BE VF, the MAC address is already activated by the PF.
         * Hence the only operation left is updating netdev->dev_addr.
         * Update it only if the user is passing the same MAC that was
         * used while configuring the VF MAC from the PF (hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

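/* Accumulate a 16-bit HW counter that wraps at 65535 into a 32-bit
 * driver counter: the low 16 bits hold the last HW sample, and a wrap
 * is inferred whenever the new sample is smaller than the previous one.
 */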
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* This erx HW counter wraps around after 65535;
                         * the driver accumulates it into a 32-bit value.
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                             (u16)erx->rx_drops_no_fragments[rxo->q.id]);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

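/* Returns the VLAN tag to be used for a TX skb. If the priority given
 * by the stack is not enabled in the adapter's vlan priority bitmap,
 * it is replaced with the adapter's recommended priority.
 */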
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

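/* DMA-map the skb head and frags, filling one WRB per mapping, plus an
 * optional dummy WRB to make the WRB count even. Returns the number of
 * bytes mapped, or 0 on a DMA mapping error (after un-mapping whatever
 * was already mapped).
 */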
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

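/* Insert the VLAN tag into the packet payload itself instead of the TX
 * descriptor. Note that __vlan_put_tag() consumes the skb it is passed
 * and returns NULL on failure, so only the returned skb pointer may be
 * used afterwards; callers must check it for NULL and drop the packet.
 */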
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                skb = __vlan_put_tag(skb, vlan_tag);
                if (skb)
                        skb->vlan_tci = 0;
        }

        return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* HW has a bug which considers padding bytes as legal
         * and modifies the IPv4 hdr's 'tot_len' field
         */
        if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
                        is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even when checksum offload is disabled.
         * Manually insert the VLAN tag in such packets.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        be_vlan_tag_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which will wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

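/* Walk the PCI bus and count this PF's virtual functions. Depending on
 * vf_state, returns either the total number of VFs found or only those
 * currently assigned to a guest (PCI_DEV_FLAGS_ASSIGNED).
 */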
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

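/* Adaptive interrupt coalescing: once a second, recompute the EQ delay
 * from the observed RX packet rate and apply it if it changed. When AIC
 * is disabled, the statically configured delay is used instead.
 */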
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

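/* Fetch the page_info for the given RX frag index and un-map the
 * underlying DMA page if this frag was its last user.
 */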
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the RX completion */
1254 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1255                                 struct be_rx_compl_info *rxcp)
1256 {
1257         struct be_queue_info *rxq = &rxo->q;
1258         struct be_rx_page_info *page_info;
1259         u16 i, num_rcvd = rxcp->num_rcvd;
1260
1261         for (i = 0; i < num_rcvd; i++) {
1262                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1263                 put_page(page_info->page);
1264                 memset(page_info, 0, sizeof(*page_info));
1265                 index_inc(&rxcp->rxq_idx, rxq->len);
1266         }
1267 }
1268
1269 /*
1270  * skb_fill_rx_data forms a complete skb for an ether frame
1271  * indicated by rxcp.
1272  */
1273 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1274                              struct be_rx_compl_info *rxcp)
1275 {
1276         struct be_queue_info *rxq = &rxo->q;
1277         struct be_rx_page_info *page_info;
1278         u16 i, j;
1279         u16 hdr_len, curr_frag_len, remaining;
1280         u8 *start;
1281
1282         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1283         start = page_address(page_info->page) + page_info->page_offset;
1284         prefetch(start);
1285
1286         /* Copy data in the first descriptor of this completion */
1287         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1288
1289         skb->len = curr_frag_len;
1290         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1291                 memcpy(skb->data, start, curr_frag_len);
1292                 /* Complete packet has now been moved to data */
1293                 put_page(page_info->page);
1294                 skb->data_len = 0;
1295                 skb->tail += curr_frag_len;
1296         } else {
1297                 hdr_len = ETH_HLEN;
1298                 memcpy(skb->data, start, hdr_len);
1299                 skb_shinfo(skb)->nr_frags = 1;
1300                 skb_frag_set_page(skb, 0, page_info->page);
1301                 skb_shinfo(skb)->frags[0].page_offset =
1302                                         page_info->page_offset + hdr_len;
1303                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1304                 skb->data_len = curr_frag_len - hdr_len;
1305                 skb->truesize += rx_frag_size;
1306                 skb->tail += hdr_len;
1307         }
1308         page_info->page = NULL;
1309
1310         if (rxcp->pkt_size <= rx_frag_size) {
1311                 BUG_ON(rxcp->num_rcvd != 1);
1312                 return;
1313         }
1314
1315         /* More frags present for this completion */
1316         index_inc(&rxcp->rxq_idx, rxq->len);
1317         remaining = rxcp->pkt_size - curr_frag_len;
1318         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1319                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1320                 curr_frag_len = min(remaining, rx_frag_size);
1321
1322                 /* Coalesce all frags from the same physical page in one slot */
1323                 if (page_info->page_offset == 0) {
1324                         /* Fresh page */
1325                         j++;
1326                         skb_frag_set_page(skb, j, page_info->page);
1327                         skb_shinfo(skb)->frags[j].page_offset =
1328                                                         page_info->page_offset;
1329                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1330                         skb_shinfo(skb)->nr_frags++;
1331                 } else {
1332                         put_page(page_info->page);
1333                 }
1334
1335                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1336                 skb->len += curr_frag_len;
1337                 skb->data_len += curr_frag_len;
1338                 skb->truesize += rx_frag_size;
1339                 remaining -= curr_frag_len;
1340                 index_inc(&rxcp->rxq_idx, rxq->len);
1341                 page_info->page = NULL;
1342         }
1343         BUG_ON(j > MAX_SKB_FRAGS);
1344 }
1345
1346 /* Process the RX completion indicated by rxcp when GRO is disabled */
1347 static void be_rx_compl_process(struct be_rx_obj *rxo,
1348                                 struct be_rx_compl_info *rxcp)
1349 {
1350         struct be_adapter *adapter = rxo->adapter;
1351         struct net_device *netdev = adapter->netdev;
1352         struct sk_buff *skb;
1353
1354         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1355         if (unlikely(!skb)) {
1356                 rx_stats(rxo)->rx_drops_no_skbs++;
1357                 be_rx_compl_discard(rxo, rxcp);
1358                 return;
1359         }
1360
1361         skb_fill_rx_data(rxo, skb, rxcp);
1362
1363         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1364                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1365         else
1366                 skb_checksum_none_assert(skb);
1367
1368         skb->protocol = eth_type_trans(skb, netdev);
1369         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1370         if (netdev->features & NETIF_F_RXHASH)
1371                 skb->rxhash = rxcp->rss_hash;
1372
1373
1374         if (rxcp->vlanf)
1375                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1376
1377         netif_receive_skb(skb);
1378 }
1379
1380 /* Process the RX completion indicated by rxcp when GRO is enabled */
1381 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1382                              struct be_rx_compl_info *rxcp)
1383 {
1384         struct be_adapter *adapter = rxo->adapter;
1385         struct be_rx_page_info *page_info;
1386         struct sk_buff *skb = NULL;
1387         struct be_queue_info *rxq = &rxo->q;
1388         u16 remaining, curr_frag_len;
1389         u16 i, j;
1390
1391         skb = napi_get_frags(napi);
1392         if (!skb) {
1393                 be_rx_compl_discard(rxo, rxcp);
1394                 return;
1395         }
1396
1397         remaining = rxcp->pkt_size;
1398         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1399                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1400
1401                 curr_frag_len = min(remaining, rx_frag_size);
1402
1403                 /* Coalesce all frags from the same physical page in one slot */
1404                 if (i == 0 || page_info->page_offset == 0) {
1405                         /* First frag or Fresh page */
1406                         j++;
1407                         skb_frag_set_page(skb, j, page_info->page);
1408                         skb_shinfo(skb)->frags[j].page_offset =
1409                                                         page_info->page_offset;
1410                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1411                 } else {
1412                         put_page(page_info->page);
1413                 }
1414                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1415                 skb->truesize += rx_frag_size;
1416                 remaining -= curr_frag_len;
1417                 index_inc(&rxcp->rxq_idx, rxq->len);
1418                 memset(page_info, 0, sizeof(*page_info));
1419         }
1420         BUG_ON(j >= MAX_SKB_FRAGS);
1421
1422         skb_shinfo(skb)->nr_frags = j + 1;
1423         skb->len = rxcp->pkt_size;
1424         skb->data_len = rxcp->pkt_size;
1425         skb->ip_summed = CHECKSUM_UNNECESSARY;
1426         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1427         if (adapter->netdev->features & NETIF_F_RXHASH)
1428                 skb->rxhash = rxcp->rss_hash;
1429
1430         if (rxcp->vlanf)
1431                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1432
1433         napi_gro_frags(napi);
1434 }
1435
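/* Parse a v1 (BE3-native) RX completion into the chip-agnostic rxcp.
 * Each AMAP_GET_BITS() call extracts one named bit-field from the
 * little-endian completion descriptor; be_parse_rx_compl_v0() below
 * does the same for the older v0 layout.
 */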
1436 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1437                                  struct be_rx_compl_info *rxcp)
1438 {
1439         rxcp->pkt_size =
1440                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1441         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1442         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1443         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1444         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1445         rxcp->ip_csum =
1446                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1447         rxcp->l4_csum =
1448                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1449         rxcp->ipv6 =
1450                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1451         rxcp->rxq_idx =
1452                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1453         rxcp->num_rcvd =
1454                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1455         rxcp->pkt_type =
1456                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1457         rxcp->rss_hash =
1458                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1459         if (rxcp->vlanf) {
1460                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1461                                           compl);
1462                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1463                                                compl);
1464         }
1465         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1466 }
1467
1468 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1469                                  struct be_rx_compl_info *rxcp)
1470 {
1471         rxcp->pkt_size =
1472                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1473         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1474         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1475         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1476         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1477         rxcp->ip_csum =
1478                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1479         rxcp->l4_csum =
1480                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1481         rxcp->ipv6 =
1482                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1483         rxcp->rxq_idx =
1484                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1485         rxcp->num_rcvd =
1486                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1487         rxcp->pkt_type =
1488                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1489         rxcp->rss_hash =
1490                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1491         if (rxcp->vlanf) {
1492                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1493                                           compl);
1494                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1495                                                compl);
1496         }
1497         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1498 }
1499
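/* Return the next valid RX completion from the CQ, or NULL if none is
 * pending. The rmb() ensures the valid bit is read before the rest of
 * the entry; the valid bit is cleared once the entry has been parsed.
 */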
1500 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1501 {
1502         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1503         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1504         struct be_adapter *adapter = rxo->adapter;
1505
1506         /* For checking the valid bit it is OK to use either definition, as
1507          * the valid bit is at the same position in both the v0 and v1 Rx compl */
1508         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1509                 return NULL;
1510
1511         rmb();
1512         be_dws_le_to_cpu(compl, sizeof(*compl));
1513
1514         if (adapter->be3_native)
1515                 be_parse_rx_compl_v1(compl, rxcp);
1516         else
1517                 be_parse_rx_compl_v0(compl, rxcp);
1518
1519         if (rxcp->vlanf) {
1520                 /* vlanf could be wrongly set in some cards.
1521                  * Ignore it if vtm is not set */
1522                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1523                         rxcp->vlanf = 0;
1524
1525                 if (!lancer_chip(adapter))
1526                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1527
1528                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1529                     !adapter->vlan_tag[rxcp->vlan_tag])
1530                         rxcp->vlanf = 0;
1531         }
1532
1533         /* As the compl has been parsed, reset it; we won't touch it again */
1534         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1535
1536         queue_tail_inc(&rxo->cq);
1537         return rxcp;
1538 }
1539
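/* For order > 0 allocations, ask for a compound page (__GFP_COMP) so that
 * get_page()/put_page() on the rx_frag_size fragments carved out of it
 * operate on the refcount of the compound head page.
 */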
1540 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1541 {
1542         u32 order = get_order(size);
1543
1544         if (order > 0)
1545                 gfp |= __GFP_COMP;
1546         return  alloc_pages(gfp, order);
1547 }
1548
1549 /*
1550  * Allocate a page, split it to fragments of size rx_frag_size and post as
1551  * receive buffers to BE
1552  */
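/* E.g. with 4K pages and the default rx_frag_size of 2048, each "big
 * page" is a single 4K page carved into two fragments.
 */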
1553 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1554 {
1555         struct be_adapter *adapter = rxo->adapter;
1556         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1557         struct be_queue_info *rxq = &rxo->q;
1558         struct page *pagep = NULL;
1559         struct be_eth_rx_d *rxd;
1560         u64 page_dmaaddr = 0, frag_dmaaddr;
1561         u32 posted, page_offset = 0;
1562
1563         page_info = &rxo->page_info_tbl[rxq->head];
1564         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1565                 if (!pagep) {
1566                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1567                         if (unlikely(!pagep)) {
1568                                 rx_stats(rxo)->rx_post_fail++;
1569                                 break;
1570                         }
1571                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1572                                                     0, adapter->big_page_size,
1573                                                     DMA_FROM_DEVICE);
1574                         page_info->page_offset = 0;
1575                 } else {
1576                         get_page(pagep);
1577                         page_info->page_offset = page_offset + rx_frag_size;
1578                 }
1579                 page_offset = page_info->page_offset;
1580                 page_info->page = pagep;
1581                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1582                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1583
1584                 rxd = queue_head_node(rxq);
1585                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1586                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1587
1588                 /* Any space left in the current big page for another frag? */
1589                 if ((page_offset + rx_frag_size + rx_frag_size) >
1590                                         adapter->big_page_size) {
1591                         pagep = NULL;
1592                         page_info->last_page_user = true;
1593                 }
1594
1595                 prev_page_info = page_info;
1596                 queue_head_inc(rxq);
1597                 page_info = &rxo->page_info_tbl[rxq->head];
1598         }
1599         if (pagep)
1600                 prev_page_info->last_page_user = true;
1601
1602         if (posted) {
1603                 atomic_add(posted, &rxq->used);
1604                 be_rxq_notify(adapter, rxq->id, posted);
1605         } else if (atomic_read(&rxq->used) == 0) {
1606                 /* Let be_worker replenish when memory is available */
1607                 rxo->rx_post_starved = true;
1608         }
1609 }
1610
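/* Return the next valid TX completion from the CQ, or NULL if none is
 * pending; the entry is invalidated once it has been read.
 */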
1611 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1612 {
1613         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1614
1615         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1616                 return NULL;
1617
1618         rmb();
1619         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1620
1621         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1622
1623         queue_tail_inc(tx_cq);
1624         return txcp;
1625 }
1626
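/* Unmap and free the skb whose completion has arrived: walk its WRBs from
 * the queue tail up to last_index, unmapping each data fragment, and
 * return the number of WRBs (including the header WRB) reaped.
 */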
1627 static u16 be_tx_compl_process(struct be_adapter *adapter,
1628                 struct be_tx_obj *txo, u16 last_index)
1629 {
1630         struct be_queue_info *txq = &txo->q;
1631         struct be_eth_wrb *wrb;
1632         struct sk_buff **sent_skbs = txo->sent_skb_list;
1633         struct sk_buff *sent_skb;
1634         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1635         bool unmap_skb_hdr = true;
1636
1637         sent_skb = sent_skbs[txq->tail];
1638         BUG_ON(!sent_skb);
1639         sent_skbs[txq->tail] = NULL;
1640
1641         /* skip header wrb */
1642         queue_tail_inc(txq);
1643
1644         do {
1645                 cur_index = txq->tail;
1646                 wrb = queue_tail_node(txq);
1647                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1648                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1649                 unmap_skb_hdr = false;
1650
1651                 num_wrbs++;
1652                 queue_tail_inc(txq);
1653         } while (cur_index != last_index);
1654
1655         kfree_skb(sent_skb);
1656         return num_wrbs;
1657 }
1658
1659 /* Return the number of events in the event queue */
1660 static inline int events_get(struct be_eq_obj *eqo)
1661 {
1662         struct be_eq_entry *eqe;
1663         int num = 0;
1664
1665         do {
1666                 eqe = queue_tail_node(&eqo->q);
1667                 if (eqe->evt == 0)
1668                         break;
1669
1670                 rmb();
1671                 eqe->evt = 0;
1672                 num++;
1673                 queue_tail_inc(&eqo->q);
1674         } while (true);
1675
1676         return num;
1677 }
1678
1679 /* Leaves the EQ in a disarmed state */
1680 static void be_eq_clean(struct be_eq_obj *eqo)
1681 {
1682         int num = events_get(eqo);
1683
1684         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1685 }
1686
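/* Drain the RX CQ at teardown time and free any posted RX buffers that
 * were never filled by hardware.
 */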
1687 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1688 {
1689         struct be_rx_page_info *page_info;
1690         struct be_queue_info *rxq = &rxo->q;
1691         struct be_queue_info *rx_cq = &rxo->cq;
1692         struct be_rx_compl_info *rxcp;
1693         struct be_adapter *adapter = rxo->adapter;
1694         int flush_wait = 0;
1695         u16 tail;
1696
1697         /* Consume pending rx completions.
1698          * Wait for the flush completion (identified by zero num_rcvd)
1699          * to arrive. Notify the CQ even when there are no more CQ entries,
1700          * so that HW can flush partially coalesced CQ entries.
1701          * In Lancer, there is no need to wait for the flush compl.
1702          */
1703         for (;;) {
1704                 rxcp = be_rx_compl_get(rxo);
1705                 if (rxcp == NULL) {
1706                         if (lancer_chip(adapter))
1707                                 break;
1708
1709                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1710                                 dev_warn(&adapter->pdev->dev,
1711                                          "did not receive flush compl\n");
1712                                 break;
1713                         }
1714                         be_cq_notify(adapter, rx_cq->id, true, 0);
1715                         mdelay(1);
1716                 } else {
1717                         be_rx_compl_discard(rxo, rxcp);
1718                         be_cq_notify(adapter, rx_cq->id, true, 1);
1719                         if (rxcp->num_rcvd == 0)
1720                                 break;
1721                 }
1722         }
1723
1724         /* After cleanup, leave the CQ in unarmed state */
1725         be_cq_notify(adapter, rx_cq->id, false, 0);
1726
1727         /* Then free posted rx buffers that were not used */
1728         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1729         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1730                 page_info = get_rx_page_info(rxo, tail);
1731                 put_page(page_info->page);
1732                 memset(page_info, 0, sizeof(*page_info));
1733         }
1734         BUG_ON(atomic_read(&rxq->used));
1735         rxq->tail = rxq->head = 0;
1736 }
1737
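/* Reap all outstanding TX completions at teardown. Any wrbs still posted
 * after the 200ms wait below are reclaimed by hand, as their completions
 * will never arrive.
 */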
1738 static void be_tx_compl_clean(struct be_adapter *adapter)
1739 {
1740         struct be_tx_obj *txo;
1741         struct be_queue_info *txq;
1742         struct be_eth_tx_compl *txcp;
1743         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1744         struct sk_buff *sent_skb;
1745         bool dummy_wrb;
1746         int i, pending_txqs;
1747
1748         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1749         do {
1750                 pending_txqs = adapter->num_tx_qs;
1751
1752                 for_all_tx_queues(adapter, txo, i) {
1753                         txq = &txo->q;
1754                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1755                                 end_idx =
1756                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1757                                                       wrb_index, txcp);
1758                                 num_wrbs += be_tx_compl_process(adapter, txo,
1759                                                                 end_idx);
1760                                 cmpl++;
1761                         }
1762                         if (cmpl) {
1763                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1764                                 atomic_sub(num_wrbs, &txq->used);
1765                                 cmpl = 0;
1766                                 num_wrbs = 0;
1767                         }
1768                         if (atomic_read(&txq->used) == 0)
1769                                 pending_txqs--;
1770                 }
1771
1772                 if (pending_txqs == 0 || ++timeo > 200)
1773                         break;
1774
1775                 mdelay(1);
1776         } while (true);
1777
1778         for_all_tx_queues(adapter, txo, i) {
1779                 txq = &txo->q;
1780                 if (atomic_read(&txq->used))
1781                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1782                                 atomic_read(&txq->used));
1783
1784                 /* free posted tx for which compls will never arrive */
1785                 while (atomic_read(&txq->used)) {
1786                         sent_skb = txo->sent_skb_list[txq->tail];
1787                         end_idx = txq->tail;
1788                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1789                                                    &dummy_wrb);
1790                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1791                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1792                         atomic_sub(num_wrbs, &txq->used);
1793                 }
1794         }
1795 }
1796
1797 static void be_evt_queues_destroy(struct be_adapter *adapter)
1798 {
1799         struct be_eq_obj *eqo;
1800         int i;
1801
1802         for_all_evt_queues(adapter, eqo, i) {
1803                 if (eqo->q.created) {
1804                         be_eq_clean(eqo);
1805                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1806                 }
1807                 be_queue_free(adapter, &eqo->q);
1808         }
1809 }
1810
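/* Create one event queue per interrupt vector, each with adaptive
 * interrupt coalescing (AIC) enabled and a fixed TX budget for its
 * NAPI poll.
 */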
1811 static int be_evt_queues_create(struct be_adapter *adapter)
1812 {
1813         struct be_queue_info *eq;
1814         struct be_eq_obj *eqo;
1815         int i, rc;
1816
1817         adapter->num_evt_qs = num_irqs(adapter);
1818
1819         for_all_evt_queues(adapter, eqo, i) {
1820                 eqo->adapter = adapter;
1821                 eqo->tx_budget = BE_TX_BUDGET;
1822                 eqo->idx = i;
1823                 eqo->max_eqd = BE_MAX_EQD;
1824                 eqo->enable_aic = true;
1825
1826                 eq = &eqo->q;
1827                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1828                                         sizeof(struct be_eq_entry));
1829                 if (rc)
1830                         return rc;
1831
1832                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1833                 if (rc)
1834                         return rc;
1835         }
1836         return 0;
1837 }
1838
1839 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1840 {
1841         struct be_queue_info *q;
1842
1843         q = &adapter->mcc_obj.q;
1844         if (q->created)
1845                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1846         be_queue_free(adapter, q);
1847
1848         q = &adapter->mcc_obj.cq;
1849         if (q->created)
1850                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1851         be_queue_free(adapter, q);
1852 }
1853
1854 /* Must be called only after TX qs are created as MCC shares TX EQ */
1855 static int be_mcc_queues_create(struct be_adapter *adapter)
1856 {
1857         struct be_queue_info *q, *cq;
1858
1859         cq = &adapter->mcc_obj.cq;
1860         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1861                         sizeof(struct be_mcc_compl)))
1862                 goto err;
1863
1864         /* Use the default EQ for MCC completions */
1865         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1866                 goto mcc_cq_free;
1867
1868         q = &adapter->mcc_obj.q;
1869         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1870                 goto mcc_cq_destroy;
1871
1872         if (be_cmd_mccq_create(adapter, q, cq))
1873                 goto mcc_q_free;
1874
1875         return 0;
1876
1877 mcc_q_free:
1878         be_queue_free(adapter, q);
1879 mcc_cq_destroy:
1880         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1881 mcc_cq_free:
1882         be_queue_free(adapter, cq);
1883 err:
1884         return -1;
1885 }
1886
1887 static void be_tx_queues_destroy(struct be_adapter *adapter)
1888 {
1889         struct be_queue_info *q;
1890         struct be_tx_obj *txo;
1891         u8 i;
1892
1893         for_all_tx_queues(adapter, txo, i) {
1894                 q = &txo->q;
1895                 if (q->created)
1896                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1897                 be_queue_free(adapter, q);
1898
1899                 q = &txo->cq;
1900                 if (q->created)
1901                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1902                 be_queue_free(adapter, q);
1903         }
1904 }
1905
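/* A single TXQ is used on BE2, on BE3 VFs, in multi-channel mode, and
 * when SR-IOV is wanted on BE3; otherwise (Lancer, or a BE3 PF without
 * SR-IOV) as many TXQs as the device supports are requested.
 */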
1906 static int be_num_txqs_want(struct be_adapter *adapter)
1907 {
1908         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1909             be_is_mc(adapter) ||
1910             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1911             BE2_chip(adapter))
1912                 return 1;
1913         else
1914                 return adapter->max_tx_queues;
1915 }
1916
1917 static int be_tx_cqs_create(struct be_adapter *adapter)
1918 {
1919         struct be_queue_info *cq, *eq;
1920         int status;
1921         struct be_tx_obj *txo;
1922         u8 i;
1923
1924         adapter->num_tx_qs = be_num_txqs_want(adapter);
1925         if (adapter->num_tx_qs != MAX_TX_QS) {
1926                 rtnl_lock();
1927                 netif_set_real_num_tx_queues(adapter->netdev,
1928                         adapter->num_tx_qs);
1929                 rtnl_unlock();
1930         }
1931
1932         for_all_tx_queues(adapter, txo, i) {
1933                 cq = &txo->cq;
1934                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1935                                         sizeof(struct be_eth_tx_compl));
1936                 if (status)
1937                         return status;
1938
1939                 /* If num_evt_qs is less than num_tx_qs, then more than
1940                  * one txq share an eq
1941                  */
1942                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1943                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1944                 if (status)
1945                         return status;
1946         }
1947         return 0;
1948 }
1949
1950 static int be_tx_qs_create(struct be_adapter *adapter)
1951 {
1952         struct be_tx_obj *txo;
1953         int i, status;
1954
1955         for_all_tx_queues(adapter, txo, i) {
1956                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1957                                         sizeof(struct be_eth_wrb));
1958                 if (status)
1959                         return status;
1960
1961                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1962                 if (status)
1963                         return status;
1964         }
1965
1966         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1967                  adapter->num_tx_qs);
1968         return 0;
1969 }
1970
1971 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1972 {
1973         struct be_queue_info *q;
1974         struct be_rx_obj *rxo;
1975         int i;
1976
1977         for_all_rx_queues(adapter, rxo, i) {
1978                 q = &rxo->cq;
1979                 if (q->created)
1980                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1981                 be_queue_free(adapter, q);
1982         }
1983 }
1984
1985 static int be_rx_cqs_create(struct be_adapter *adapter)
1986 {
1987         struct be_queue_info *eq, *cq;
1988         struct be_rx_obj *rxo;
1989         int rc, i;
1990
1991         /* We'll create as many RSS rings as there are irqs.
1992          * But when there's only one irq there's no use in creating RSS rings.
1993          */
1994         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1995                                 num_irqs(adapter) + 1 : 1;
1996         if (adapter->num_rx_qs != MAX_RX_QS) {
1997                 rtnl_lock();
1998                 netif_set_real_num_rx_queues(adapter->netdev,
1999                                              adapter->num_rx_qs);
2000                 rtnl_unlock();
2001         }
2002
2003         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2004         for_all_rx_queues(adapter, rxo, i) {
2005                 rxo->adapter = adapter;
2006                 cq = &rxo->cq;
2007                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2008                                 sizeof(struct be_eth_rx_compl));
2009                 if (rc)
2010                         return rc;
2011
2012                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2013                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2014                 if (rc)
2015                         return rc;
2016         }
2017
2018         dev_info(&adapter->pdev->dev,
2019                  "created %d RSS queue(s) and 1 default RX queue\n",
2020                  adapter->num_rx_qs - 1);
2021         return 0;
2022 }
2023
2024 static irqreturn_t be_intx(int irq, void *dev)
2025 {
2026         struct be_eq_obj *eqo = dev;
2027         struct be_adapter *adapter = eqo->adapter;
2028         int num_evts = 0;
2029
2030         /* An IRQ is not expected when NAPI is scheduled as the EQ
2031          * will not be armed.
2032          * But this can happen on Lancer INTx, where it takes
2033          * a while to de-assert INTx, or on BE2, where occasionally
2034          * an interrupt may be raised even when the EQ is unarmed.
2035          * If NAPI is already scheduled, then counting & notifying
2036          * events will orphan them.
2037          */
2038         if (napi_schedule_prep(&eqo->napi)) {
2039                 num_evts = events_get(eqo);
2040                 __napi_schedule(&eqo->napi);
2041                 if (num_evts)
2042                         eqo->spurious_intr = 0;
2043         }
2044         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2045
2046         /* Return IRQ_HANDLED only for the first spurious intr
2047          * after a valid intr to stop the kernel from branding
2048          * this irq as a bad one!
2049          */
2050         if (num_evts || eqo->spurious_intr++ == 0)
2051                 return IRQ_HANDLED;
2052         else
2053                 return IRQ_NONE;
2054 }
2055
2056 static irqreturn_t be_msix(int irq, void *dev)
2057 {
2058         struct be_eq_obj *eqo = dev;
2059
2060         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2061         napi_schedule(&eqo->napi);
2062         return IRQ_HANDLED;
2063 }
2064
2065 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2066 {
2067         return (rxcp->tcpf && !rxcp->err) ? true : false;
2068 }
2069
2070 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2071                         int budget)
2072 {
2073         struct be_adapter *adapter = rxo->adapter;
2074         struct be_queue_info *rx_cq = &rxo->cq;
2075         struct be_rx_compl_info *rxcp;
2076         u32 work_done;
2077
2078         for (work_done = 0; work_done < budget; work_done++) {
2079                 rxcp = be_rx_compl_get(rxo);
2080                 if (!rxcp)
2081                         break;
2082
2083                 /* Is it a flush compl that has no data? */
2084                 if (unlikely(rxcp->num_rcvd == 0))
2085                         goto loop_continue;
2086
2087                 /* Discard a compl with a partial DMA (Lancer B0) */
2088                 if (unlikely(!rxcp->pkt_size)) {
2089                         be_rx_compl_discard(rxo, rxcp);
2090                         goto loop_continue;
2091                 }
2092
2093                 /* On BE drop pkts that arrive due to imperfect filtering in
2094                  * promiscuous mode on some SKUs
2095                  */
2096                 if (unlikely(rxcp->port != adapter->port_num &&
2097                                 !lancer_chip(adapter))) {
2098                         be_rx_compl_discard(rxo, rxcp);
2099                         goto loop_continue;
2100                 }
2101
2102                 if (do_gro(rxcp))
2103                         be_rx_compl_process_gro(rxo, napi, rxcp);
2104                 else
2105                         be_rx_compl_process(rxo, rxcp);
2106 loop_continue:
2107                 be_rx_stats_update(rxo, rxcp);
2108         }
2109
2110         if (work_done) {
2111                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2112
2113                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2114                         be_post_rx_frags(rxo, GFP_ATOMIC);
2115         }
2116
2117         return work_done;
2118 }
2119
2120 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2121                           int budget, int idx)
2122 {
2123         struct be_eth_tx_compl *txcp;
2124         int num_wrbs = 0, work_done;
2125
2126         for (work_done = 0; work_done < budget; work_done++) {
2127                 txcp = be_tx_compl_get(&txo->cq);
2128                 if (!txcp)
2129                         break;
2130                 num_wrbs += be_tx_compl_process(adapter, txo,
2131                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2132                                         wrb_index, txcp));
2133         }
2134
2135         if (work_done) {
2136                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2137                 atomic_sub(num_wrbs, &txo->q.used);
2138
2139                 /* As Tx wrbs have been freed up, wake up netdev queue
2140                  * if it was stopped due to lack of tx wrbs.  */
2141                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2142                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2143                         netif_wake_subqueue(adapter->netdev, idx);
2144                 }
2145
2146                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2147                 tx_stats(txo)->tx_compl += work_done;
2148                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2149         }
2150         return (work_done < budget); /* Done */
2151 }
2152
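/* NAPI poll handler: services all TXQs and RXQs mapped to this EQ.
 * If any TXQ exceeds its budget, max_work is forced to the full budget
 * so that NAPI keeps polling instead of re-arming the EQ.
 */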
2153 int be_poll(struct napi_struct *napi, int budget)
2154 {
2155         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2156         struct be_adapter *adapter = eqo->adapter;
2157         int max_work = 0, work, i, num_evts;
2158         bool tx_done;
2159
2160         num_evts = events_get(eqo);
2161
2162         /* Process all TXQs serviced by this EQ */
2163         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2164                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2165                                         eqo->tx_budget, i);
2166                 if (!tx_done)
2167                         max_work = budget;
2168         }
2169
2170         /* This loop will iterate twice for EQ0, in which
2171          * completions of the last RXQ (the default one) are also processed.
2172          * For other EQs the loop iterates only once.
2173          */
2174         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2175                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2176                 max_work = max(work, max_work);
2177         }
2178
2179         if (is_mcc_eqo(eqo))
2180                 be_process_mcc(adapter);
2181
2182         if (max_work < budget) {
2183                 napi_complete(napi);
2184                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2185         } else {
2186                 /* As we'll continue in polling mode, count and clear events */
2187                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2188         }
2189         return max_work;
2190 }
2191
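/* Check for unrecoverable errors: the SLIPORT status registers on Lancer,
 * the (masked) UE status CSRs on BE2/BE3. Only SLIPORT errors mark the
 * adapter with hw_error; see the comment on spurious UEs below.
 */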
2192 void be_detect_error(struct be_adapter *adapter)
2193 {
2194         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2195         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2196         u32 i;
2197
2198         if (be_hw_error(adapter))
2199                 return;
2200
2201         if (lancer_chip(adapter)) {
2202                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2203                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2204                         sliport_err1 = ioread32(adapter->db +
2205                                         SLIPORT_ERROR1_OFFSET);
2206                         sliport_err2 = ioread32(adapter->db +
2207                                         SLIPORT_ERROR2_OFFSET);
2208                 }
2209         } else {
2210                 pci_read_config_dword(adapter->pdev,
2211                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2212                 pci_read_config_dword(adapter->pdev,
2213                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2214                 pci_read_config_dword(adapter->pdev,
2215                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2216                 pci_read_config_dword(adapter->pdev,
2217                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2218
2219                 ue_lo = (ue_lo & ~ue_lo_mask);
2220                 ue_hi = (ue_hi & ~ue_hi_mask);
2221         }
2222
2223         /* On certain platforms BE hardware can indicate spurious UEs.
2224          * In case of a real UE the h/w will stop working completely anyway,
2225          * hence hw_error is not set on UE detection.
2226          */
2227         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2228                 adapter->hw_error = true;
2229                 dev_err(&adapter->pdev->dev,
2230                         "Error detected in the card\n");
2231         }
2232
2233         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2234                 dev_err(&adapter->pdev->dev,
2235                         "ERR: sliport status 0x%x\n", sliport_status);
2236                 dev_err(&adapter->pdev->dev,
2237                         "ERR: sliport error1 0x%x\n", sliport_err1);
2238                 dev_err(&adapter->pdev->dev,
2239                         "ERR: sliport error2 0x%x\n", sliport_err2);
2240         }
2241
2242         if (ue_lo) {
2243                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2244                         if (ue_lo & 1)
2245                                 dev_err(&adapter->pdev->dev,
2246                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2247                 }
2248         }
2249
2250         if (ue_hi) {
2251                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2252                         if (ue_hi & 1)
2253                                 dev_err(&adapter->pdev->dev,
2254                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2255                 }
2256         }
2258 }
2259
2260 static void be_msix_disable(struct be_adapter *adapter)
2261 {
2262         if (msix_enabled(adapter)) {
2263                 pci_disable_msix(adapter->pdev);
2264                 adapter->num_msix_vec = 0;
2265         }
2266 }
2267
2268 static uint be_num_rss_want(struct be_adapter *adapter)
2269 {
2270         u32 num = 0;
2271
2272         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2273             (lancer_chip(adapter) ||
2274              (!sriov_want(adapter) && be_physfn(adapter)))) {
2275                 num = adapter->max_rss_queues;
2276                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2277         }
2278         return num;
2279 }
2280
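/* Enable MSI-x with one vector per wanted RSS ring (plus RoCE vectors
 * when RoCE is supported), retrying with however many vectors
 * pci_enable_msix() reports as available.
 */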
2281 static void be_msix_enable(struct be_adapter *adapter)
2282 {
2283 #define BE_MIN_MSIX_VECTORS             1
2284         int i, status, num_vec, num_roce_vec = 0;
2285         struct device *dev = &adapter->pdev->dev;
2286
2287         /* If RSS queues are not used, need a vec for default RX Q */
2288         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2289         if (be_roce_supported(adapter)) {
2290                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2291                                         (num_online_cpus() + 1));
2292                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2293                 num_vec += num_roce_vec;
2294                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2295         }
2296         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2297
2298         for (i = 0; i < num_vec; i++)
2299                 adapter->msix_entries[i].entry = i;
2300
2301         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2302         if (status == 0) {
2303                 goto done;
2304         } else if (status >= BE_MIN_MSIX_VECTORS) {
2305                 num_vec = status;
2306                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2307                                 num_vec) == 0)
2308                         goto done;
2309         }
2310
2311         dev_warn(dev, "MSIx enable failed\n");
2312         return;
2313 done:
2314         if (be_roce_supported(adapter)) {
2315                 if (num_vec > num_roce_vec) {
2316                         adapter->num_msix_vec = num_vec - num_roce_vec;
2317                         adapter->num_msix_roce_vec =
2318                                 num_vec - adapter->num_msix_vec;
2319                 } else {
2320                         adapter->num_msix_vec = num_vec;
2321                         adapter->num_msix_roce_vec = 0;
2322                 }
2323         } else
2324                 adapter->num_msix_vec = num_vec;
2325         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2326         return;
2327 }
2328
2329 static inline int be_msix_vec_get(struct be_adapter *adapter,
2330                                 struct be_eq_obj *eqo)
2331 {
2332         return adapter->msix_entries[eqo->idx].vector;
2333 }
2334
2335 static int be_msix_register(struct be_adapter *adapter)
2336 {
2337         struct net_device *netdev = adapter->netdev;
2338         struct be_eq_obj *eqo;
2339         int status, i, vec;
2340
2341         for_all_evt_queues(adapter, eqo, i) {
2342                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2343                 vec = be_msix_vec_get(adapter, eqo);
2344                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2345                 if (status)
2346                         goto err_msix;
2347         }
2348
2349         return 0;
2350 err_msix:
2351         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2352                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2353         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2354                 status);
2355         be_msix_disable(adapter);
2356         return status;
2357 }
2358
2359 static int be_irq_register(struct be_adapter *adapter)
2360 {
2361         struct net_device *netdev = adapter->netdev;
2362         int status;
2363
2364         if (msix_enabled(adapter)) {
2365                 status = be_msix_register(adapter);
2366                 if (status == 0)
2367                         goto done;
2368                 /* INTx is not supported for VF */
2369                 if (!be_physfn(adapter))
2370                         return status;
2371         }
2372
2373         /* INTx: only the first EQ is used */
2374         netdev->irq = adapter->pdev->irq;
2375         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2376                              &adapter->eq_obj[0]);
2377         if (status) {
2378                 dev_err(&adapter->pdev->dev,
2379                         "INTx request IRQ failed - err %d\n", status);
2380                 return status;
2381         }
2382 done:
2383         adapter->isr_registered = true;
2384         return 0;
2385 }
2386
2387 static void be_irq_unregister(struct be_adapter *adapter)
2388 {
2389         struct net_device *netdev = adapter->netdev;
2390         struct be_eq_obj *eqo;
2391         int i;
2392
2393         if (!adapter->isr_registered)
2394                 return;
2395
2396         /* INTx */
2397         if (!msix_enabled(adapter)) {
2398                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2399                 goto done;
2400         }
2401
2402         /* MSIx */
2403         for_all_evt_queues(adapter, eqo, i)
2404                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2405
2406 done:
2407         adapter->isr_registered = false;
2408 }
2409
2410 static void be_rx_qs_destroy(struct be_adapter *adapter)
2411 {
2412         struct be_queue_info *q;
2413         struct be_rx_obj *rxo;
2414         int i;
2415
2416         for_all_rx_queues(adapter, rxo, i) {
2417                 q = &rxo->q;
2418                 if (q->created) {
2419                         be_cmd_rxq_destroy(adapter, q);
2420                         /* After the rxq is invalidated, wait for a grace time
2421                          * of 1ms for all dma to end and the flush compl to
2422                          * arrive
2423                          */
2424                         mdelay(1);
2425                         be_rx_cq_clean(rxo);
2426                 }
2427                 be_queue_free(adapter, q);
2428         }
2429 }
2430
2431 static int be_close(struct net_device *netdev)
2432 {
2433         struct be_adapter *adapter = netdev_priv(netdev);
2434         struct be_eq_obj *eqo;
2435         int i;
2436
2437         be_roce_dev_close(adapter);
2438
2439         if (!lancer_chip(adapter))
2440                 be_intr_set(adapter, false);
2441
2442         for_all_evt_queues(adapter, eqo, i)
2443                 napi_disable(&eqo->napi);
2444
2445         be_async_mcc_disable(adapter);
2446
2447         /* Wait for all pending tx completions to arrive so that
2448          * all tx skbs are freed.
2449          */
2450         be_tx_compl_clean(adapter);
2451
2452         be_rx_qs_destroy(adapter);
2453
2454         for_all_evt_queues(adapter, eqo, i) {
2455                 if (msix_enabled(adapter))
2456                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2457                 else
2458                         synchronize_irq(netdev->irq);
2459                 be_eq_clean(eqo);
2460         }
2461
2462         be_irq_unregister(adapter);
2463
2464         return 0;
2465 }
2466
2467 static int be_rx_qs_create(struct be_adapter *adapter)
2468 {
2469         struct be_rx_obj *rxo;
2470         int rc, i, j;
2471         u8 rsstable[128];
2472
2473         for_all_rx_queues(adapter, rxo, i) {
2474                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2475                                     sizeof(struct be_eth_rx_d));
2476                 if (rc)
2477                         return rc;
2478         }
2479
2480         /* The FW would like the default RXQ to be created first */
2481         rxo = default_rxo(adapter);
2482         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2483                                adapter->if_handle, false, &rxo->rss_id);
2484         if (rc)
2485                 return rc;
2486
2487         for_all_rss_queues(adapter, rxo, i) {
2488                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2489                                        rx_frag_size, adapter->if_handle,
2490                                        true, &rxo->rss_id);
2491                 if (rc)
2492                         return rc;
2493         }
2494
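        /* Program the 128-entry RSS indirection table by striping the RSS
         * ring ids round-robin; e.g. with 3 RSS rings the table becomes
         * { id0, id1, id2, id0, id1, id2, ... }.
         */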
2495         if (be_multi_rxq(adapter)) {
2496                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2497                         for_all_rss_queues(adapter, rxo, i) {
2498                                 if ((j + i) >= 128)
2499                                         break;
2500                                 rsstable[j + i] = rxo->rss_id;
2501                         }
2502                 }
2503                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2504                 if (rc)
2505                         return rc;
2506         }
2507
2508         /* First time posting */
2509         for_all_rx_queues(adapter, rxo, i)
2510                 be_post_rx_frags(rxo, GFP_KERNEL);
2511         return 0;
2512 }
2513
2514 static int be_open(struct net_device *netdev)
2515 {
2516         struct be_adapter *adapter = netdev_priv(netdev);
2517         struct be_eq_obj *eqo;
2518         struct be_rx_obj *rxo;
2519         struct be_tx_obj *txo;
2520         u8 link_status;
2521         int status, i;
2522
2523         status = be_rx_qs_create(adapter);
2524         if (status)
2525                 goto err;
2526
2527         be_irq_register(adapter);
2528
2529         if (!lancer_chip(adapter))
2530                 be_intr_set(adapter, true);
2531
2532         for_all_rx_queues(adapter, rxo, i)
2533                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2534
2535         for_all_tx_queues(adapter, txo, i)
2536                 be_cq_notify(adapter, txo->cq.id, true, 0);
2537
2538         be_async_mcc_enable(adapter);
2539
2540         for_all_evt_queues(adapter, eqo, i) {
2541                 napi_enable(&eqo->napi);
2542                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2543         }
2544
2545         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2546         if (!status)
2547                 be_link_status_update(adapter, link_status);
2548
2549         be_roce_dev_open(adapter);
2550         return 0;
2551 err:
2552         be_close(adapter->netdev);
2553         return -EIO;
2554 }
2555
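/* Program the magic-packet WoL filter in FW with the port's MAC address
 * (or a zero MAC to disable it) and toggle PCI wake for the D3 states
 * to match.
 */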
2556 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2557 {
2558         struct be_dma_mem cmd;
2559         int status = 0;
2560         u8 mac[ETH_ALEN];
2561
2562         memset(mac, 0, ETH_ALEN);
2563
2564         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2565         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2566                                     GFP_KERNEL);
2567         if (cmd.va == NULL)
2568                 return -1;
2569         memset(cmd.va, 0, cmd.size);
2570
2571         if (enable) {
2572                 status = pci_write_config_dword(adapter->pdev,
2573                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2574                 if (status) {
2575                         dev_err(&adapter->pdev->dev,
2576                                 "Could not enable Wake-on-lan\n");
2577                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2578                                           cmd.dma);
2579                         return status;
2580                 }
2581                 status = be_cmd_enable_magic_wol(adapter,
2582                                 adapter->netdev->dev_addr, &cmd);
2583                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2584                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2585         } else {
2586                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2587                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2588                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2589         }
2590
2591         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2592         return status;
2593 }
2594
2595 /*
2596  * Generate a seed MAC address from the PF MAC Address using jhash.
2597  * MAC addresses for VFs are assigned incrementally starting from the seed.
2598  * These addresses are programmed in the ASIC by the PF and the VF driver
2599  * queries for the MAC address during its probe.
2600  */
2601 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2602 {
2603         u32 vf;
2604         int status = 0;
2605         u8 mac[ETH_ALEN];
2606         struct be_vf_cfg *vf_cfg;
2607
2608         be_vf_eth_addr_generate(adapter, mac);
2609
2610         for_all_vfs(adapter, vf_cfg, vf) {
2611                 if (lancer_chip(adapter)) {
2612                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2613                 } else {
2614                         status = be_cmd_pmac_add(adapter, mac,
2615                                                  vf_cfg->if_handle,
2616                                                  &vf_cfg->pmac_id, vf + 1);
2617                 }
2618
2619                 if (status)
2620                         dev_err(&adapter->pdev->dev,
2621                         "Mac address assignment failed for VF %d\n", vf);
2622                 else
2623                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2624
2625                 mac[5] += 1;
2626         }
2627         return status;
2628 }
2629
2630 static int be_vfs_mac_query(struct be_adapter *adapter)
2631 {
2632         int status, vf;
2633         u8 mac[ETH_ALEN];
2634         struct be_vf_cfg *vf_cfg;
2635         bool active;
2636
2637         for_all_vfs(adapter, vf_cfg, vf) {
2638                 be_cmd_get_mac_from_list(adapter, mac, &active,
2639                                          &vf_cfg->pmac_id, 0);
2640
2641                 status = be_cmd_mac_addr_query(adapter, mac, false,
2642                                                vf_cfg->if_handle, 0);
2643                 if (status)
2644                         return status;
2645                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2646         }
2647         return 0;
2648 }
2649
2650 static void be_vf_clear(struct be_adapter *adapter)
2651 {
2652         struct be_vf_cfg *vf_cfg;
2653         u32 vf;
2654
2655         if (be_find_vfs(adapter, ASSIGNED)) {
2656                 dev_warn(&adapter->pdev->dev,
2657                          "VFs are assigned to VMs: not disabling VFs\n");
2658                 goto done;
2659         }
2660
2661         for_all_vfs(adapter, vf_cfg, vf) {
2662                 if (lancer_chip(adapter))
2663                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2664                 else
2665                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2666                                         vf_cfg->pmac_id, vf + 1);
2667
2668                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2669         }
2670         pci_disable_sriov(adapter->pdev);
2671 done:
2672         kfree(adapter->vf_cfg);
2673         adapter->num_vfs = 0;
2674 }
2675
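/* Teardown path: stop the worker, tear down VFs, delete the programmed
 * unicast MACs and the interface, and destroy all queues before
 * disabling MSI-x.
 */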
2676 static int be_clear(struct be_adapter *adapter)
2677 {
2678         int i = 1;
2679
2680         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2681                 cancel_delayed_work_sync(&adapter->work);
2682                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2683         }
2684
2685         if (sriov_enabled(adapter))
2686                 be_vf_clear(adapter);
2687
2688         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2689                 be_cmd_pmac_del(adapter, adapter->if_handle,
2690                         adapter->pmac_id[i], 0);
2691
2692         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2693
2694         be_mcc_queues_destroy(adapter);
2695         be_rx_cqs_destroy(adapter);
2696         be_tx_queues_destroy(adapter);
2697         be_evt_queues_destroy(adapter);
2698
2699         kfree(adapter->pmac_id);
2700         adapter->pmac_id = NULL;
2701
2702         be_msix_disable(adapter);
2703         return 0;
2704 }
2705
2706 static int be_vfs_if_create(struct be_adapter *adapter)
2707 {
2708         struct be_vf_cfg *vf_cfg;
2709         u32 cap_flags, en_flags, vf;
2710         int status;
2711
2712         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2713                     BE_IF_FLAGS_MULTICAST;
2714
2715         for_all_vfs(adapter, vf_cfg, vf) {
2716                 if (!BE3_chip(adapter))
2717                         be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2718
2719                 /* If a FW profile exists, then cap_flags are updated */
2720                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2721                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2722                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2723                                           &vf_cfg->if_handle, vf + 1);
2724                 if (status)
2725                         goto err;
2726         }
2727 err:
2728         return status;
2729 }
2730
2731 static int be_vf_setup_init(struct be_adapter *adapter)
2732 {
2733         struct be_vf_cfg *vf_cfg;
2734         int vf;
2735
2736         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2737                                   GFP_KERNEL);
2738         if (!adapter->vf_cfg)
2739                 return -ENOMEM;
2740
2741         for_all_vfs(adapter, vf_cfg, vf) {
2742                 vf_cfg->if_handle = -1;
2743                 vf_cfg->pmac_id = -1;
2744         }
2745         return 0;
2746 }
2747
2748 static int be_vf_setup(struct be_adapter *adapter)
2749 {
2750         struct be_vf_cfg *vf_cfg;
2751         u16 def_vlan, lnk_speed;
2752         int status, old_vfs, vf;
2753         struct device *dev = &adapter->pdev->dev;
2754
2755         old_vfs = be_find_vfs(adapter, ENABLED);
2756         if (old_vfs) {
2757                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2758                 if (old_vfs != num_vfs)
2759                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2760                 adapter->num_vfs = old_vfs;
2761         } else {
2762                 if (num_vfs > adapter->dev_num_vfs)
2763                         dev_info(dev, "Device supports %d VFs and not %d\n",
2764                                  adapter->dev_num_vfs, num_vfs);
2765                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2766
2767                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2768                 if (status) {
2769                         dev_err(dev, "SRIOV enable failed\n");
2770                         adapter->num_vfs = 0;
2771                         return 0;
2772                 }
2773         }
2774
2775         status = be_vf_setup_init(adapter);
2776         if (status)
2777                 goto err;
2778
2779         if (old_vfs) {
2780                 for_all_vfs(adapter, vf_cfg, vf) {
2781                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2782                         if (status)
2783                                 goto err;
2784                 }
2785         } else {
2786                 status = be_vfs_if_create(adapter);
2787                 if (status)
2788                         goto err;
2789         }
2790
2791         if (old_vfs) {
2792                 status = be_vfs_mac_query(adapter);
2793                 if (status)
2794                         goto err;
2795         } else {
2796                 status = be_vf_eth_addr_config(adapter);
2797                 if (status)
2798                         goto err;
2799         }
2800
2801         for_all_vfs(adapter, vf_cfg, vf) {
2802                 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2803                  * Allow full available bandwidth
2804                  */
2805                 if (BE3_chip(adapter) && !old_vfs)
2806                         be_cmd_set_qos(adapter, 1000, vf+1);
2807
2808                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2809                                                   NULL, vf + 1);
2810                 if (!status)
2811                         vf_cfg->tx_rate = lnk_speed;
2812
2813                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2814                                                vf + 1, vf_cfg->if_handle);
2815                 if (status)
2816                         goto err;
2817                 vf_cfg->def_vid = def_vlan;
2818
2819                 be_cmd_enable_vf(adapter, vf + 1);
2820         }
2821         return 0;
2822 err:
2823         dev_err(dev, "VF setup failed\n");
2824         be_vf_clear(adapter);
2825         return status;
2826 }
2827
2828 static void be_setup_init(struct be_adapter *adapter)
2829 {
2830         adapter->vlan_prio_bmap = 0xff;
2831         adapter->phy.link_speed = -1;
2832         adapter->if_handle = -1;
2833         adapter->be3_native = false;
2834         adapter->promiscuous = false;
2835         if (be_physfn(adapter))
2836                 adapter->cmd_privileges = MAX_PRIVILEGES;
2837         else
2838                 adapter->cmd_privileges = MIN_PRIVILEGES;
2839 }
2840
2841 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2842                            bool *active_mac, u32 *pmac_id)
2843 {
2844         int status = 0;
2845
2846         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2847                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2848                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2849                         *active_mac = true;
2850                 else
2851                         *active_mac = false;
2852
2853                 return status;
2854         }
2855
2856         if (lancer_chip(adapter)) {
2857                 status = be_cmd_get_mac_from_list(adapter, mac,
2858                                                   active_mac, pmac_id, 0);
2859                 if (*active_mac) {
2860                         status = be_cmd_mac_addr_query(adapter, mac, false,
2861                                                        if_handle, *pmac_id);
2862                 }
2863         } else if (be_physfn(adapter)) {
2864                 /* For BE3, for PF get permanent MAC */
2865                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2866                 *active_mac = false;
2867         } else {
2868                 /* For BE3, for VF get soft MAC assigned by PF*/
2869                 status = be_cmd_mac_addr_query(adapter, mac, false,
2870                                                if_handle, 0);
2871                 *active_mac = true;
2872         }
2873         return status;
2874 }
2875
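     /* Discover per-function resource limits: from the FW function
      * profile (with sanity caps) on Skyhawk/Lancer, or from
      * chip-generation defaults on BE2/BE3. Also reads the SR-IOV
      * TotalVFs count from PCI config space.
      */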
2876 static void be_get_resources(struct be_adapter *adapter)
2877 {
2878         u16 dev_num_vfs;
2879         int pos, status;
2880         bool profile_present = false;
2881
2882         if (!BEx_chip(adapter)) {
2883                 status = be_cmd_get_func_config(adapter);
2884                 if (!status)
2885                         profile_present = true;
2886         }
2887
2888         if (profile_present) {
2889                 /* Sanity-cap the limits reported by the FW profile */
2890                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2891                                               BE_UC_PMAC_COUNT);
2892                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2893                                            BE_NUM_VLANS_SUPPORTED);
2894                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2895                                                BE_MAX_MC);
2896                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2897                                                MAX_TX_QS);
2898                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2899                                                 BE3_MAX_RSS_QS);
2900                 adapter->max_event_queues = min_t(u16,
2901                                                   adapter->max_event_queues,
2902                                                   BE3_MAX_RSS_QS);
2903
2904                 if (adapter->max_rss_queues &&
2905                     adapter->max_rss_queues == adapter->max_rx_queues)
2906                         adapter->max_rss_queues -= 1;
2907
2908                 if (adapter->max_event_queues < adapter->max_rss_queues)
2909                         adapter->max_rss_queues = adapter->max_event_queues;
2910
2911         } else {
2912                 if (be_physfn(adapter))
2913                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2914                 else
2915                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2916
2917                 if (adapter->function_mode & FLEX10_MODE)
2918                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
2919                 else
2920                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2921
2922                 adapter->max_mcast_mac = BE_MAX_MC;
2923                 adapter->max_tx_queues = MAX_TX_QS;
2924                 adapter->max_rss_queues = (adapter->be3_native) ?
2925                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2926                 adapter->max_event_queues = BE3_MAX_RSS_QS;
2927
2928                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2929                                         BE_IF_FLAGS_BROADCAST |
2930                                         BE_IF_FLAGS_MULTICAST |
2931                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
2932                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
2933                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
2934                                         BE_IF_FLAGS_PROMISCUOUS;
2935
2936                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2937                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2938         }
2939
2940         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2941         if (pos) {
2942                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2943                                      &dev_num_vfs);
2944                 if (BE3_chip(adapter))
2945                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2946                 adapter->dev_num_vfs = dev_num_vfs;
2947         }
2948 }
2949
2950 /* Routine to query per function resource limits */
2951 static int be_get_config(struct be_adapter *adapter)
2952 {
2953         int status;
2954
2955         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2956                                      &adapter->function_mode,
2957                                      &adapter->function_caps);
2958         if (status)
2959                 goto err;
2960
2961         be_get_resources(adapter);
2962
2963         /* primary mac needs 1 pmac entry */
2964         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2965                                    sizeof(u32), GFP_KERNEL);
2966         if (!adapter->pmac_id) {
2967                 status = -ENOMEM;
2968                 goto err;
2969         }
2970
2971 err:
2972         return status;
2973 }
2974
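     /* Bring the function to an operational state: create event, MCC and
      * completion queues, create the interface with the negotiated
      * capability flags, program a MAC, create TX queues, apply
      * VLAN/RX-mode/flow-control settings, optionally set up VFs and
      * start the periodic worker.
      */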
2975 static int be_setup(struct be_adapter *adapter)
2976 {
2977         struct device *dev = &adapter->pdev->dev;
2978         u32 en_flags;
2979         u32 tx_fc, rx_fc;
2980         int status;
2981         u8 mac[ETH_ALEN];
2982         bool active_mac;
2983
2984         be_setup_init(adapter);
2985
2986         if (!lancer_chip(adapter))
2987                 be_cmd_req_native_mode(adapter);
2988
2989         status = be_get_config(adapter);
2990         if (status)
2991                 goto err;
2992
2993         be_msix_enable(adapter);
2994
2995         status = be_evt_queues_create(adapter);
2996         if (status)
2997                 goto err;
2998
2999         status = be_tx_cqs_create(adapter);
3000         if (status)
3001                 goto err;
3002
3003         status = be_rx_cqs_create(adapter);
3004         if (status)
3005                 goto err;
3006
3007         status = be_mcc_queues_create(adapter);
3008         if (status)
3009                 goto err;
3010
3011         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3012         /* In UMC mode the FW does not return the right privileges.
3013          * Override with correct privilege equivalent to PF.
3014          */
3015         if (be_is_mc(adapter))
3016                 adapter->cmd_privileges = MAX_PRIVILEGES;
3017
3018         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3019                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3020
3021         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3022                 en_flags |= BE_IF_FLAGS_RSS;
3023
3024         en_flags = en_flags & adapter->if_cap_flags;
3025
3026         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3027                                   &adapter->if_handle, 0);
3028         if (status != 0)
3029                 goto err;
3030
3031         memset(mac, 0, ETH_ALEN);
3032         active_mac = false;
3033         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3034                                  &active_mac, &adapter->pmac_id[0]);
3035         if (status != 0)
3036                 goto err;
3037
3038         if (!active_mac) {
3039                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3040                                          &adapter->pmac_id[0], 0);
3041                 if (status != 0)
3042                         goto err;
3043         }
3044
3045         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3046                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3047                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3048         }
3049
3050         status = be_tx_qs_create(adapter);
3051         if (status)
3052                 goto err;
3053
3054         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3055
3056         if (adapter->vlans_added)
3057                 be_vid_config(adapter);
3058
3059         be_set_rx_mode(adapter->netdev);
3060
3061         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3062
3063         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3064                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3065                                         adapter->rx_fc);
3066
3067         if (be_physfn(adapter) && num_vfs) {
3068                 if (adapter->dev_num_vfs)
3069                         be_vf_setup(adapter);
3070                 else
3071                         dev_warn(dev, "device doesn't support SRIOV\n");
3072         }
3073
3074         status = be_cmd_get_phy_info(adapter);
3075         if (!status && be_pause_supported(adapter))
3076                 adapter->phy.fc_autoneg = 1;
3077
3078         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3079         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3080         return 0;
3081 err:
3082         be_clear(adapter);
3083         return status;
3084 }
3085
3086 #ifdef CONFIG_NET_POLL_CONTROLLER
3087 static void be_netpoll(struct net_device *netdev)
3088 {
3089         struct be_adapter *adapter = netdev_priv(netdev);
3090         struct be_eq_obj *eqo;
3091         int i;
3092
3093         for_all_evt_queues(adapter, eqo, i) {
3094                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3095                 napi_schedule(&eqo->napi);
3096         }
3099 }
3100 #endif
3101
3102 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3103 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3104
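     /* Decide whether the boot-code (redboot) section needs flashing by
      * comparing the CRC stored in flash with the last 4 bytes of the
      * new image; flash only when they differ.
      */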
3105 static bool be_flash_redboot(struct be_adapter *adapter,
3106                         const u8 *p, u32 img_start, int image_size,
3107                         int hdr_size)
3108 {
3109         u32 crc_offset;
3110         u8 flashed_crc[4];
3111         int status;
3112
3113         crc_offset = hdr_size + img_start + image_size - 4;
3114
3115         p += crc_offset;
3116
3117         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3118                         (image_size - 4));
3119         if (status) {
3120                 dev_err(&adapter->pdev->dev,
3121                 "could not get crc from flash, not flashing redboot\n");
3122                 return false;
3123         }
3124
3125         /* update redboot only if the CRC does not match */
3126         if (!memcmp(flashed_crc, p, 4))
3127                 return false;
3128         else
3129                 return true;
3130 }
3131
3132 static bool phy_flashing_required(struct be_adapter *adapter)
3133 {
3134         return (adapter->phy.phy_type == TN_8022 &&
3135                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3136 }
3137
3138 static bool is_comp_in_ufi(struct be_adapter *adapter,
3139                            struct flash_section_info *fsec, int type)
3140 {
3141         int i = 0, img_type = 0;
3142         struct flash_section_info_g2 *fsec_g2 = NULL;
3143
3144         if (BE2_chip(adapter))
3145                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3146
3147         for (i = 0; i < MAX_FLASH_COMP; i++) {
3148                 if (fsec_g2)
3149                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3150                 else
3151                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3152
3153                 if (img_type == type)
3154                         return true;
3155         }
3156         return false;
3158 }
3159
3160 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3161                                                 int header_size,
3162                                                 const struct firmware *fw)
3163 {
3164         struct flash_section_info *fsec = NULL;
3165         const u8 *p = fw->data;
3166
3167         p += header_size;
3168         while (p < (fw->data + fw->size)) {
3169                 fsec = (struct flash_section_info *)p;
3170                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3171                         return fsec;
3172                 p += 32;
3173         }
3174         return NULL;
3175 }
3176
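     /* Write one image to the flashrom in 32KB chunks: intermediate
      * chunks use a SAVE operation, the final chunk uses a FLASH
      * operation that commits the image. A missing PHY image
      * (ILLEGAL_IOCTL_REQ) is not treated as an error.
      */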
3177 static int be_flash(struct be_adapter *adapter, const u8 *img,
3178                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3179 {
3180         u32 total_bytes = 0, flash_op, num_bytes = 0;
3181         int status = 0;
3182         struct be_cmd_write_flashrom *req = flash_cmd->va;
3183
3184         total_bytes = img_size;
3185         while (total_bytes) {
3186                 num_bytes = min_t(u32, 32*1024, total_bytes);
3187
3188                 total_bytes -= num_bytes;
3189
3190                 if (!total_bytes) {
3191                         if (optype == OPTYPE_PHY_FW)
3192                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3193                         else
3194                                 flash_op = FLASHROM_OPER_FLASH;
3195                 } else {
3196                         if (optype == OPTYPE_PHY_FW)
3197                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3198                         else
3199                                 flash_op = FLASHROM_OPER_SAVE;
3200                 }
3201
3202                 memcpy(req->data_buf, img, num_bytes);
3203                 img += num_bytes;
3204                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3205                                                 flash_op, num_bytes);
3206                 if (status) {
3207                         if (status == ILLEGAL_IOCTL_REQ &&
3208                             optype == OPTYPE_PHY_FW)
3209                                 break;
3210                         dev_err(&adapter->pdev->dev,
3211                                 "cmd to write to flash rom failed.\n");
3212                         return status;
3213                 }
3214         }
3215         return 0;
3216 }
3217
3218 /* For BE2 and BE3 */
3219 static int be_flash_BEx(struct be_adapter *adapter,
3220                          const struct firmware *fw,
3221                          struct be_dma_mem *flash_cmd,
3222                          int num_of_images)
3224 {
3225         int status = 0, i, filehdr_size = 0;
3226         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3227         const u8 *p = fw->data;
3228         const struct flash_comp *pflashcomp;
3229         int num_comp, redboot;
3230         struct flash_section_info *fsec = NULL;
3231
3232         static const struct flash_comp gen3_flash_types[] = {
3233                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3234                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3235                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3236                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3237                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3238                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3239                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3240                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3241                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3242                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3243                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3244                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3245                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3246                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3247                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3248                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3249                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3250                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3251                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3252                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3253         };
3254
3255         static const struct flash_comp gen2_flash_types[] = {
3256                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3257                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3258                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3259                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3260                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3261                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3262                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3263                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3264                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3265                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3266                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3267                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3268                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3269                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3270                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3271                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3272         };
3273
3274         if (BE3_chip(adapter)) {
3275                 pflashcomp = gen3_flash_types;
3276                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3277                 num_comp = ARRAY_SIZE(gen3_flash_types);
3278         } else {
3279                 pflashcomp = gen2_flash_types;
3280                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3281                 num_comp = ARRAY_SIZE(gen2_flash_types);
3282         }
3283
3284         /* Get flash section info*/
3285         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3286         if (!fsec) {
3287                 dev_err(&adapter->pdev->dev,
3288                         "Invalid cookie. UFI corrupted?\n");
3289                 return -EINVAL;
3290         }
3291         for (i = 0; i < num_comp; i++) {
3292                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3293                         continue;
3294
3295                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3296                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3297                         continue;
3298
3299                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3300                     !phy_flashing_required(adapter))
3301                         continue;
3302
3303                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3304                         redboot = be_flash_redboot(adapter, fw->data,
3305                                 pflashcomp[i].offset, pflashcomp[i].size,
3306                                 filehdr_size + img_hdrs_size);
3307                         if (!redboot)
3308                                 continue;
3309                 }
3310
3311                 p = fw->data;
3312                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3313                 if (p + pflashcomp[i].size > fw->data + fw->size)
3314                         return -1;
3315
3316                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3317                                         pflashcomp[i].size);
3318                 if (status) {
3319                         dev_err(&adapter->pdev->dev,
3320                                 "Flashing section type %d failed.\n",
3321                                 pflashcomp[i].img_type);
3322                         return status;
3323                 }
3324         }
3325         return 0;
3326 }
3327
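     /* Skyhawk flashing: walk the flash-section entries of the UFI, map
      * each image type to its flash optype and flash the applicable
      * sections.
      */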
3328 static int be_flash_skyhawk(struct be_adapter *adapter,
3329                 const struct firmware *fw,
3330                 struct be_dma_mem *flash_cmd, int num_of_images)
3331 {
3332         int status = 0, i, filehdr_size = 0;
3333         int img_offset, img_size, img_optype, redboot;
3334         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3335         const u8 *p = fw->data;
3336         struct flash_section_info *fsec = NULL;
3337
3338         filehdr_size = sizeof(struct flash_file_hdr_g3);
3339         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3340         if (!fsec) {
3341                 dev_err(&adapter->pdev->dev,
3342                         "Invalid cookie. UFI corrupted?\n");
3343                 return -EINVAL;
3344         }
3345
3346         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3347                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3348                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3349
3350                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3351                 case IMAGE_FIRMWARE_iSCSI:
3352                         img_optype = OPTYPE_ISCSI_ACTIVE;
3353                         break;
3354                 case IMAGE_BOOT_CODE:
3355                         img_optype = OPTYPE_REDBOOT;
3356                         break;
3357                 case IMAGE_OPTION_ROM_ISCSI:
3358                         img_optype = OPTYPE_BIOS;
3359                         break;
3360                 case IMAGE_OPTION_ROM_PXE:
3361                         img_optype = OPTYPE_PXE_BIOS;
3362                         break;
3363                 case IMAGE_OPTION_ROM_FCoE:
3364                         img_optype = OPTYPE_FCOE_BIOS;
3365                         break;
3366                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3367                         img_optype = OPTYPE_ISCSI_BACKUP;
3368                         break;
3369                 case IMAGE_NCSI:
3370                         img_optype = OPTYPE_NCSI_FW;
3371                         break;
3372                 default:
3373                         continue;
3374                 }
3375
3376                 if (img_optype == OPTYPE_REDBOOT) {
3377                         redboot = be_flash_redboot(adapter, fw->data,
3378                                         img_offset, img_size,
3379                                         filehdr_size + img_hdrs_size);
3380                         if (!redboot)
3381                                 continue;
3382                 }
3383
3384                 p = fw->data;
3385                 p += filehdr_size + img_offset + img_hdrs_size;
3386                 if (p + img_size > fw->data + fw->size)
3387                         return -1;
3388
3389                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3390                 if (status) {
3391                         dev_err(&adapter->pdev->dev,
3392                                 "Flashing section type %d failed.\n",
3393                                 le32_to_cpu(fsec->fsec_entry[i].type));
3394                         return status;
3395                 }
3396         }
3397         return 0;
3398 }
3399
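     /* Poll the physdev control register until the in-progress bit
      * clears, for at most SLIPORT_IDLE_TIMEOUT seconds.
      */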
3400 static int lancer_wait_idle(struct be_adapter *adapter)
3401 {
3402 #define SLIPORT_IDLE_TIMEOUT 30
3403         u32 reg_val;
3404         int status = 0, i;
3405
3406         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3407                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3408                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3409                         break;
3410
3411                 ssleep(1);
3412         }
3413
3414         if (i == SLIPORT_IDLE_TIMEOUT)
3415                 status = -ETIMEDOUT;
3416
3417         return status;
3418 }
3419
3420 static int lancer_fw_reset(struct be_adapter *adapter)
3421 {
3422         int status = 0;
3423
3424         status = lancer_wait_idle(adapter);
3425         if (status)
3426                 return status;
3427
3428         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3429                   PHYSDEV_CONTROL_OFFSET);
3430
3431         return status;
3432 }
3433
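     /* Lancer flashing: stream the image to the "/prg" object in 32KB
      * chunks via WRITE_OBJECT, then issue a zero-length write to commit
      * it. Depending on what the FW reports, either reset the FW or warn
      * that a reboot is needed for the new image to take effect.
      */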
3434 static int lancer_fw_download(struct be_adapter *adapter,
3435                                 const struct firmware *fw)
3436 {
3437 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3438 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3439         struct be_dma_mem flash_cmd;
3440         const u8 *data_ptr = NULL;
3441         u8 *dest_image_ptr = NULL;
3442         size_t image_size = 0;
3443         u32 chunk_size = 0;
3444         u32 data_written = 0;
3445         u32 offset = 0;
3446         int status = 0;
3447         u8 add_status = 0;
3448         u8 change_status;
3449
3450         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3451                 dev_err(&adapter->pdev->dev,
3452                         "FW Image not properly aligned. "
3453                         "Length must be 4-byte aligned.\n");
3454                 status = -EINVAL;
3455                 goto lancer_fw_exit;
3456         }
3457
3458         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3459                                 + LANCER_FW_DOWNLOAD_CHUNK;
3460         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3461                                                 &flash_cmd.dma, GFP_KERNEL);
3462         if (!flash_cmd.va) {
3463                 status = -ENOMEM;
3464                 dev_err(&adapter->pdev->dev,
3465                         "Memory allocation failure while flashing\n");
3466                 goto lancer_fw_exit;
3467         }
3468
3469         dest_image_ptr = flash_cmd.va +
3470                                 sizeof(struct lancer_cmd_req_write_object);
3471         image_size = fw->size;
3472         data_ptr = fw->data;
3473
3474         while (image_size) {
3475                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3476
3477                 /* Copy the image chunk content. */
3478                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3479
3480                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3481                                                  chunk_size, offset,
3482                                                  LANCER_FW_DOWNLOAD_LOCATION,
3483                                                  &data_written, &change_status,
3484                                                  &add_status);
3485                 if (status)
3486                         break;
3487
3488                 offset += data_written;
3489                 data_ptr += data_written;
3490                 image_size -= data_written;
3491         }
3492
3493         if (!status) {
3494                 /* Commit the FW written */
3495                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3496                                                  0, offset,
3497                                                  LANCER_FW_DOWNLOAD_LOCATION,
3498                                                  &data_written, &change_status,
3499                                                  &add_status);
3500         }
3501
3502         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3503                                 flash_cmd.dma);
3504         if (status) {
3505                 dev_err(&adapter->pdev->dev,
3506                         "Firmware load error. "
3507                         "Status code: 0x%x Additional Status: 0x%x\n",
3508                         status, add_status);
3509                 goto lancer_fw_exit;
3510         }
3511
3512         if (change_status == LANCER_FW_RESET_NEEDED) {
3513                 status = lancer_fw_reset(adapter);
3514                 if (status) {
3515                         dev_err(&adapter->pdev->dev,
3516                                 "Adapter busy for FW reset.\n"
3517                                 "New FW will not be active.\n");
3518                         goto lancer_fw_exit;
3519                 }
3520         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3521                 dev_err(&adapter->pdev->dev,
3522                         "System reboot required for new FW"
3523                         " to be active\n");
3524         }
3525
3526         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3527 lancer_fw_exit:
3528         return status;
3529 }
3530
3531 #define UFI_TYPE2               2
3532 #define UFI_TYPE3               3
3533 #define UFI_TYPE4               4
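     /* Match the UFI image's major build number against the chip
      * generation; flashing is refused when they do not correspond.
      */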
3534 static int be_get_ufi_type(struct be_adapter *adapter,
3535                            struct flash_file_hdr_g2 *fhdr)
3536 {
3537         if (fhdr == NULL)
3538                 goto be_get_ufi_exit;
3539
3540         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3541                 return UFI_TYPE4;
3542         else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3543                 return UFI_TYPE3;
3544         else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3545                 return UFI_TYPE2;
3546
3547 be_get_ufi_exit:
3548         dev_err(&adapter->pdev->dev,
3549                 "UFI and Interface are not compatible for flashing\n");
3550         return -1;
3551 }
3552
3553 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3554 {
3555         struct flash_file_hdr_g2 *fhdr;
3556         struct flash_file_hdr_g3 *fhdr3;
3557         struct image_hdr *img_hdr_ptr = NULL;
3558         struct be_dma_mem flash_cmd;
3559         const u8 *p;
3560         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3561
3562         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3563         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3564                                           &flash_cmd.dma, GFP_KERNEL);
3565         if (!flash_cmd.va) {
3566                 status = -ENOMEM;
3567                 dev_err(&adapter->pdev->dev,
3568                         "Memory allocation failure while flashing\n");
3569                 goto be_fw_exit;
3570         }
3571
3572         p = fw->data;
3573         fhdr = (struct flash_file_hdr_g2 *)p;
3574
3575         ufi_type = be_get_ufi_type(adapter, fhdr);
3576
3577         fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3578         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3579         for (i = 0; i < num_imgs; i++) {
3580                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3581                                 (sizeof(struct flash_file_hdr_g3) +
3582                                  i * sizeof(struct image_hdr)));
3583                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3584                         if (ufi_type == UFI_TYPE4)
3585                                 status = be_flash_skyhawk(adapter, fw,
3586                                                         &flash_cmd, num_imgs);
3587                         else if (ufi_type == UFI_TYPE3)
3588                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3589                                                       num_imgs);
3590                 }
3591         }
3592
3593         if (ufi_type == UFI_TYPE2)
3594                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3595         else if (ufi_type == -1)
3596                 status = -1;
3597
3598         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3599                           flash_cmd.dma);
3600         if (status) {
3601                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3602                 goto be_fw_exit;
3603         }
3604
3605         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3606
3607 be_fw_exit:
3608         return status;
3609 }
3610
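     /* Entry point for user-initiated flashing (typically reached via
      * "ethtool -f <iface> <file>"); the interface must be up so that
      * FW commands can be issued.
      */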
3611 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3612 {
3613         const struct firmware *fw;
3614         int status;
3615
3616         if (!netif_running(adapter->netdev)) {
3617                 dev_err(&adapter->pdev->dev,
3618                         "Firmware load not allowed (interface is down)\n");
3619                 return -ENETDOWN;
3620         }
3621
3622         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3623         if (status)
3624                 goto fw_exit;
3625
3626         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3627
3628         if (lancer_chip(adapter))
3629                 status = lancer_fw_download(adapter, fw);
3630         else
3631                 status = be_fw_download(adapter, fw);
3632
3633 fw_exit:
3634         release_firmware(fw);
3635         return status;
3636 }
3637
3638 static const struct net_device_ops be_netdev_ops = {
3639         .ndo_open               = be_open,
3640         .ndo_stop               = be_close,
3641         .ndo_start_xmit         = be_xmit,
3642         .ndo_set_rx_mode        = be_set_rx_mode,
3643         .ndo_set_mac_address    = be_mac_addr_set,
3644         .ndo_change_mtu         = be_change_mtu,
3645         .ndo_get_stats64        = be_get_stats64,
3646         .ndo_validate_addr      = eth_validate_addr,
3647         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3648         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3649         .ndo_set_vf_mac         = be_set_vf_mac,
3650         .ndo_set_vf_vlan        = be_set_vf_vlan,
3651         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3652         .ndo_get_vf_config      = be_get_vf_config,
3653 #ifdef CONFIG_NET_POLL_CONTROLLER
3654         .ndo_poll_controller    = be_netpoll,
3655 #endif
3656 };
3657
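     /* Advertise offload features (SG, TSO, checksumming, VLAN tag
      * insertion/stripping/filtering, RX hashing with multiple RX rings)
      * and register a NAPI context for every event queue.
      */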
3658 static void be_netdev_init(struct net_device *netdev)
3659 {
3660         struct be_adapter *adapter = netdev_priv(netdev);
3661         struct be_eq_obj *eqo;
3662         int i;
3663
3664         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3665                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3666                 NETIF_F_HW_VLAN_TX;
3667         if (be_multi_rxq(adapter))
3668                 netdev->hw_features |= NETIF_F_RXHASH;
3669
3670         netdev->features |= netdev->hw_features |
3671                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3672
3673         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3674                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3675
3676         netdev->priv_flags |= IFF_UNICAST_FLT;
3677
3678         netdev->flags |= IFF_MULTICAST;
3679
3680         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3681
3682         netdev->netdev_ops = &be_netdev_ops;
3683
3684         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3685
3686         for_all_evt_queues(adapter, eqo, i)
3687                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3688 }
3689
3690 static void be_unmap_pci_bars(struct be_adapter *adapter)
3691 {
3692         if (adapter->csr)
3693                 pci_iounmap(adapter->pdev, adapter->csr);
3694         if (adapter->db)
3695                 pci_iounmap(adapter->pdev, adapter->db);
3696 }
3697
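     /* Doorbells live in BAR 0 on Lancer and on VFs; PFs of the other
      * chips expose them in BAR 4.
      */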
3698 static int db_bar(struct be_adapter *adapter)
3699 {
3700         if (lancer_chip(adapter) || !be_physfn(adapter))
3701                 return 0;
3702         else
3703                 return 4;
3704 }
3705
3706 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3707 {
3708         if (skyhawk_chip(adapter)) {
3709                 adapter->roce_db.size = 4096;
3710                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3711                                                               db_bar(adapter));
3712                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3713                                                                db_bar(adapter));
3714         }
3715         return 0;
3716 }
3717
3718 static int be_map_pci_bars(struct be_adapter *adapter)
3719 {
3720         u8 __iomem *addr;
3721         u32 sli_intf;
3722
3723         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3724         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3725                                 SLI_INTF_IF_TYPE_SHIFT;
3726
3727         if (BEx_chip(adapter) && be_physfn(adapter)) {
3728                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3729                 if (adapter->csr == NULL)
3730                         return -ENOMEM;
3731         }
3732
3733         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3734         if (addr == NULL)
3735                 goto pci_map_err;
3736         adapter->db = addr;
3737
3738         be_roce_map_pci_bars(adapter);
3739         return 0;
3740
3741 pci_map_err:
3742         be_unmap_pci_bars(adapter);
3743         return -ENOMEM;
3744 }
3745
3746 static void be_ctrl_cleanup(struct be_adapter *adapter)
3747 {
3748         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3749
3750         be_unmap_pci_bars(adapter);
3751
3752         if (mem->va)
3753                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3754                                   mem->dma);
3755
3756         mem = &adapter->rx_filter;
3757         if (mem->va)
3758                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3759                                   mem->dma);
3760 }
3761
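     /* Map the PCI BARs and allocate the DMA buffers used to talk to the
      * FW: a 16-byte-aligned mailbox for bootstrap commands and a buffer
      * for RX-filter commands; also initialize the associated locks.
      */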
3762 static int be_ctrl_init(struct be_adapter *adapter)
3763 {
3764         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3765         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3766         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3767         u32 sli_intf;
3768         int status;
3769
3770         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3771         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3772                                  SLI_INTF_FAMILY_SHIFT;
3773         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3774
3775         status = be_map_pci_bars(adapter);
3776         if (status)
3777                 goto done;
3778
3779         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3780         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3781                                                 mbox_mem_alloc->size,
3782                                                 &mbox_mem_alloc->dma,
3783                                                 GFP_KERNEL);
3784         if (!mbox_mem_alloc->va) {
3785                 status = -ENOMEM;
3786                 goto unmap_pci_bars;
3787         }
3788         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3789         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3790         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3791         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3792
3793         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3794         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3795                                         &rx_filter->dma, GFP_KERNEL);
3796         if (rx_filter->va == NULL) {
3797                 status = -ENOMEM;
3798                 goto free_mbox;
3799         }
3800         memset(rx_filter->va, 0, rx_filter->size);
3801         mutex_init(&adapter->mbox_lock);
3802         spin_lock_init(&adapter->mcc_lock);
3803         spin_lock_init(&adapter->mcc_cq_lock);
3804
3805         init_completion(&adapter->flash_compl);
3806         pci_save_state(adapter->pdev);
3807         return 0;
3808
3809 free_mbox:
3810         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3811                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3812
3813 unmap_pci_bars:
3814         be_unmap_pci_bars(adapter);
3815
3816 done:
3817         return status;
3818 }
3819
3820 static void be_stats_cleanup(struct be_adapter *adapter)
3821 {
3822         struct be_dma_mem *cmd = &adapter->stats_cmd;
3823
3824         if (cmd->va)
3825                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3826                                   cmd->va, cmd->dma);
3827 }
3828
3829 static int be_stats_init(struct be_adapter *adapter)
3830 {
3831         struct be_dma_mem *cmd = &adapter->stats_cmd;
3832
3833         if (lancer_chip(adapter))
3834                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3835         else if (BE2_chip(adapter))
3836                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3837         else
3838                 /* BE3 and Skyhawk */
3839                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3840
3841         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3842                                      GFP_KERNEL);
3843         if (cmd->va == NULL)
3844                 return -ENOMEM;
3845         memset(cmd->va, 0, cmd->size);
3846         return 0;
3847 }
3848
3849 static void be_remove(struct pci_dev *pdev)
3850 {
3851         struct be_adapter *adapter = pci_get_drvdata(pdev);
3852
3853         if (!adapter)
3854                 return;
3855
3856         be_roce_dev_remove(adapter);
3857
3858         cancel_delayed_work_sync(&adapter->func_recovery_work);
3859
3860         unregister_netdev(adapter->netdev);
3861
3862         be_clear(adapter);
3863
3864         /* tell fw we're done with firing cmds */
3865         be_cmd_fw_clean(adapter);
3866
3867         be_stats_cleanup(adapter);
3868
3869         be_ctrl_cleanup(adapter);
3870
3871         pci_disable_pcie_error_reporting(pdev);
3872
3873         pci_set_drvdata(pdev, NULL);
3874         pci_release_regions(pdev);
3875         pci_disable_device(pdev);
3876
3877         free_netdev(adapter->netdev);
3878 }
3879
3880 bool be_is_wol_supported(struct be_adapter *adapter)
3881 {
3882         return (adapter->wol_cap & BE_WOL_CAP) &&
3883                !be_is_wol_excluded(adapter);
3884 }
3885
3886 u32 be_get_fw_log_level(struct be_adapter *adapter)
3887 {
3888         struct be_dma_mem extfat_cmd;
3889         struct be_fat_conf_params *cfgs;
3890         int status;
3891         u32 level = 0;
3892         int j;
3893
3894         if (lancer_chip(adapter))
3895                 return 0;
3896
3897         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3898         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3899         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3900                                              &extfat_cmd.dma);
3901
3902         if (!extfat_cmd.va) {
3903                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3904                         __func__);
3905                 goto err;
3906         }
3907
3908         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3909         if (!status) {
3910                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3911                                                 sizeof(struct be_cmd_resp_hdr));
3912                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3913                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3914                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3915                 }
3916         }
3917         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3918                             extfat_cmd.dma);
3919 err:
3920         return level;
3921 }
3922
3923 static int be_get_initial_config(struct be_adapter *adapter)
3924 {
3925         int status;
3926         u32 level;
3927
3928         status = be_cmd_get_cntl_attributes(adapter);
3929         if (status)
3930                 return status;
3931
3932         status = be_cmd_get_acpi_wol_cap(adapter);
3933         if (status) {
3934                 /* in case of a failure to get wol capabilities
3935                  * check the exclusion list to determine WOL capability */
3936                 if (!be_is_wol_excluded(adapter))
3937                         adapter->wol_cap |= BE_WOL_CAP;
3938         }
3939
3940         if (be_is_wol_supported(adapter))
3941                 adapter->wol = true;
3942
3943         /* Must be a power of 2 or else MODULO will BUG_ON */
3944         adapter->be_get_temp_freq = 64;
3945
3946         level = be_get_fw_log_level(adapter);
3947         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3948
3949         return 0;
3950 }
3951
3952 static int lancer_recover_func(struct be_adapter *adapter)
3953 {
3954         int status;
3955
3956         status = lancer_test_and_set_rdy_state(adapter);
3957         if (status)
3958                 goto err;
3959
3960         if (netif_running(adapter->netdev))
3961                 be_close(adapter->netdev);
3962
3963         be_clear(adapter);
3964
3965         adapter->hw_error = false;
3966         adapter->fw_timeout = false;
3967
3968         status = be_setup(adapter);
3969         if (status)
3970                 goto err;
3971
3972         if (netif_running(adapter->netdev)) {
3973                 status = be_open(adapter->netdev);
3974                 if (status)
3975                         goto err;
3976         }
3977
3978         dev_err(&adapter->pdev->dev,
3979                 "Adapter SLIPORT recovery succeeded\n");
3980         return 0;
3981 err:
3982         if (adapter->eeh_error)
3983                 dev_err(&adapter->pdev->dev,
3984                         "Adapter SLIPORT recovery failed\n");
3985
3986         return status;
3987 }
3988
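     /* Periodic (1s) error-recovery check. Only Lancer supports in-place
      * SLIPORT recovery; the netdev is detached while recovery runs and
      * re-attached on success.
      */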
3989 static void be_func_recovery_task(struct work_struct *work)
3990 {
3991         struct be_adapter *adapter =
3992                 container_of(work, struct be_adapter,  func_recovery_work.work);
3993         int status;
3994
3995         be_detect_error(adapter);
3996
3997         if (adapter->hw_error && lancer_chip(adapter)) {
3998
3999                 if (adapter->eeh_error)
4000                         goto out;
4001
4002                 rtnl_lock();
4003                 netif_device_detach(adapter->netdev);
4004                 rtnl_unlock();
4005
4006                 status = lancer_recover_func(adapter);
4007
4008                 if (!status)
4009                         netif_device_attach(adapter->netdev);
4010         }
4011
4012 out:
4013         schedule_delayed_work(&adapter->func_recovery_work,
4014                               msecs_to_jiffies(1000));
4015 }
4016
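     /* Periodic (1s) housekeeping: fire stats commands, sample the die
      * temperature every be_get_temp_freq iterations, replenish RX
      * queues that ran out of buffers and adapt the event-queue
      * interrupt delays.
      */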
4017 static void be_worker(struct work_struct *work)
4018 {
4019         struct be_adapter *adapter =
4020                 container_of(work, struct be_adapter, work.work);
4021         struct be_rx_obj *rxo;
4022         struct be_eq_obj *eqo;
4023         int i;
4024
4025         /* when interrupts are not yet enabled, just reap any pending
4026          * mcc completions */
4027         if (!netif_running(adapter->netdev)) {
4028                 local_bh_disable();
4029                 be_process_mcc(adapter);
4030                 local_bh_enable();
4031                 goto reschedule;
4032         }
4033
4034         if (!adapter->stats_cmd_sent) {
4035                 if (lancer_chip(adapter))
4036                         lancer_cmd_get_pport_stats(adapter,
4037                                                 &adapter->stats_cmd);
4038                 else
4039                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4040         }
4041
4042         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4043                 be_cmd_get_die_temperature(adapter);
4044
4045         for_all_rx_queues(adapter, rxo, i) {
4046                 if (rxo->rx_post_starved) {
4047                         rxo->rx_post_starved = false;
4048                         be_post_rx_frags(rxo, GFP_KERNEL);
4049                 }
4050         }
4051
4052         for_all_evt_queues(adapter, eqo, i)
4053                 be_eqd_update(adapter, eqo);
4054
4055 reschedule:
4056         adapter->work_counter++;
4057         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4058 }
4059
4060 static bool be_reset_required(struct be_adapter *adapter)
4061 {
4062         return be_find_vfs(adapter, ENABLED) <= 0;
4063 }
4064
4065 static const char *mc_name(struct be_adapter *adapter)
4066 {
4067         if (adapter->function_mode & FLEX10_MODE)
4068                 return "FLEX10";
4069         else if (adapter->function_mode & VNIC_MODE)
4070                 return "vNIC";
4071         else if (adapter->function_mode & UMC_ENABLED)
4072                 return "UMC";
4073         else
4074                 return "";
4075 }
4076
4077 static inline const char *func_name(struct be_adapter *adapter)
4078 {
4079         return be_physfn(adapter) ? "PF" : "VF";
4080 }
4081
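     /* PCI probe: enable the device, set the DMA mask, map BARs and set
      * up FW communication, sync with FW readiness, reset the function
      * when no VFs are enabled, then configure the function and register
      * the netdev.
      */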
4082 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4083 {
4084         int status = 0;
4085         struct be_adapter *adapter;
4086         struct net_device *netdev;
4087         char port_name;
4088
4089         status = pci_enable_device(pdev);
4090         if (status)
4091                 goto do_none;
4092
4093         status = pci_request_regions(pdev, DRV_NAME);
4094         if (status)
4095                 goto disable_dev;
4096         pci_set_master(pdev);
4097
4098         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4099         if (netdev == NULL) {
4100                 status = -ENOMEM;
4101                 goto rel_reg;
4102         }
4103         adapter = netdev_priv(netdev);
4104         adapter->pdev = pdev;
4105         pci_set_drvdata(pdev, adapter);
4106         adapter->netdev = netdev;
4107         SET_NETDEV_DEV(netdev, &pdev->dev);
4108
4109         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4110         if (!status) {
4111                 netdev->features |= NETIF_F_HIGHDMA;
4112         } else {
4113                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4114                 if (status) {
4115                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4116                         goto free_netdev;
4117                 }
4118         }
4119
4120         status = pci_enable_pcie_error_reporting(pdev);
4121         if (status)
4122                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4123
4124         status = be_ctrl_init(adapter);
4125         if (status)
4126                 goto free_netdev;
4127
4128         /* sync up with fw's ready state */
4129         if (be_physfn(adapter)) {
4130                 status = be_fw_wait_ready(adapter);
4131                 if (status)
4132                         goto ctrl_clean;
4133         }
4134
4135         /* tell fw we're ready to fire cmds */
4136         status = be_cmd_fw_init(adapter);
4137         if (status)
4138                 goto ctrl_clean;
4139
4140         if (be_reset_required(adapter)) {
4141                 status = be_cmd_reset_function(adapter);
4142                 if (status)
4143                         goto ctrl_clean;
4144         }
4145
4146         /* The INTR bit may be set in the card when probed by a kdump kernel
4147          * after a crash.
4148          */
4149         if (!lancer_chip(adapter))
4150                 be_intr_set(adapter, false);
4151
4152         status = be_stats_init(adapter);
4153         if (status)
4154                 goto ctrl_clean;
4155
4156         status = be_get_initial_config(adapter);
4157         if (status)
4158                 goto stats_clean;
4159
4160         INIT_DELAYED_WORK(&adapter->work, be_worker);
4161         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4162         adapter->rx_fc = adapter->tx_fc = true;
4163
4164         status = be_setup(adapter);
4165         if (status)
4166                 goto stats_clean;
4167
4168         be_netdev_init(netdev);
4169         status = register_netdev(netdev);
4170         if (status != 0)
4171                 goto unsetup;
4172
4173         be_roce_dev_add(adapter);
4174
4175         schedule_delayed_work(&adapter->func_recovery_work,
4176                               msecs_to_jiffies(1000));
4177
4178         be_cmd_query_port_name(adapter, &port_name);
4179
4180         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4181                  func_name(adapter), mc_name(adapter), port_name);
4182
4183         return 0;
4184
4185 unsetup:
4186         be_clear(adapter);
4187 stats_clean:
4188         be_stats_cleanup(adapter);
4189 ctrl_clean:
4190         be_ctrl_cleanup(adapter);
4191 free_netdev:
4192         free_netdev(netdev);
4193         pci_set_drvdata(pdev, NULL);
4194 rel_reg:
4195         pci_release_regions(pdev);
4196 disable_dev:
4197         pci_disable_device(pdev);
4198 do_none:
4199         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4200         return status;
4201 }
4202
4203 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4204 {
4205         struct be_adapter *adapter = pci_get_drvdata(pdev);
4206         struct net_device *netdev =  adapter->netdev;
4207
4208         if (adapter->wol)
4209                 be_setup_wol(adapter, true);
4210
4211         cancel_delayed_work_sync(&adapter->func_recovery_work);
4212
4213         netif_device_detach(netdev);
4214         if (netif_running(netdev)) {
4215                 rtnl_lock();
4216                 be_close(netdev);
4217                 rtnl_unlock();
4218         }
4219         be_clear(adapter);
4220
4221         pci_save_state(pdev);
4222         pci_disable_device(pdev);
4223         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4224         return 0;
4225 }
4226
4227 static int be_resume(struct pci_dev *pdev)
4228 {
4229         int status = 0;
4230         struct be_adapter *adapter = pci_get_drvdata(pdev);
4231         struct net_device *netdev =  adapter->netdev;
4232
4233         netif_device_detach(netdev);
4234
4235         status = pci_enable_device(pdev);
4236         if (status)
4237                 return status;
4238
4239         pci_set_power_state(pdev, PCI_D0);
4240         pci_restore_state(pdev);
4241
4242         /* tell fw we're ready to fire cmds */
4243         status = be_cmd_fw_init(adapter);
4244         if (status)
4245                 return status;
4246
4247         be_setup(adapter);
4248         if (netif_running(netdev)) {
4249                 rtnl_lock();
4250                 be_open(netdev);
4251                 rtnl_unlock();
4252         }
4253
4254         schedule_delayed_work(&adapter->func_recovery_work,
4255                               msecs_to_jiffies(1000));
4256         netif_device_attach(netdev);
4257
4258         if (adapter->wol)
4259                 be_setup_wol(adapter, false);
4260
4261         return 0;
4262 }
4263
4264 /*
4265  * An FLR will stop BE from DMAing any data.
4266  */
4267 static void be_shutdown(struct pci_dev *pdev)
4268 {
4269         struct be_adapter *adapter = pci_get_drvdata(pdev);
4270
4271         if (!adapter)
4272                 return;
4273
4274         cancel_delayed_work_sync(&adapter->work);
4275         cancel_delayed_work_sync(&adapter->func_recovery_work);
4276
4277         netif_device_detach(adapter->netdev);
4278
4279         be_cmd_reset_function(adapter);
4280
4281         pci_disable_device(pdev);
4282 }
4283
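     /* EEH: a PCI channel error was detected. Quiesce the function; the
      * error may have triggered a FW flash debug dump, so function 0
      * waits to let it finish before the slot is reset.
      */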
4284 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4285                                 pci_channel_state_t state)
4286 {
4287         struct be_adapter *adapter = pci_get_drvdata(pdev);
4288         struct net_device *netdev =  adapter->netdev;
4289
4290         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4291
4292         adapter->eeh_error = true;
4293
4294         cancel_delayed_work_sync(&adapter->func_recovery_work);
4295
4296         rtnl_lock();
4297         netif_device_detach(netdev);
4298         rtnl_unlock();
4299
4300         if (netif_running(netdev)) {
4301                 rtnl_lock();
4302                 be_close(netdev);
4303                 rtnl_unlock();
4304         }
4305         be_clear(adapter);
4306
4307         if (state == pci_channel_io_perm_failure)
4308                 return PCI_ERS_RESULT_DISCONNECT;
4309
4310         pci_disable_device(pdev);
4311
4312         /* The error could cause the FW to trigger a flash debug dump.
4313          * Resetting the card while flash dump is in progress
4314          * can cause it not to recover; wait for it to finish.
4315          * Wait only for first function as it is needed only once per
4316          * adapter.
4317          */
4318         if (pdev->devfn == 0)
4319                 ssleep(30);
4320
4321         return PCI_ERS_RESULT_NEED_RESET;
4322 }
4323
4324 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4325 {
4326         struct be_adapter *adapter = pci_get_drvdata(pdev);
4327         int status;
4328
4329         dev_info(&adapter->pdev->dev, "EEH reset\n");
4330         be_clear_all_error(adapter);
4331
4332         status = pci_enable_device(pdev);
4333         if (status)
4334                 return PCI_ERS_RESULT_DISCONNECT;
4335
4336         pci_set_master(pdev);
4337         pci_set_power_state(pdev, PCI_D0);
4338         pci_restore_state(pdev);
4339
4340         /* Check if card is ok and fw is ready */
4341         dev_info(&adapter->pdev->dev,
4342                  "Waiting for FW to be ready after EEH reset\n");
4343         status = be_fw_wait_ready(adapter);
4344         if (status)
4345                 return PCI_ERS_RESULT_DISCONNECT;
4346
4347         pci_cleanup_aer_uncorrect_error_status(pdev);
4348         return PCI_ERS_RESULT_RECOVERED;
4349 }
4350
4351 static void be_eeh_resume(struct pci_dev *pdev)
4352 {
4353         int status = 0;
4354         struct be_adapter *adapter = pci_get_drvdata(pdev);
4355         struct net_device *netdev =  adapter->netdev;
4356
4357         dev_info(&adapter->pdev->dev, "EEH resume\n");
4358
4359         pci_save_state(pdev);
4360
4361         /* tell fw we're ready to fire cmds */
4362         status = be_cmd_fw_init(adapter);
4363         if (status)
4364                 goto err;
4365
4366         status = be_cmd_reset_function(adapter);
4367         if (status)
4368                 goto err;
4369
4370         status = be_setup(adapter);
4371         if (status)
4372                 goto err;
4373
4374         if (netif_running(netdev)) {
4375                 status = be_open(netdev);
4376                 if (status)
4377                         goto err;
4378         }
4379
4380         schedule_delayed_work(&adapter->func_recovery_work,
4381                               msecs_to_jiffies(1000));
4382         netif_device_attach(netdev);
4383         return;
4384 err:
4385         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4386 }
4387
4388 static const struct pci_error_handlers be_eeh_handlers = {
4389         .error_detected = be_eeh_err_detected,
4390         .slot_reset = be_eeh_reset,
4391         .resume = be_eeh_resume,
4392 };
4393
4394 static struct pci_driver be_driver = {
4395         .name = DRV_NAME,
4396         .id_table = be_dev_ids,
4397         .probe = be_probe,
4398         .remove = be_remove,
4399         .suspend = be_suspend,
4400         .resume = be_resume,
4401         .shutdown = be_shutdown,
4402         .err_handler = &be_eeh_handlers
4403 };
4404
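     /* Example (hypothetical interface/values): load with four VFs and a
      * larger RX fragment size:
      *   modprobe be2net num_vfs=4 rx_frag_size=4096
      */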
4405 static int __init be_init_module(void)
4406 {
4407         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4408             rx_frag_size != 2048) {
4409                 printk(KERN_WARNING DRV_NAME
4410                         ": Module param rx_frag_size must be 2048/4096/8192."
4411                         " Using 2048\n");
4412                 rx_frag_size = 2048;
4413         }
4414
4415         return pci_register_driver(&be_driver);
4416 }
4417 module_init(be_init_module);
4418
4419 static void __exit be_exit_module(void)
4420 {
4421         pci_unregister_driver(&be_driver);
4422 }
4423 module_exit(be_exit_module);