be2net: fix vfs enumeration
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
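/* Example module load (illustrative parameter values):
 *	modprobe be2net num_vfs=4 rx_frag_size=4096
 * Both parameters are S_IRUGO: readable via sysfs but settable only at
 * load time. num_vfs enables SR-IOV VFs at probe time.
 */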

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

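/* Allocate and zero the DMA-coherent ring memory behind a be_queue_info
 * of 'len' entries of 'entry_size' bytes each; paired with
 * be_queue_free() above.
 */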
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

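/* Mask/unmask host interrupts by toggling the HOSTINTR bit in the
 * membar interrupt-control register in PCI config space; skipped while
 * an EEH error is pending.
 */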
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

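/* Doorbell helpers: each packs a ring id and a count into a single
 * 32-bit doorbell write. The RQ and TXQ doorbells are preceded by
 * wmb() so posted ring entries are visible to the device before the
 * doorbell write; the EQ/CQ doorbells additionally carry arm/clear
 * bits and the number of entries popped.
 */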
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

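/* Change the primary MAC: the new address is added as a pmac entry
 * before the old one is deleted, so the interface never goes without
 * a valid filter.
 */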
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac, false,
				       adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

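/* Fold a 16-bit HW counter into a 32-bit software accumulator.
 * e.g. with *acc = 0x0001fff0 and val = 5: val < lo(*acc), so the
 * counter wrapped and the result is hi(*acc) + val + 65536 = 0x00020005.
 */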
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

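/* Rewrite the 802.1p priority in a tx VLAN tag when the OS-supplied
 * priority is not enabled in adapter->vlan_prio_bmap. e.g. for tag
 * 0xa005 (prio 5, VID 5) with a bmap lacking bit 5, the priority bits
 * are replaced with adapter->recommended_prio.
 */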
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

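/* Build the WRB chain for one skb: the header WRB is reserved first,
 * followed by one fragment WRB for the linear part (if any), one per
 * page fragment, and an optional dummy WRB to keep the count even on
 * non-Lancer chips. On a mapping failure the queue head is rewound
 * and all mappings made so far are undone.
 */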
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
		skb->vlan_tci = 0;
	}

	return skb;
}

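/* Transmit entry point. Besides normal WRB setup, the inline comments
 * below describe two HW bug workarounds: padded runt IPv4 frames are
 * trimmed so the HW cannot mis-edit tot_len, and VLAN tags are
 * inserted in software for packets without checksum offload.
 */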
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

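/* Program the rx filters in order of decreasing strictness: IFF_PROMISC
 * short-circuits everything; otherwise the unicast pmac list is rebuilt
 * when it changed, and multicast filtering falls back to IFF_ALLMULTI
 * when the address count exceeds what the HW supports or when
 * programming the MULTICAST filter fails.
 */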
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

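/* Count this PF's VFs (all of them, or only those assigned to a guest).
 * This is the enumeration the commit subject refers to: VFs are found
 * by walking the PCI device list and comparing pci_physfn(dev) against
 * our pdev, rather than deriving each VF's devfn from the SR-IOV
 * offset/stride values (which are still read below but no longer used
 * for matching).
 */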
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

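/* Adaptive interrupt coalescing: once a second, derive the rx
 * packets/sec rate and map it to an EQ delay of (pps / 110000) << 3,
 * clamped to [min_eqd, max_eqd] and forced to 0 below 10; the new
 * value is pushed to FW only when it actually changed.
 */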
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

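/* Look up the page info backing rx fragment frag_idx; the compound rx
 * page is DMA-unmapped only when its last user fragment is consumed.
 */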
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

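/* Two parse helpers for the two rx-completion descriptor layouts:
 * v1 on BE3-native adapters, v0 otherwise. be_rx_compl_get() below
 * picks the right one based on adapter->be3_native.
 */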
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

1440 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1441 {
1442         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1443         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1444         struct be_adapter *adapter = rxo->adapter;
1445
1446         /* For checking the valid bit it is OK to use either definition, as the
1447          * valid bit is at the same position in both the v0 and v1 Rx compl */
1448         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1449                 return NULL;
1450
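        /* Read the rest of the entry only after the valid bit has been
         * observed set; the completion may still be landing in memory
         * when we first peek at it.
         */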
1451         rmb();
1452         be_dws_le_to_cpu(compl, sizeof(*compl));
1453
1454         if (adapter->be3_native)
1455                 be_parse_rx_compl_v1(compl, rxcp);
1456         else
1457                 be_parse_rx_compl_v0(compl, rxcp);
1458
1459         if (rxcp->vlanf) {
1460                 /* vlanf could be wrongly set in some cards.
1461                  * ignore if vtm is not set */
1462                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1463                         rxcp->vlanf = 0;
1464
1465                 if (!lancer_chip(adapter))
1466                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1467
1468                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1469                     !adapter->vlan_tag[rxcp->vlan_tag])
1470                         rxcp->vlanf = 0;
1471         }
1472
1473         /* As the compl has been parsed, reset it; we won't touch it again */
1474         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1475
1476         queue_tail_inc(&rxo->cq);
1477         return rxcp;
1478 }
1479
1480 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1481 {
1482         u32 order = get_order(size);
1483
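        /* Multi-page allocations are made compound so that the per-frag
         * get_page()/put_page() refcounting operates on the allocation
         * as a single unit.
         */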
1484         if (order > 0)
1485                 gfp |= __GFP_COMP;
1486         return  alloc_pages(gfp, order);
1487 }
1488
1489 /*
1490  * Allocate a page, split it into fragments of size rx_frag_size and post them as
1491  * receive buffers to BE
1492  */
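/* A sketch of the resulting layout, assuming rx_frag_size = 2048 on a 4K
 * page: each page yields two frags at offsets 0 and 2048. The page_info of
 * the frag that fills the page is marked last_page_user, which the
 * consuming side uses to decide when the page's DMA mapping can be undone.
 */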
1493 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1494 {
1495         struct be_adapter *adapter = rxo->adapter;
1496         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1497         struct be_queue_info *rxq = &rxo->q;
1498         struct page *pagep = NULL;
1499         struct be_eth_rx_d *rxd;
1500         u64 page_dmaaddr = 0, frag_dmaaddr;
1501         u32 posted, page_offset = 0;
1502
1503         page_info = &rxo->page_info_tbl[rxq->head];
1504         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1505                 if (!pagep) {
1506                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1507                         if (unlikely(!pagep)) {
1508                                 rx_stats(rxo)->rx_post_fail++;
1509                                 break;
1510                         }
1511                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1512                                                     0, adapter->big_page_size,
1513                                                     DMA_FROM_DEVICE);
                             if (dma_mapping_error(&adapter->pdev->dev,
                                                   page_dmaaddr)) {
                                     put_page(pagep);
                                     pagep = NULL;
                                     rx_stats(rxo)->rx_post_fail++;
                                     break;
                             }
1514                         page_info->page_offset = 0;
1515                 } else {
1516                         get_page(pagep);
1517                         page_info->page_offset = page_offset + rx_frag_size;
1518                 }
1519                 page_offset = page_info->page_offset;
1520                 page_info->page = pagep;
1521                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1522                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1523
1524                 rxd = queue_head_node(rxq);
1525                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1526                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1527
1528                 /* Any space left in the current big page for another frag? */
1529                 if ((page_offset + rx_frag_size + rx_frag_size) >
1530                                         adapter->big_page_size) {
1531                         pagep = NULL;
1532                         page_info->last_page_user = true;
1533                 }
1534
1535                 prev_page_info = page_info;
1536                 queue_head_inc(rxq);
1537                 page_info = &rxo->page_info_tbl[rxq->head];
1538         }
1539         if (pagep)
1540                 prev_page_info->last_page_user = true;
1541
1542         if (posted) {
1543                 atomic_add(posted, &rxq->used);
1544                 be_rxq_notify(adapter, rxq->id, posted);
1545         } else if (atomic_read(&rxq->used) == 0) {
1546                 /* Let be_worker replenish when memory is available */
1547                 rxo->rx_post_starved = true;
1548         }
1549 }
1550
1551 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1552 {
1553         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1554
1555         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1556                 return NULL;
1557
1558         rmb();
1559         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1560
1561         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1562
1563         queue_tail_inc(tx_cq);
1564         return txcp;
1565 }
1566
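/* Unmaps and frees the skb posted at txq->tail, walking its wrbs up to and
 * including last_index. For example, an skb posted as a header wrb plus
 * two data wrbs returns num_wrbs == 3, which the caller subtracts from
 * txq->used.
 */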
1567 static u16 be_tx_compl_process(struct be_adapter *adapter,
1568                 struct be_tx_obj *txo, u16 last_index)
1569 {
1570         struct be_queue_info *txq = &txo->q;
1571         struct be_eth_wrb *wrb;
1572         struct sk_buff **sent_skbs = txo->sent_skb_list;
1573         struct sk_buff *sent_skb;
1574         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1575         bool unmap_skb_hdr = true;
1576
1577         sent_skb = sent_skbs[txq->tail];
1578         BUG_ON(!sent_skb);
1579         sent_skbs[txq->tail] = NULL;
1580
1581         /* skip header wrb */
1582         queue_tail_inc(txq);
1583
1584         do {
1585                 cur_index = txq->tail;
1586                 wrb = queue_tail_node(txq);
1587                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1588                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1589                 unmap_skb_hdr = false;
1590
1591                 num_wrbs++;
1592                 queue_tail_inc(txq);
1593         } while (cur_index != last_index);
1594
1595         kfree_skb(sent_skb);
1596         return num_wrbs;
1597 }
1598
1599 /* Consume the pending entries in the event queue and return their count */
1600 static inline int events_get(struct be_eq_obj *eqo)
1601 {
1602         struct be_eq_entry *eqe;
1603         int num = 0;
1604
1605         do {
1606                 eqe = queue_tail_node(&eqo->q);
1607                 if (eqe->evt == 0)
1608                         break;
1609
1610                 rmb();
1611                 eqe->evt = 0;
1612                 num++;
1613                 queue_tail_inc(&eqo->q);
1614         } while (true);
1615
1616         return num;
1617 }
1618
1619 static int event_handle(struct be_eq_obj *eqo)
1620 {
1621         bool rearm = false;
1622         int num = events_get(eqo);
1623
1624         /* Deal with any spurious interrupts that come without events */
1625         if (!num)
1626                 rearm = true;
1627
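        /* Ack the consumed entries via the EQ doorbell; the EQ is
         * re-armed only on a spurious interrupt, as be_poll() re-arms it
         * itself once its budget is not exhausted.
         */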
1628         if (num || msix_enabled(eqo->adapter))
1629                 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1630
1631         if (num)
1632                 napi_schedule(&eqo->napi);
1633
1634         return num;
1635 }
1636
1637 /* Leaves the EQ in a disarmed state */
1638 static void be_eq_clean(struct be_eq_obj *eqo)
1639 {
1640         int num = events_get(eqo);
1641
1642         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1643 }
1644
1645 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1646 {
1647         struct be_rx_page_info *page_info;
1648         struct be_queue_info *rxq = &rxo->q;
1649         struct be_queue_info *rx_cq = &rxo->cq;
1650         struct be_rx_compl_info *rxcp;
1651         u16 tail;
1652
1653         /* First cleanup pending rx completions */
1654         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1655                 be_rx_compl_discard(rxo, rxcp);
1656                 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1657         }
1658
1659         /* Then free the posted rx buffers that were not used */
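        /* rxq->head has already advanced past the posted-but-unconsumed
         * buffers; stepping back 'used' slots (modulo the ring size)
         * lands on the oldest descriptor still holding a page.
         */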
1660         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1661         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1662                 page_info = get_rx_page_info(rxo, tail);
1663                 put_page(page_info->page);
1664                 memset(page_info, 0, sizeof(*page_info));
1665         }
1666         BUG_ON(atomic_read(&rxq->used));
1667         rxq->tail = rxq->head = 0;
1668 }
1669
1670 static void be_tx_compl_clean(struct be_adapter *adapter)
1671 {
1672         struct be_tx_obj *txo;
1673         struct be_queue_info *txq;
1674         struct be_eth_tx_compl *txcp;
1675         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1676         struct sk_buff *sent_skb;
1677         bool dummy_wrb;
1678         int i, pending_txqs;
1679
1680         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1681         do {
1682                 pending_txqs = adapter->num_tx_qs;
1683
1684                 for_all_tx_queues(adapter, txo, i) {
1685                         txq = &txo->q;
1686                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1687                                 end_idx =
1688                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1689                                                       wrb_index, txcp);
1690                                 num_wrbs += be_tx_compl_process(adapter, txo,
1691                                                                 end_idx);
1692                                 cmpl++;
1693                         }
1694                         if (cmpl) {
1695                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1696                                 atomic_sub(num_wrbs, &txq->used);
1697                                 cmpl = 0;
1698                                 num_wrbs = 0;
1699                         }
1700                         if (atomic_read(&txq->used) == 0)
1701                                 pending_txqs--;
1702                 }
1703
1704                 if (pending_txqs == 0 || ++timeo > 200)
1705                         break;
1706
1707                 mdelay(1);
1708         } while (true);
1709
1710         for_all_tx_queues(adapter, txo, i) {
1711                 txq = &txo->q;
1712                 if (atomic_read(&txq->used))
1713                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1714                                 atomic_read(&txq->used));
1715
1716                 /* free posted tx for which compls will never arrive */
1717                 while (atomic_read(&txq->used)) {
1718                         sent_skb = txo->sent_skb_list[txq->tail];
1719                         end_idx = txq->tail;
1720                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1721                                                    &dummy_wrb);
1722                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1723                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1724                         atomic_sub(num_wrbs, &txq->used);
1725                 }
1726         }
1727 }
1728
1729 static void be_evt_queues_destroy(struct be_adapter *adapter)
1730 {
1731         struct be_eq_obj *eqo;
1732         int i;
1733
1734         for_all_evt_queues(adapter, eqo, i) {
1735                 if (eqo->q.created) {
1736                         be_eq_clean(eqo);
1737                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1738                 }
1739                 be_queue_free(adapter, &eqo->q);
1740         }
1741 }
1742
1743 static int be_evt_queues_create(struct be_adapter *adapter)
1744 {
1745         struct be_queue_info *eq;
1746         struct be_eq_obj *eqo;
1747         int i, rc;
1748
1749         adapter->num_evt_qs = num_irqs(adapter);
1750
1751         for_all_evt_queues(adapter, eqo, i) {
1752                 eqo->adapter = adapter;
1753                 eqo->tx_budget = BE_TX_BUDGET;
1754                 eqo->idx = i;
1755                 eqo->max_eqd = BE_MAX_EQD;
1756                 eqo->enable_aic = true;
1757
1758                 eq = &eqo->q;
1759                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1760                                         sizeof(struct be_eq_entry));
1761                 if (rc)
1762                         return rc;
1763
1764                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1765                 if (rc)
1766                         return rc;
1767         }
1768         return 0;
1769 }
1770
1771 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1772 {
1773         struct be_queue_info *q;
1774
1775         q = &adapter->mcc_obj.q;
1776         if (q->created)
1777                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1778         be_queue_free(adapter, q);
1779
1780         q = &adapter->mcc_obj.cq;
1781         if (q->created)
1782                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1783         be_queue_free(adapter, q);
1784 }
1785
1786 /* Must be called only after TX qs are created as MCC shares TX EQ */
1787 static int be_mcc_queues_create(struct be_adapter *adapter)
1788 {
1789         struct be_queue_info *q, *cq;
1790
1791         cq = &adapter->mcc_obj.cq;
1792         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1793                         sizeof(struct be_mcc_compl)))
1794                 goto err;
1795
1796         /* Use the default EQ for MCC completions */
1797         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1798                 goto mcc_cq_free;
1799
1800         q = &adapter->mcc_obj.q;
1801         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1802                 goto mcc_cq_destroy;
1803
1804         if (be_cmd_mccq_create(adapter, q, cq))
1805                 goto mcc_q_free;
1806
1807         return 0;
1808
1809 mcc_q_free:
1810         be_queue_free(adapter, q);
1811 mcc_cq_destroy:
1812         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1813 mcc_cq_free:
1814         be_queue_free(adapter, cq);
1815 err:
1816         return -1;
1817 }
1818
1819 static void be_tx_queues_destroy(struct be_adapter *adapter)
1820 {
1821         struct be_queue_info *q;
1822         struct be_tx_obj *txo;
1823         u8 i;
1824
1825         for_all_tx_queues(adapter, txo, i) {
1826                 q = &txo->q;
1827                 if (q->created)
1828                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1829                 be_queue_free(adapter, q);
1830
1831                 q = &txo->cq;
1832                 if (q->created)
1833                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1834                 be_queue_free(adapter, q);
1835         }
1836 }
1837
1838 static int be_num_txqs_want(struct be_adapter *adapter)
1839 {
1840         if (sriov_want(adapter) || be_is_mc(adapter) ||
1841             lancer_chip(adapter) || !be_physfn(adapter) ||
1842             adapter->generation == BE_GEN2)
1843                 return 1;
1844         else
1845                 return MAX_TX_QS;
1846 }
1847
1848 static int be_tx_cqs_create(struct be_adapter *adapter)
1849 {
1850         struct be_queue_info *cq, *eq;
1851         int status;
1852         struct be_tx_obj *txo;
1853         u8 i;
1854
1855         adapter->num_tx_qs = be_num_txqs_want(adapter);
1856         if (adapter->num_tx_qs != MAX_TX_QS) {
1857                 rtnl_lock();
1858                 netif_set_real_num_tx_queues(adapter->netdev,
1859                         adapter->num_tx_qs);
1860                 rtnl_unlock();
1861         }
1862
1863         for_all_tx_queues(adapter, txo, i) {
1864                 cq = &txo->cq;
1865                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1866                                         sizeof(struct be_eth_tx_compl));
1867                 if (status)
1868                         return status;
1869
1870                 /* If num_evt_qs is less than num_tx_qs, then more than
1871                  * one txq shares an eq
1872                  */
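                /* For example, with 8 TX queues on 4 EQs, txq i is bound
                 * to eq i % 4, matching the striding used in be_poll().
                 */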
1873                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1874                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1875                 if (status)
1876                         return status;
1877         }
1878         return 0;
1879 }
1880
1881 static int be_tx_qs_create(struct be_adapter *adapter)
1882 {
1883         struct be_tx_obj *txo;
1884         int i, status;
1885
1886         for_all_tx_queues(adapter, txo, i) {
1887                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1888                                         sizeof(struct be_eth_wrb));
1889                 if (status)
1890                         return status;
1891
1892                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1893                 if (status)
1894                         return status;
1895         }
1896
1897         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1898                  adapter->num_tx_qs);
1899         return 0;
1900 }
1901
1902 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1903 {
1904         struct be_queue_info *q;
1905         struct be_rx_obj *rxo;
1906         int i;
1907
1908         for_all_rx_queues(adapter, rxo, i) {
1909                 q = &rxo->cq;
1910                 if (q->created)
1911                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1912                 be_queue_free(adapter, q);
1913         }
1914 }
1915
1916 static int be_rx_cqs_create(struct be_adapter *adapter)
1917 {
1918         struct be_queue_info *eq, *cq;
1919         struct be_rx_obj *rxo;
1920         int rc, i;
1921
1922         /* We'll create as many RSS rings as there are irqs.
1923          * But when there's only one irq there's no use creating RSS rings
1924          */
1925         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1926                                 num_irqs(adapter) + 1 : 1;
1927         if (adapter->num_rx_qs != MAX_RX_QS) {
1928                 rtnl_lock();
1929                 netif_set_real_num_rx_queues(adapter->netdev,
1930                                              adapter->num_rx_qs);
1931                 rtnl_unlock();
1932         }
1933
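        /* For example, with 4K pages: rx_frag_size 2048 keeps
         * big_page_size at one 4K page (two frags per page), while
         * rx_frag_size 8192 yields an order-1, 8K compound page holding
         * a single frag.
         */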
1934         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1935         for_all_rx_queues(adapter, rxo, i) {
1936                 rxo->adapter = adapter;
1937                 cq = &rxo->cq;
1938                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1939                                 sizeof(struct be_eth_rx_compl));
1940                 if (rc)
1941                         return rc;
1942
1943                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1944                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1945                 if (rc)
1946                         return rc;
1947         }
1948
1949         dev_info(&adapter->pdev->dev,
1950                  "created %d RSS queue(s) and 1 default RX queue\n",
1951                  adapter->num_rx_qs - 1);
1952         return 0;
1953 }
1954
1955 static irqreturn_t be_intx(int irq, void *dev)
1956 {
1957         struct be_adapter *adapter = dev;
1958         int num_evts;
1959
1960         /* With INTx only one EQ is used */
1961         num_evts = event_handle(&adapter->eq_obj[0]);
1962         if (num_evts)
1963                 return IRQ_HANDLED;
1964         else
1965                 return IRQ_NONE;
1966 }
1967
1968 static irqreturn_t be_msix(int irq, void *dev)
1969 {
1970         struct be_eq_obj *eqo = dev;
1971
1972         event_handle(eqo);
1973         return IRQ_HANDLED;
1974 }
1975
1976 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1977 {
1978         return rxcp->tcpf && !rxcp->err;
1979 }
1980
1981 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1982                         int budget)
1983 {
1984         struct be_adapter *adapter = rxo->adapter;
1985         struct be_queue_info *rx_cq = &rxo->cq;
1986         struct be_rx_compl_info *rxcp;
1987         u32 work_done;
1988
1989         for (work_done = 0; work_done < budget; work_done++) {
1990                 rxcp = be_rx_compl_get(rxo);
1991                 if (!rxcp)
1992                         break;
1993
1994                 /* Is it a flush compl that has no data */
1995                 if (unlikely(rxcp->num_rcvd == 0))
1996                         goto loop_continue;
1997
1998                 /* Discard compls with partial DMA (seen on Lancer B0) */
1999                 if (unlikely(!rxcp->pkt_size)) {
2000                         be_rx_compl_discard(rxo, rxcp);
2001                         goto loop_continue;
2002                 }
2003
2004                 /* On BE, drop pkts that arrive due to imperfect filtering in
2005                  * promiscuous mode on some SKUs
2006                  */
2007                 if (unlikely(rxcp->port != adapter->port_num &&
2008                                 !lancer_chip(adapter))) {
2009                         be_rx_compl_discard(rxo, rxcp);
2010                         goto loop_continue;
2011                 }
2012
2013                 if (do_gro(rxcp))
2014                         be_rx_compl_process_gro(rxo, napi, rxcp);
2015                 else
2016                         be_rx_compl_process(rxo, rxcp);
2017 loop_continue:
2018                 be_rx_stats_update(rxo, rxcp);
2019         }
2020
2021         if (work_done) {
2022                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2023
2024                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2025                         be_post_rx_frags(rxo, GFP_ATOMIC);
2026         }
2027
2028         return work_done;
2029 }
2030
2031 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2032                           int budget, int idx)
2033 {
2034         struct be_eth_tx_compl *txcp;
2035         int num_wrbs = 0, work_done;
2036
2037         for (work_done = 0; work_done < budget; work_done++) {
2038                 txcp = be_tx_compl_get(&txo->cq);
2039                 if (!txcp)
2040                         break;
2041                 num_wrbs += be_tx_compl_process(adapter, txo,
2042                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2043                                         wrb_index, txcp));
2044         }
2045
2046         if (work_done) {
2047                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2048                 atomic_sub(num_wrbs, &txo->q.used);
2049
2050                 /* As Tx wrbs have been freed up, wake up netdev queue
2051                  * if it was stopped due to lack of tx wrbs.  */
2052                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2053                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2054                         netif_wake_subqueue(adapter->netdev, idx);
2055                 }
2056
2057                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2058                 tx_stats(txo)->tx_compl += work_done;
2059                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2060         }
2061         return (work_done < budget); /* Done */
2062 }
2063
2064 int be_poll(struct napi_struct *napi, int budget)
2065 {
2066         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2067         struct be_adapter *adapter = eqo->adapter;
2068         int max_work = 0, work, i;
2069         bool tx_done;
2070
2071         /* Process all TXQs serviced by this EQ */
2072         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2073                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2074                                         eqo->tx_budget, i);
2075                 if (!tx_done)
2076                         max_work = budget;
2077         }
2078
2079         /* This loop will iterate twice for EQ0 in which
2080          * completions of the last RXQ (default one) are also processed.
2081          * For other EQs the loop iterates only once
2082          */
2083         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2084                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2085                 max_work = max(work, max_work);
2086         }
2087
2088         if (is_mcc_eqo(eqo))
2089                 be_process_mcc(adapter);
2090
2091         if (max_work < budget) {
2092                 napi_complete(napi);
2093                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2094         } else {
2095                 /* As we'll continue in polling mode, count and clear events */
2096                 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2097         }
2098         return max_work;
2099 }
2100
2101 void be_detect_error(struct be_adapter *adapter)
2102 {
2103         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2104         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2105         u32 i;
2106
2107         if (be_crit_error(adapter))
2108                 return;
2109
2110         if (lancer_chip(adapter)) {
2111                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2112                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2113                         sliport_err1 = ioread32(adapter->db +
2114                                         SLIPORT_ERROR1_OFFSET);
2115                         sliport_err2 = ioread32(adapter->db +
2116                                         SLIPORT_ERROR2_OFFSET);
2117                 }
2118         } else {
2119                 pci_read_config_dword(adapter->pdev,
2120                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2121                 pci_read_config_dword(adapter->pdev,
2122                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2123                 pci_read_config_dword(adapter->pdev,
2124                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2125                 pci_read_config_dword(adapter->pdev,
2126                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2127
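                /* Bits set in the mask registers are expected sources;
                 * only the unmasked UE bits indicate a genuine error.
                 */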
2128                 ue_lo = (ue_lo & ~ue_lo_mask);
2129                 ue_hi = (ue_hi & ~ue_hi_mask);
2130         }
2131
2132         if (ue_lo || ue_hi ||
2133                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2134                 adapter->hw_error = true;
2135                 dev_err(&adapter->pdev->dev,
2136                         "Error detected in the card\n");
2137         }
2138
2139         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2140                 dev_err(&adapter->pdev->dev,
2141                         "ERR: sliport status 0x%x\n", sliport_status);
2142                 dev_err(&adapter->pdev->dev,
2143                         "ERR: sliport error1 0x%x\n", sliport_err1);
2144                 dev_err(&adapter->pdev->dev,
2145                         "ERR: sliport error2 0x%x\n", sliport_err2);
2146         }
2147
2148         if (ue_lo) {
2149                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2150                         if (ue_lo & 1)
2151                                 dev_err(&adapter->pdev->dev,
2152                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2153                 }
2154         }
2155
2156         if (ue_hi) {
2157                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2158                         if (ue_hi & 1)
2159                                 dev_err(&adapter->pdev->dev,
2160                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2161                 }
2162         }
2164 }
2165
2166 static void be_msix_disable(struct be_adapter *adapter)
2167 {
2168         if (msix_enabled(adapter)) {
2169                 pci_disable_msix(adapter->pdev);
2170                 adapter->num_msix_vec = 0;
2171         }
2172 }
2173
2174 static uint be_num_rss_want(struct be_adapter *adapter)
2175 {
2176         u32 num = 0;
2177         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2178              !sriov_want(adapter) && be_physfn(adapter)) {
2179                 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2180                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2181         }
2182         return num;
2183 }
2184
2185 static void be_msix_enable(struct be_adapter *adapter)
2186 {
2187 #define BE_MIN_MSIX_VECTORS             1
2188         int i, status, num_vec, num_roce_vec = 0;
2189         struct device *dev = &adapter->pdev->dev;
2190
2191         /* If RSS queues are not used, need a vec for default RX Q */
2192         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2193         if (be_roce_supported(adapter)) {
2194                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2195                                         (num_online_cpus() + 1));
2196                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2197                 num_vec += num_roce_vec;
2198                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2199         }
2200         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2201
2202         for (i = 0; i < num_vec; i++)
2203                 adapter->msix_entries[i].entry = i;
2204
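        /* pci_enable_msix() returns 0 on success; a positive return is
         * the number of vectors that could be allocated instead, so
         * retry once with that reduced count.
         */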
2205         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2206         if (status == 0) {
2207                 goto done;
2208         } else if (status >= BE_MIN_MSIX_VECTORS) {
2209                 num_vec = status;
2210                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2211                                 num_vec) == 0)
2212                         goto done;
2213         }
2214
2215         dev_warn(dev, "MSIx enable failed\n");
2216         return;
2217 done:
2218         if (be_roce_supported(adapter)) {
2219                 if (num_vec > num_roce_vec) {
2220                         adapter->num_msix_vec = num_vec - num_roce_vec;
2221                         adapter->num_msix_roce_vec =
2222                                 num_vec - adapter->num_msix_vec;
2223                 } else {
2224                         adapter->num_msix_vec = num_vec;
2225                         adapter->num_msix_roce_vec = 0;
2226                 }
2227         } else
2228                 adapter->num_msix_vec = num_vec;
2229         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2230         return;
2231 }
2232
2233 static inline int be_msix_vec_get(struct be_adapter *adapter,
2234                                 struct be_eq_obj *eqo)
2235 {
2236         return adapter->msix_entries[eqo->idx].vector;
2237 }
2238
2239 static int be_msix_register(struct be_adapter *adapter)
2240 {
2241         struct net_device *netdev = adapter->netdev;
2242         struct be_eq_obj *eqo;
2243         int status, i, vec;
2244
2245         for_all_evt_queues(adapter, eqo, i) {
2246                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2247                 vec = be_msix_vec_get(adapter, eqo);
2248                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2249                 if (status)
2250                         goto err_msix;
2251         }
2252
2253         return 0;
2254 err_msix:
2255         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2256                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2257         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2258                 status);
2259         be_msix_disable(adapter);
2260         return status;
2261 }
2262
2263 static int be_irq_register(struct be_adapter *adapter)
2264 {
2265         struct net_device *netdev = adapter->netdev;
2266         int status;
2267
2268         if (msix_enabled(adapter)) {
2269                 status = be_msix_register(adapter);
2270                 if (status == 0)
2271                         goto done;
2272                 /* INTx is not supported for VF */
2273                 if (!be_physfn(adapter))
2274                         return status;
2275         }
2276
2277         /* INTx */
2278         netdev->irq = adapter->pdev->irq;
2279         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2280                         adapter);
2281         if (status) {
2282                 dev_err(&adapter->pdev->dev,
2283                         "INTx request IRQ failed - err %d\n", status);
2284                 return status;
2285         }
2286 done:
2287         adapter->isr_registered = true;
2288         return 0;
2289 }
2290
2291 static void be_irq_unregister(struct be_adapter *adapter)
2292 {
2293         struct net_device *netdev = adapter->netdev;
2294         struct be_eq_obj *eqo;
2295         int i;
2296
2297         if (!adapter->isr_registered)
2298                 return;
2299
2300         /* INTx */
2301         if (!msix_enabled(adapter)) {
2302                 free_irq(netdev->irq, adapter);
2303                 goto done;
2304         }
2305
2306         /* MSIx */
2307         for_all_evt_queues(adapter, eqo, i)
2308                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2309
2310 done:
2311         adapter->isr_registered = false;
2312 }
2313
2314 static void be_rx_qs_destroy(struct be_adapter *adapter)
2315 {
2316         struct be_queue_info *q;
2317         struct be_rx_obj *rxo;
2318         int i;
2319
2320         for_all_rx_queues(adapter, rxo, i) {
2321                 q = &rxo->q;
2322                 if (q->created) {
2323                         be_cmd_rxq_destroy(adapter, q);
2324                         /* After the rxq is invalidated, wait for a grace time
2325                          * of 1ms for all dma to end and the flush compl to
2326                          * arrive
2327                          */
2328                         mdelay(1);
2329                         be_rx_cq_clean(rxo);
2330                 }
2331                 be_queue_free(adapter, q);
2332         }
2333 }
2334
2335 static int be_close(struct net_device *netdev)
2336 {
2337         struct be_adapter *adapter = netdev_priv(netdev);
2338         struct be_eq_obj *eqo;
2339         int i;
2340
2341         be_roce_dev_close(adapter);
2342
2343         be_async_mcc_disable(adapter);
2344
2345         if (!lancer_chip(adapter))
2346                 be_intr_set(adapter, false);
2347
2348         for_all_evt_queues(adapter, eqo, i) {
2349                 napi_disable(&eqo->napi);
2350                 if (msix_enabled(adapter))
2351                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2352                 else
2353                         synchronize_irq(netdev->irq);
2354                 be_eq_clean(eqo);
2355         }
2356
2357         be_irq_unregister(adapter);
2358
2359         /* Wait for all pending tx completions to arrive so that
2360          * all tx skbs are freed.
2361          */
2362         be_tx_compl_clean(adapter);
2363
2364         be_rx_qs_destroy(adapter);
2365         return 0;
2366 }
2367
2368 static int be_rx_qs_create(struct be_adapter *adapter)
2369 {
2370         struct be_rx_obj *rxo;
2371         int rc, i, j;
2372         u8 rsstable[128];
2373
2374         for_all_rx_queues(adapter, rxo, i) {
2375                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2376                                     sizeof(struct be_eth_rx_d));
2377                 if (rc)
2378                         return rc;
2379         }
2380
2381         /* The FW would like the default RXQ to be created first */
2382         rxo = default_rxo(adapter);
2383         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2384                                adapter->if_handle, false, &rxo->rss_id);
2385         if (rc)
2386                 return rc;
2387
2388         for_all_rss_queues(adapter, rxo, i) {
2389                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2390                                        rx_frag_size, adapter->if_handle,
2391                                        true, &rxo->rss_id);
2392                 if (rc)
2393                         return rc;
2394         }
2395
2396         if (be_multi_rxq(adapter)) {
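                /* Fill the 128-entry RSS indirection table by cycling
                 * through the RSS rings' rss_ids; e.g. with three rings
                 * r0,r1,r2 the pattern r0,r1,r2,r0,... repeats across
                 * all 128 slots.
                 */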
2397                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2398                         for_all_rss_queues(adapter, rxo, i) {
2399                                 if ((j + i) >= 128)
2400                                         break;
2401                                 rsstable[j + i] = rxo->rss_id;
2402                         }
2403                 }
2404                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2405                 if (rc)
2406                         return rc;
2407         }
2408
2409         /* First time posting */
2410         for_all_rx_queues(adapter, rxo, i)
2411                 be_post_rx_frags(rxo, GFP_KERNEL);
2412         return 0;
2413 }
2414
2415 static int be_open(struct net_device *netdev)
2416 {
2417         struct be_adapter *adapter = netdev_priv(netdev);
2418         struct be_eq_obj *eqo;
2419         struct be_rx_obj *rxo;
2420         struct be_tx_obj *txo;
2421         u8 link_status;
2422         int status, i;
2423
2424         status = be_rx_qs_create(adapter);
2425         if (status)
2426                 goto err;
2427
2428         be_irq_register(adapter);
2429
2430         if (!lancer_chip(adapter))
2431                 be_intr_set(adapter, true);
2432
2433         for_all_rx_queues(adapter, rxo, i)
2434                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2435
2436         for_all_tx_queues(adapter, txo, i)
2437                 be_cq_notify(adapter, txo->cq.id, true, 0);
2438
2439         be_async_mcc_enable(adapter);
2440
2441         for_all_evt_queues(adapter, eqo, i) {
2442                 napi_enable(&eqo->napi);
2443                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2444         }
2445
2446         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2447         if (!status)
2448                 be_link_status_update(adapter, link_status);
2449
2450         be_roce_dev_open(adapter);
2451         return 0;
2452 err:
2453         be_close(adapter->netdev);
2454         return -EIO;
2455 }
2456
2457 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2458 {
2459         struct be_dma_mem cmd;
2460         int status = 0;
2461         u8 mac[ETH_ALEN];
2462
2463         memset(mac, 0, ETH_ALEN);
2464
2465         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2466         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2467                                     GFP_KERNEL);
2468         if (cmd.va == NULL)
2469                 return -1;
2470         memset(cmd.va, 0, cmd.size);
2471
2472         if (enable) {
2473                 status = pci_write_config_dword(adapter->pdev,
2474                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2475                 if (status) {
2476                         dev_err(&adapter->pdev->dev,
2477                                 "Could not enable Wake-on-lan\n");
2478                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2479                                           cmd.dma);
2480                         return status;
2481                 }
2482                 status = be_cmd_enable_magic_wol(adapter,
2483                                 adapter->netdev->dev_addr, &cmd);
2484                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2485                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2486         } else {
2487                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2488                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2489                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2490         }
2491
2492         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2493         return status;
2494 }
2495
2496 /*
2497  * Generate a seed MAC address from the PF MAC Address using jhash.
2498  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2499  * These addresses are programmed in the ASIC by the PF and the VF driver
2500  * queries for the MAC address during its probe.
2501  */
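/* For example (illustrative seed only): a seed of 02:00:c9:00:00:10 gives
 * VF0 ...:10, VF1 ...:11, and so on; only mac[5] is bumped per VF.
 */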
2502 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2503 {
2504         u32 vf;
2505         int status = 0;
2506         u8 mac[ETH_ALEN];
2507         struct be_vf_cfg *vf_cfg;
2508
2509         be_vf_eth_addr_generate(adapter, mac);
2510
2511         for_all_vfs(adapter, vf_cfg, vf) {
2512                 if (lancer_chip(adapter)) {
2513                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2514                 } else {
2515                         status = be_cmd_pmac_add(adapter, mac,
2516                                                  vf_cfg->if_handle,
2517                                                  &vf_cfg->pmac_id, vf + 1);
2518                 }
2519
2520                 if (status)
2521                         dev_err(&adapter->pdev->dev,
2522                         "Mac address assignment failed for VF %d\n", vf);
2523                 else
2524                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2525
2526                 mac[5] += 1;
2527         }
2528         return status;
2529 }
2530
2531 static void be_vf_clear(struct be_adapter *adapter)
2532 {
2533         struct be_vf_cfg *vf_cfg;
2534         u32 vf;
2535
2536         if (be_find_vfs(adapter, ASSIGNED)) {
2537                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2538                 goto done;
2539         }
2540
2541         for_all_vfs(adapter, vf_cfg, vf) {
2542                 if (lancer_chip(adapter))
2543                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2544                 else
2545                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2546                                         vf_cfg->pmac_id, vf + 1);
2547
2548                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2549         }
2550         pci_disable_sriov(adapter->pdev);
2551 done:
2552         kfree(adapter->vf_cfg);
2553         adapter->num_vfs = 0;
2554 }
2555
2556 static int be_clear(struct be_adapter *adapter)
2557 {
2558         int i = 1;
2559
2560         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2561                 cancel_delayed_work_sync(&adapter->work);
2562                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2563         }
2564
2565         if (sriov_enabled(adapter))
2566                 be_vf_clear(adapter);
2567
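        /* pmac_id[0] holds the primary MAC; delete only the additional
         * uc MACs that were programmed, hence i starts at 1.
         */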
2568         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2569                 be_cmd_pmac_del(adapter, adapter->if_handle,
2570                         adapter->pmac_id[i], 0);
2571
2572         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2573
2574         be_mcc_queues_destroy(adapter);
2575         be_rx_cqs_destroy(adapter);
2576         be_tx_queues_destroy(adapter);
2577         be_evt_queues_destroy(adapter);
2578
2579         be_msix_disable(adapter);
2580         return 0;
2581 }
2582
2583 static int be_vf_setup_init(struct be_adapter *adapter)
2584 {
2585         struct be_vf_cfg *vf_cfg;
2586         int vf;
2587
2588         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2589                                   GFP_KERNEL);
2590         if (!adapter->vf_cfg)
2591                 return -ENOMEM;
2592
2593         for_all_vfs(adapter, vf_cfg, vf) {
2594                 vf_cfg->if_handle = -1;
2595                 vf_cfg->pmac_id = -1;
2596         }
2597         return 0;
2598 }
2599
2600 static int be_vf_setup(struct be_adapter *adapter)
2601 {
2602         struct be_vf_cfg *vf_cfg;
2603         struct device *dev = &adapter->pdev->dev;
2604         u32 cap_flags, en_flags, vf;
2605         u16 def_vlan, lnk_speed;
2606         int status, enabled_vfs;
2607
2608         enabled_vfs = be_find_vfs(adapter, ENABLED);
2609         if (enabled_vfs) {
2610                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2611                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2612                 return 0;
2613         }
2614
2615         if (num_vfs > adapter->dev_num_vfs) {
2616                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2617                          adapter->dev_num_vfs, num_vfs);
2618                 num_vfs = adapter->dev_num_vfs;
2619         }
2620
2621         status = pci_enable_sriov(adapter->pdev, num_vfs);
2622         if (!status) {
2623                 adapter->num_vfs = num_vfs;
2624         } else {
2625                 /* Platform doesn't support SRIOV though device supports it */
2626                 dev_warn(dev, "SRIOV enable failed\n");
2627                 return 0;
2628         }
2629
2630         status = be_vf_setup_init(adapter);
2631         if (status)
2632                 goto err;
2633
2634         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2635                                 BE_IF_FLAGS_MULTICAST;
2636         for_all_vfs(adapter, vf_cfg, vf) {
2637                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2638                                           &vf_cfg->if_handle, vf + 1);
2639                 if (status)
2640                         goto err;
2641         }
2642
2643         if (!enabled_vfs) {
2644                 status = be_vf_eth_addr_config(adapter);
2645                 if (status)
2646                         goto err;
2647         }
2648
2649         for_all_vfs(adapter, vf_cfg, vf) {
2650                 lnk_speed = 1000;
2651                 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
2652                 if (status)
2653                         goto err;
2654                 vf_cfg->tx_rate = lnk_speed * 10;
2655
2656                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2657                                 vf + 1, vf_cfg->if_handle);
2658                 if (status)
2659                         goto err;
2660                 vf_cfg->def_vid = def_vlan;
2661         }
2662         return 0;
2663 err:
2664         return status;
2665 }
2666
2667 static void be_setup_init(struct be_adapter *adapter)
2668 {
2669         adapter->vlan_prio_bmap = 0xff;
2670         adapter->phy.link_speed = -1;
2671         adapter->if_handle = -1;
2672         adapter->be3_native = false;
2673         adapter->promiscuous = false;
2674         adapter->eq_next_idx = 0;
2675 }
2676
2677 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2678                            bool *active_mac, u32 *pmac_id)
2679 {
2680         int status = 0;
2681
2682         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2683                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2684                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2685                         *active_mac = true;
2686                 else
2687                         *active_mac = false;
2688
2689                 return status;
2690         }
2691
2692         if (lancer_chip(adapter)) {
2693                 status = be_cmd_get_mac_from_list(adapter, mac,
2694                                                   active_mac, pmac_id, 0);
2695                 if (*active_mac) {
2696                         status = be_cmd_mac_addr_query(adapter, mac, false,
2697                                                        if_handle, *pmac_id);
2698                 }
2699         } else if (be_physfn(adapter)) {
2700                 /* For BE3, for PF get permanent MAC */
2701                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2702                 *active_mac = false;
2703         } else {
2704                 /* For BE3, for VF get the soft MAC assigned by the PF */
2705                 status = be_cmd_mac_addr_query(adapter, mac, false,
2706                                                if_handle, 0);
2707                 *active_mac = true;
2708         }
2709         return status;
2710 }
2711
2712 /* Routine to query per-function resource limits */
2713 static int be_get_config(struct be_adapter *adapter)
2714 {
2715         int pos;
2716         u16 dev_num_vfs;
2717
2718         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2719         if (pos) {
2720                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2721                                      &dev_num_vfs);
2722                 if (!lancer_chip(adapter))
2723                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2724                 adapter->dev_num_vfs = dev_num_vfs;
2725         }
2726         return 0;
2727 }
2728
2729 static int be_setup(struct be_adapter *adapter)
2730 {
2731         struct device *dev = &adapter->pdev->dev;
2732         u32 cap_flags, en_flags;
2733         u32 tx_fc, rx_fc;
2734         int status;
2735         u8 mac[ETH_ALEN];
2736         bool active_mac;
2737
2738         be_setup_init(adapter);
2739
2740         be_get_config(adapter);
2741
2742         be_cmd_req_native_mode(adapter);
2743
2744         be_msix_enable(adapter);
2745
2746         status = be_evt_queues_create(adapter);
2747         if (status)
2748                 goto err;
2749
2750         status = be_tx_cqs_create(adapter);
2751         if (status)
2752                 goto err;
2753
2754         status = be_rx_cqs_create(adapter);
2755         if (status)
2756                 goto err;
2757
2758         status = be_mcc_queues_create(adapter);
2759         if (status)
2760                 goto err;
2761
2762         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2763                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2764         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2765                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2766
2767         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2768                 cap_flags |= BE_IF_FLAGS_RSS;
2769                 en_flags |= BE_IF_FLAGS_RSS;
2770         }
2771
2772         if (lancer_chip(adapter) && !be_physfn(adapter)) {
2773                 en_flags = BE_IF_FLAGS_UNTAGGED |
2774                             BE_IF_FLAGS_BROADCAST |
2775                             BE_IF_FLAGS_MULTICAST;
2776                 cap_flags = en_flags;
2777         }
2778
2779         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2780                                   &adapter->if_handle, 0);
2781         if (status != 0)
2782                 goto err;
2783
2784         memset(mac, 0, ETH_ALEN);
2785         active_mac = false;
2786         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2787                                  &active_mac, &adapter->pmac_id[0]);
2788         if (status != 0)
2789                 goto err;
2790
2791         if (!active_mac) {
2792                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2793                                          &adapter->pmac_id[0], 0);
2794                 if (status != 0)
2795                         goto err;
2796         }
2797
2798         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2799                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2800                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2801         }
2802
2803         status = be_tx_qs_create(adapter);
2804         if (status)
2805                 goto err;
2806
2807         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2808
2809         if (adapter->vlans_added)
2810                 be_vid_config(adapter);
2811
2812         be_set_rx_mode(adapter->netdev);
2813
2814         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2815
2816         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2817                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2818                                         adapter->rx_fc);
2819
2820         if (be_physfn(adapter) && num_vfs) {
2821                 if (adapter->dev_num_vfs)
2822                         be_vf_setup(adapter);
2823                 else
2824                         dev_warn(dev, "device doesn't support SRIOV\n");
2825         }
2826
2827         be_cmd_get_phy_info(adapter);
2828         if (be_pause_supported(adapter))
2829                 adapter->phy.fc_autoneg = 1;
2830
2831         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2832         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2833         return 0;
2834 err:
2835         be_clear(adapter);
2836         return status;
2837 }
2838
2839 #ifdef CONFIG_NET_POLL_CONTROLLER
2840 static void be_netpoll(struct net_device *netdev)
2841 {
2842         struct be_adapter *adapter = netdev_priv(netdev);
2843         struct be_eq_obj *eqo;
2844         int i;
2845
2846         for_all_evt_queues(adapter, eqo, i)
2847                 event_handle(eqo);
2850 }
2851 #endif
2852
2853 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2854 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2855
2856 static bool be_flash_redboot(struct be_adapter *adapter,
2857                         const u8 *p, u32 img_start, int image_size,
2858                         int hdr_size)
2859 {
2860         u32 crc_offset;
2861         u8 flashed_crc[4];
2862         int status;
2863
2864         crc_offset = hdr_size + img_start + image_size - 4;
2865
2866         p += crc_offset;
2867
2868         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2869                         (image_size - 4));
2870         if (status) {
2871                 dev_err(&adapter->pdev->dev,
2872                 "could not get crc from flash, not flashing redboot\n");
2873                 return false;
2874         }
2875
2876         /* Update redboot only if the crc does not match */
2877         if (!memcmp(flashed_crc, p, 4))
2878                 return false;
2879         else
2880                 return true;
2881 }
2882
2883 static bool phy_flashing_required(struct be_adapter *adapter)
2884 {
2885         return (adapter->phy.phy_type == TN_8022 &&
2886                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2887 }
2888
2889 static bool is_comp_in_ufi(struct be_adapter *adapter,
2890                            struct flash_section_info *fsec, int type)
2891 {
2892         int i = 0, img_type = 0;
2893         struct flash_section_info_g2 *fsec_g2 = NULL;
2894
2895         if (adapter->generation != BE_GEN3)
2896                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2897
2898         for (i = 0; i < MAX_FLASH_COMP; i++) {
2899                 if (fsec_g2)
2900                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2901                 else
2902                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2903
2904                 if (img_type == type)
2905                         return true;
2906         }
2907         return false;
2909 }
2910
2911 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2912                                                 int header_size,
2913                                                 const struct firmware *fw)
2914 {
2915         struct flash_section_info *fsec = NULL;
2916         const u8 *p = fw->data;
2917
2918         p += header_size;
2919         while (p + sizeof(flash_cookie) <= fw->data + fw->size) {
2920                 fsec = (struct flash_section_info *)p;
2921                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2922                         return fsec;
2923                 p += 32;
2924         }
2925         return NULL;
2926 }
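/*
 * Editor's note: get_fsec_info() walks the UFI in fixed 32-byte strides
 * looking for the 32-byte flash cookie, so the section table must start on
 * a 32-byte boundary relative to the headers. A generic, explicitly
 * bounded version of that scan might look like this (hypothetical helper,
 * illustration only):
 */
static const u8 *scan_for_cookie(const u8 *buf, size_t len,
                                 const void *cookie, size_t cookie_len,
                                 size_t stride)
{
        const u8 *p;

        /* never compare past the end of the buffer */
        for (p = buf; p + cookie_len <= buf + len; p += stride)
                if (!memcmp(p, cookie, cookie_len))
                        return p;
        return NULL;
}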
2927
2928 static int be_flash_data(struct be_adapter *adapter,
2929                          const struct firmware *fw,
2930                          struct be_dma_mem *flash_cmd,
2931                          int num_of_images)
2933 {
2934         int status = 0, i, filehdr_size = 0;
2935         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2936         u32 total_bytes = 0, flash_op;
2937         int num_bytes;
2938         const u8 *p = fw->data;
2939         struct be_cmd_write_flashrom *req = flash_cmd->va;
2940         const struct flash_comp *pflashcomp;
2941         int num_comp, hdr_size;
2942         struct flash_section_info *fsec = NULL;
2943
2944         struct flash_comp gen3_flash_types[] = {
2945                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2946                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2947                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2948                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2949                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2950                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2951                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2952                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2953                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2954                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2955                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2956                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2957                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2958                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2959                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2960                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2961                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2962                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2963                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2964                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2965         };
2966
2967         struct flash_comp gen2_flash_types[] = {
2968                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2969                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2970                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2971                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2972                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2973                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2974                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2975                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2976                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2977                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2978                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2979                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2980                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2981                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2982                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2983                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2984         };
2985
2986         if (adapter->generation == BE_GEN3) {
2987                 pflashcomp = gen3_flash_types;
2988                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2989                 num_comp = ARRAY_SIZE(gen3_flash_types);
2990         } else {
2991                 pflashcomp = gen2_flash_types;
2992                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2993                 num_comp = ARRAY_SIZE(gen2_flash_types);
2994         }
2995         /* Get flash section info*/
2996         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2997         if (!fsec) {
2998                 dev_err(&adapter->pdev->dev,
2999                         "Invalid cookie. UFI corrupted?\n");
3000                 return -1;
3001         }
3002         for (i = 0; i < num_comp; i++) {
3003                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3004                         continue;
3005
3006                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3007                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3008                         continue;
3009
3010                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3011                     !phy_flashing_required(adapter))
3012                         continue;
3014
3015                 hdr_size = filehdr_size + img_hdrs_size;
3017
3018                 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3019                     (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3020                                        pflashcomp[i].size, hdr_size)))
3021                         continue;
3022
3023                 /* Flash the component */
3024                 p = fw->data;
3025                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3026                 if (p + pflashcomp[i].size > fw->data + fw->size)
3027                         return -1;
3028                 total_bytes = pflashcomp[i].size;
3029                 while (total_bytes) {
3030                         num_bytes = min_t(u32, total_bytes, 32 * 1024);
3034                         total_bytes -= num_bytes;
3035                         if (!total_bytes) {
3036                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3037                                         flash_op = FLASHROM_OPER_PHY_FLASH;
3038                                 else
3039                                         flash_op = FLASHROM_OPER_FLASH;
3040                         } else {
3041                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3042                                         flash_op = FLASHROM_OPER_PHY_SAVE;
3043                                 else
3044                                         flash_op = FLASHROM_OPER_SAVE;
3045                         }
3046                         memcpy(req->params.data_buf, p, num_bytes);
3047                         p += num_bytes;
3048                         status = be_cmd_write_flashrom(adapter, flash_cmd,
3049                                 pflashcomp[i].optype, flash_op, num_bytes);
3050                         if (status) {
3051                                 if (status == ILLEGAL_IOCTL_REQ &&
3052                                     pflashcomp[i].optype == OPTYPE_PHY_FW)
3053                                         break;
3055                                 dev_err(&adapter->pdev->dev,
3056                                         "cmd to write to flash rom failed.\n");
3057                                 return -1;
3058                         }
3059                 }
3060         }
3061         return 0;
3062 }
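/*
 * Editor's note: the write loop above implements a simple stage/commit
 * protocol -- every 32KB chunk except the last is sent with a SAVE opcode
 * (staged by the adapter) and the final chunk with a FLASH opcode, which
 * commits the whole component. A stripped-down sketch of that pattern
 * (hypothetical callback type, illustration only):
 */
static int flash_in_chunks(const u8 *img, u32 len, u32 chunk_size,
                           int (*write)(const u8 *buf, u32 n, bool commit))
{
        int status;
        u32 n;

        while (len) {
                n = min(len, chunk_size);
                len -= n;
                /* only the last chunk carries the commit (FLASH) opcode */
                status = write(img, n, len == 0);
                if (status)
                        return status;
                img += n;
        }
        return 0;
}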
3063
3064 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3065 {
3066         if (fhdr == NULL)
3067                 return 0;
3068         if (fhdr->build[0] == '3')
3069                 return BE_GEN3;
3070         else if (fhdr->build[0] == '2')
3071                 return BE_GEN2;
3072         else
3073                 return 0;
3074 }
3075
3076 static int lancer_wait_idle(struct be_adapter *adapter)
3077 {
3078 #define SLIPORT_IDLE_TIMEOUT 30
3079         u32 reg_val;
3080         int status = 0, i;
3081
3082         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3083                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3084                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3085                         break;
3086
3087                 ssleep(1);
3088         }
3089
3090         if (i == SLIPORT_IDLE_TIMEOUT)
3091                 status = -ETIMEDOUT;
3092
3093         return status;
3094 }
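/*
 * Editor's note: lancer_wait_idle() is the usual poll-with-timeout shape.
 * A generic second-granularity variant, for illustration only (the driver
 * open-codes it above; the helper name is hypothetical):
 */
static int poll_reg_clear(void __iomem *reg, u32 mask, int timeout_s)
{
        int i;

        for (i = 0; i < timeout_s; i++) {
                if (!(ioread32(reg) & mask))
                        return 0;
                ssleep(1);
        }
        return -ETIMEDOUT;
}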
3095
3096 static int lancer_fw_reset(struct be_adapter *adapter)
3097 {
3098         int status = 0;
3099
3100         status = lancer_wait_idle(adapter);
3101         if (status)
3102                 return status;
3103
3104         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3105                   PHYSDEV_CONTROL_OFFSET);
3106
3107         return status;
3108 }
3109
3110 static int lancer_fw_download(struct be_adapter *adapter,
3111                                 const struct firmware *fw)
3112 {
3113 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3114 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3115         struct be_dma_mem flash_cmd;
3116         const u8 *data_ptr = NULL;
3117         u8 *dest_image_ptr = NULL;
3118         size_t image_size = 0;
3119         u32 chunk_size = 0;
3120         u32 data_written = 0;
3121         u32 offset = 0;
3122         int status = 0;
3123         u8 add_status = 0;
3124         u8 change_status;
3125
3126         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3127                 dev_err(&adapter->pdev->dev,
3128                         "FW image not properly aligned. "
3129                         "Length must be 4-byte aligned.\n");
3130                 status = -EINVAL;
3131                 goto lancer_fw_exit;
3132         }
3133
3134         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) +
3135                          LANCER_FW_DOWNLOAD_CHUNK;
3136         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3137                                                 &flash_cmd.dma, GFP_KERNEL);
3138         if (!flash_cmd.va) {
3139                 status = -ENOMEM;
3140                 dev_err(&adapter->pdev->dev,
3141                         "Memory allocation failure while flashing\n");
3142                 goto lancer_fw_exit;
3143         }
3144
3145         dest_image_ptr = flash_cmd.va +
3146                                 sizeof(struct lancer_cmd_req_write_object);
3147         image_size = fw->size;
3148         data_ptr = fw->data;
3149
3150         while (image_size) {
3151                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3152
3153                 /* Copy the image chunk content. */
3154                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3155
3156                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3157                                                  chunk_size, offset,
3158                                                  LANCER_FW_DOWNLOAD_LOCATION,
3159                                                  &data_written, &change_status,
3160                                                  &add_status);
3161                 if (status)
3162                         break;
3163
3164                 offset += data_written;
3165                 data_ptr += data_written;
3166                 image_size -= data_written;
3167         }
3168
3169         if (!status) {
3170                 /* Commit the FW written */
3171                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3172                                                  0, offset,
3173                                                  LANCER_FW_DOWNLOAD_LOCATION,
3174                                                  &data_written, &change_status,
3175                                                  &add_status);
3176         }
3177
3178         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3179                                 flash_cmd.dma);
3180         if (status) {
3181                 dev_err(&adapter->pdev->dev,
3182                         "Firmware load error. "
3183                         "Status code: 0x%x Additional Status: 0x%x\n",
3184                         status, add_status);
3185                 goto lancer_fw_exit;
3186         }
3187
3188         if (change_status == LANCER_FW_RESET_NEEDED) {
3189                 status = lancer_fw_reset(adapter);
3190                 if (status) {
3191                         dev_err(&adapter->pdev->dev,
3192                                 "Adapter busy for FW reset.\n"
3193                                 "New FW will not be active.\n");
3194                         goto lancer_fw_exit;
3195                 }
3196         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3197                 dev_err(&adapter->pdev->dev,
3198                         "System reboot required for new FW to be active\n");
3199         }
3201
3202         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3203 lancer_fw_exit:
3204         return status;
3205 }
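/*
 * Editor's note: on Lancer the final lancer_cmd_write_object() call with a
 * zero length is what commits everything previously staged at /prg; the
 * data chunks alone do not activate the image. A hypothetical wrapper that
 * makes the commit step explicit (illustration only, same call as above):
 */
static int lancer_commit_fw(struct be_adapter *adapter,
                            struct be_dma_mem *flash_cmd, u32 total_len,
                            u8 *change_status, u8 *add_status)
{
        u32 data_written;

        /* a zero-length WRITE_OBJECT at the end-of-image offset = commit */
        return lancer_cmd_write_object(adapter, flash_cmd, 0, total_len,
                                       LANCER_FW_DOWNLOAD_LOCATION,
                                       &data_written, change_status,
                                       add_status);
}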
3206
3207 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3208 {
3209         struct flash_file_hdr_g2 *fhdr;
3210         struct flash_file_hdr_g3 *fhdr3;
3211         struct image_hdr *img_hdr_ptr = NULL;
3212         struct be_dma_mem flash_cmd;
3213         const u8 *p;
3214         int status = 0, i = 0, num_imgs = 0;
3215
3216         p = fw->data;
3217         fhdr = (struct flash_file_hdr_g2 *) p;
3218
3219         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3220         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3221                                           &flash_cmd.dma, GFP_KERNEL);
3222         if (!flash_cmd.va) {
3223                 status = -ENOMEM;
3224                 dev_err(&adapter->pdev->dev,
3225                         "Memory allocation failure while flashing\n");
3226                 goto be_fw_exit;
3227         }
3228
3229         if ((adapter->generation == BE_GEN3) &&
3230                         (get_ufigen_type(fhdr) == BE_GEN3)) {
3231                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3232                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3233                 for (i = 0; i < num_imgs; i++) {
3234                         img_hdr_ptr = (struct image_hdr *) (fw->data +
3235                                         (sizeof(struct flash_file_hdr_g3) +
3236                                          i * sizeof(struct image_hdr)));
3237                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3238                                 status = be_flash_data(adapter, fw, &flash_cmd,
3239                                                         num_imgs);
3240                 }
3241         } else if ((adapter->generation == BE_GEN2) &&
3242                         (get_ufigen_type(fhdr) == BE_GEN2)) {
3243                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3244         } else {
3245                 dev_err(&adapter->pdev->dev,
3246                         "UFI and Interface are not compatible for flashing\n");
3247                 status = -1;
3248         }
3249
3250         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3251                           flash_cmd.dma);
3252         if (status) {
3253                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3254                 goto be_fw_exit;
3255         }
3256
3257         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3258
3259 be_fw_exit:
3260         return status;
3261 }
3262
3263 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3264 {
3265         const struct firmware *fw;
3266         int status;
3267
3268         if (!netif_running(adapter->netdev)) {
3269                 dev_err(&adapter->pdev->dev,
3270                         "Firmware load not allowed (interface is down)\n");
3271                 return -ENETDOWN;
3272         }
3273
3274         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3275         if (status)
3276                 goto fw_exit;
3277
3278         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3279
3280         if (lancer_chip(adapter))
3281                 status = lancer_fw_download(adapter, fw);
3282         else
3283                 status = be_fw_download(adapter, fw);
3284
3285 fw_exit:
3286         release_firmware(fw);
3287         return status;
3288 }
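/*
 * Editor's note: be_load_fw() is the entry point behind the ethtool
 * firmware-flash path (ethtool -f). A minimal sketch of such a caller,
 * assuming the standard .flash_device hook signature (hypothetical
 * function name; the real hook lives in be_ethtool.c):
 */
static int be_flash_device_example(struct net_device *netdev,
                                   struct ethtool_flash *efl)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        /* efl->data carries the firmware file name requested by the user */
        return be_load_fw(adapter, (u8 *)efl->data);
}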
3289
3290 static const struct net_device_ops be_netdev_ops = {
3291         .ndo_open               = be_open,
3292         .ndo_stop               = be_close,
3293         .ndo_start_xmit         = be_xmit,
3294         .ndo_set_rx_mode        = be_set_rx_mode,
3295         .ndo_set_mac_address    = be_mac_addr_set,
3296         .ndo_change_mtu         = be_change_mtu,
3297         .ndo_get_stats64        = be_get_stats64,
3298         .ndo_validate_addr      = eth_validate_addr,
3299         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3300         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3301         .ndo_set_vf_mac         = be_set_vf_mac,
3302         .ndo_set_vf_vlan        = be_set_vf_vlan,
3303         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3304         .ndo_get_vf_config      = be_get_vf_config,
3305 #ifdef CONFIG_NET_POLL_CONTROLLER
3306         .ndo_poll_controller    = be_netpoll,
3307 #endif
3308 };
3309
3310 static void be_netdev_init(struct net_device *netdev)
3311 {
3312         struct be_adapter *adapter = netdev_priv(netdev);
3313         struct be_eq_obj *eqo;
3314         int i;
3315
3316         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3317                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3318                 NETIF_F_HW_VLAN_TX;
3319         if (be_multi_rxq(adapter))
3320                 netdev->hw_features |= NETIF_F_RXHASH;
3321
3322         netdev->features |= netdev->hw_features |
3323                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3324
3325         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3326                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3327
3328         netdev->priv_flags |= IFF_UNICAST_FLT;
3329
3330         netdev->flags |= IFF_MULTICAST;
3331
3332         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3333
3334         netdev->netdev_ops = &be_netdev_ops;
3335
3336         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3337
3338         for_all_evt_queues(adapter, eqo, i)
3339                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3340 }
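/*
 * Editor's note: in be_netdev_init() above, hw_features is the set the
 * user may toggle with "ethtool -K", while features is what is currently
 * enabled. VLAN RX and VLAN filtering are added only to features, i.e.
 * they are always on for this driver and not user-clearable.
 */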
3341
3342 static void be_unmap_pci_bars(struct be_adapter *adapter)
3343 {
3344         if (adapter->csr)
3345                 iounmap(adapter->csr);
3346         if (adapter->db)
3347                 iounmap(adapter->db);
3348         if (adapter->roce_db.base)
3349                 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3350 }
3351
3352 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3353 {
3354         struct pci_dev *pdev = adapter->pdev;
3355         u8 __iomem *addr;
3356
3357         addr = pci_iomap(pdev, 2, 0);
3358         if (addr == NULL)
3359                 return -ENOMEM;
3360
3361         adapter->roce_db.base = addr;
3362         adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3363         adapter->roce_db.size = 8192;
3364         adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3365         return 0;
3366 }
3367
3368 static int be_map_pci_bars(struct be_adapter *adapter)
3369 {
3370         u8 __iomem *addr;
3371         int db_reg;
3372
3373         if (lancer_chip(adapter)) {
3374                 if (be_type_2_3(adapter)) {
3375                         addr = ioremap_nocache(
3376                                         pci_resource_start(adapter->pdev, 0),
3377                                         pci_resource_len(adapter->pdev, 0));
3378                         if (addr == NULL)
3379                                 return -ENOMEM;
3380                         adapter->db = addr;
3381                 }
3382                 if (adapter->if_type == SLI_INTF_TYPE_3) {
3383                         if (lancer_roce_map_pci_bars(adapter))
3384                                 goto pci_map_err;
3385                 }
3386                 return 0;
3387         }
3388
3389         if (be_physfn(adapter)) {
3390                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3391                                 pci_resource_len(adapter->pdev, 2));
3392                 if (addr == NULL)
3393                         return -ENOMEM;
3394                 adapter->csr = addr;
3395         }
3396
3397         if (adapter->generation == BE_GEN2 || be_physfn(adapter))
3398                 db_reg = 4;
3399         else
3400                 db_reg = 0;
3405         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3406                                 pci_resource_len(adapter->pdev, db_reg));
3407         if (addr == NULL)
3408                 goto pci_map_err;
3409         adapter->db = addr;
3410         if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3411                 adapter->roce_db.size = 4096;
3412                 adapter->roce_db.io_addr =
3413                                 pci_resource_start(adapter->pdev, db_reg);
3414                 adapter->roce_db.total_size =
3415                                 pci_resource_len(adapter->pdev, db_reg);
3416         }
3417         return 0;
3418 pci_map_err:
3419         be_unmap_pci_bars(adapter);
3420         return -ENOMEM;
3421 }
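/*
 * Editor's note: BAR usage as implied by the mappings above. Lancer: BAR0
 * holds the doorbells and BAR2 the RoCE doorbells (SLI_INTF_TYPE_3 only).
 * BE2/BE3: BAR2 holds the CSR space (mapped for the PF only), and the
 * doorbells live in BAR4 for GEN2 functions and GEN3 PFs but in BAR0 for
 * GEN3 VFs; Skyhawk reuses the doorbell BAR for its RoCE doorbell range.
 */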
3422
3423 static void be_ctrl_cleanup(struct be_adapter *adapter)
3424 {
3425         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3426
3427         be_unmap_pci_bars(adapter);
3428
3429         if (mem->va)
3430                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3431                                   mem->dma);
3432
3433         mem = &adapter->rx_filter;
3434         if (mem->va)
3435                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3436                                   mem->dma);
3437         kfree(adapter->pmac_id);
3438 }
3439
3440 static int be_ctrl_init(struct be_adapter *adapter)
3441 {
3442         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3443         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3444         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3445         int status;
3446
3447         status = be_map_pci_bars(adapter);
3448         if (status)
3449                 goto done;
3450
3451         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3452         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3453                                                 mbox_mem_alloc->size,
3454                                                 &mbox_mem_alloc->dma,
3455                                                 GFP_KERNEL);
3456         if (!mbox_mem_alloc->va) {
3457                 status = -ENOMEM;
3458                 goto unmap_pci_bars;
3459         }
3460         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3461         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3462         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3463         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3464
3465         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3466         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3467                                         &rx_filter->dma, GFP_KERNEL);
3468         if (rx_filter->va == NULL) {
3469                 status = -ENOMEM;
3470                 goto free_mbox;
3471         }
3472         memset(rx_filter->va, 0, rx_filter->size);
3473
3474         /* primary mac needs 1 pmac entry */
3475         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3476                                    sizeof(*adapter->pmac_id), GFP_KERNEL);
3477         if (!adapter->pmac_id) {
3478                 status = -ENOMEM;
3479                 goto free_rx_filter;
3480         }
3479
3480         mutex_init(&adapter->mbox_lock);
3481         spin_lock_init(&adapter->mcc_lock);
3482         spin_lock_init(&adapter->mcc_cq_lock);
3483
3484         init_completion(&adapter->flash_compl);
3485         pci_save_state(adapter->pdev);
3486         return 0;
3487
3488 free_rx_filter:
3489         dma_free_coherent(&adapter->pdev->dev, rx_filter->size,
3490                           rx_filter->va, rx_filter->dma);
3491 free_mbox:
3489         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3490                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3491
3492 unmap_pci_bars:
3493         be_unmap_pci_bars(adapter);
3494
3495 done:
3496         return status;
3497 }
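/*
 * Editor's note: a worked example of the mailbox alignment in
 * be_ctrl_init() above. The mailbox must start on a 16-byte boundary, so
 * 16 extra bytes are allocated and both the CPU and DMA addresses are
 * rounded up with PTR_ALIGN; rounding consumes at most 15 of the 16 slack
 * bytes, so it can never pass the end of the padded buffer. E.g. an
 * allocation at ...1008 aligns to ...1010, using 8 bytes of slack.
 */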
3498
3499 static void be_stats_cleanup(struct be_adapter *adapter)
3500 {
3501         struct be_dma_mem *cmd = &adapter->stats_cmd;
3502
3503         if (cmd->va)
3504                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3505                                   cmd->va, cmd->dma);
3506 }
3507
3508 static int be_stats_init(struct be_adapter *adapter)
3509 {
3510         struct be_dma_mem *cmd = &adapter->stats_cmd;
3511
3512         if (adapter->generation == BE_GEN2) {
3513                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3514         } else {
3515                 if (lancer_chip(adapter))
3516                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3517                 else
3518                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3519         }
3520         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3521                                      GFP_KERNEL);
3522         if (cmd->va == NULL)
3523                 return -ENOMEM;
3524         memset(cmd->va, 0, cmd->size);
3525         return 0;
3526 }
3527
3528 static void __devexit be_remove(struct pci_dev *pdev)
3529 {
3530         struct be_adapter *adapter = pci_get_drvdata(pdev);
3531
3532         if (!adapter)
3533                 return;
3534
3535         be_roce_dev_remove(adapter);
3536
3537         cancel_delayed_work_sync(&adapter->func_recovery_work);
3538
3539         unregister_netdev(adapter->netdev);
3540
3541         be_clear(adapter);
3542
3543         /* tell fw we're done with firing cmds */
3544         be_cmd_fw_clean(adapter);
3545
3546         be_stats_cleanup(adapter);
3547
3548         be_ctrl_cleanup(adapter);
3549
3550         pci_disable_pcie_error_reporting(pdev);
3551
3552         pci_set_drvdata(pdev, NULL);
3553         pci_release_regions(pdev);
3554         pci_disable_device(pdev);
3555
3556         free_netdev(adapter->netdev);
3557 }
3558
3559 bool be_is_wol_supported(struct be_adapter *adapter)
3560 {
3561         return (adapter->wol_cap & BE_WOL_CAP) &&
3562                !be_is_wol_excluded(adapter);
3563 }
3564
3565 u32 be_get_fw_log_level(struct be_adapter *adapter)
3566 {
3567         struct be_dma_mem extfat_cmd;
3568         struct be_fat_conf_params *cfgs;
3569         int status;
3570         u32 level = 0;
3571         int j;
3572
3573         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3574         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3575         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3576                                              &extfat_cmd.dma);
3577
3578         if (!extfat_cmd.va) {
3579                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3580                         __func__);
3581                 goto err;
3582         }
3583
3584         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3585         if (!status) {
3586                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3587                                                 sizeof(struct be_cmd_resp_hdr));
3588                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3589                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3590                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3591                 }
3592         }
3593         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3594                             extfat_cmd.dma);
3595 err:
3596         return level;
3597 }

3598 static int be_get_initial_config(struct be_adapter *adapter)
3599 {
3600         int status;
3601         u32 level;
3602
3603         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3604                         &adapter->function_mode, &adapter->function_caps);
3605         if (status)
3606                 return status;
3607
3608         if (adapter->function_mode & FLEX10_MODE)
3609                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3610         else
3611                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3612
3613         if (be_physfn(adapter))
3614                 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3615         else
3616                 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3617
3618         status = be_cmd_get_cntl_attributes(adapter);
3619         if (status)
3620                 return status;
3621
3622         status = be_cmd_get_acpi_wol_cap(adapter);
3623         if (status) {
3624                 /* in case of a failure to get wol capabilities
3625                  * check the exclusion list to determine WOL capability */
3626                 if (!be_is_wol_excluded(adapter))
3627                         adapter->wol_cap |= BE_WOL_CAP;
3628         }
3629
3630         if (be_is_wol_supported(adapter))
3631                 adapter->wol = true;
3632
3633         /* Must be a power of 2 or else MODULO will BUG_ON */
3634         adapter->be_get_temp_freq = 64;
3635
3636         level = be_get_fw_log_level(adapter);
3637         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3638
3639         return 0;
3640 }
3641
3642 static int be_dev_type_check(struct be_adapter *adapter)
3643 {
3644         struct pci_dev *pdev = adapter->pdev;
3645         u32 sli_intf = 0;
3646
3647         switch (pdev->device) {
3648         case BE_DEVICE_ID1:
3649         case OC_DEVICE_ID1:
3650                 adapter->generation = BE_GEN2;
3651                 break;
3652         case BE_DEVICE_ID2:
3653         case OC_DEVICE_ID2:
3654                 adapter->generation = BE_GEN3;
3655                 break;
3656         case OC_DEVICE_ID3:
3657         case OC_DEVICE_ID4:
3658                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3659                 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3660                                                 SLI_INTF_IF_TYPE_SHIFT;
3663                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3664                         !be_type_2_3(adapter)) {
3665                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3666                         return -EINVAL;
3667                 }
3668                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3669                                          SLI_INTF_FAMILY_SHIFT);
3670                 adapter->generation = BE_GEN3;
3671                 break;
3672         case OC_DEVICE_ID5:
3673                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3674                 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3675                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3676                         return -EINVAL;
3677                 }
3678                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3679                                          SLI_INTF_FAMILY_SHIFT);
3680                 adapter->generation = BE_GEN3;
3681                 break;
3682         default:
3683                 adapter->generation = 0;
3684         }
3685
3686         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3687         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3688         return 0;
3689 }
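/*
 * Editor's note: the SLI_INTF decodes in be_dev_type_check() are plain
 * mask-and-shift field extractions. A generic form (hypothetical helper,
 * illustration only):
 */
static inline u32 sli_intf_field(u32 sli_intf, u32 mask, u32 shift)
{
        return (sli_intf & mask) >> shift;
}

/* e.g.: adapter->sli_family = sli_intf_field(sli_intf,
 *              SLI_INTF_FAMILY_MASK, SLI_INTF_FAMILY_SHIFT);
 */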
3690
3691 static int lancer_recover_func(struct be_adapter *adapter)
3692 {
3693         int status;
3694
3695         status = lancer_test_and_set_rdy_state(adapter);
3696         if (status)
3697                 goto err;
3698
3699         if (netif_running(adapter->netdev))
3700                 be_close(adapter->netdev);
3701
3702         be_clear(adapter);
3703
3704         adapter->hw_error = false;
3705         adapter->fw_timeout = false;
3706
3707         status = be_setup(adapter);
3708         if (status)
3709                 goto err;
3710
3711         if (netif_running(adapter->netdev)) {
3712                 status = be_open(adapter->netdev);
3713                 if (status)
3714                         goto err;
3715         }
3716
3717         dev_info(&adapter->pdev->dev,
3718                  "Adapter SLIPORT recovery succeeded\n");
3719         return 0;
3720 err:
3721         dev_err(&adapter->pdev->dev,
3722                 "Adapter SLIPORT recovery failed\n");
3723
3724         return status;
3725 }
3726
3727 static void be_func_recovery_task(struct work_struct *work)
3728 {
3729         struct be_adapter *adapter =
3730                 container_of(work, struct be_adapter,  func_recovery_work.work);
3731         int status;
3732
3733         be_detect_error(adapter);
3734
3735         if (adapter->hw_error && lancer_chip(adapter)) {
3736
3737                 if (adapter->eeh_error)
3738                         goto out;
3739
3740                 rtnl_lock();
3741                 netif_device_detach(adapter->netdev);
3742                 rtnl_unlock();
3743
3744                 status = lancer_recover_func(adapter);
3745
3746                 if (!status)
3747                         netif_device_attach(adapter->netdev);
3748         }
3749
3750 out:
3751         schedule_delayed_work(&adapter->func_recovery_work,
3752                               msecs_to_jiffies(1000));
3753 }
3754
3755 static void be_worker(struct work_struct *work)
3756 {
3757         struct be_adapter *adapter =
3758                 container_of(work, struct be_adapter, work.work);
3759         struct be_rx_obj *rxo;
3760         struct be_eq_obj *eqo;
3761         int i;
3762
3763         /* when interrupts are not yet enabled, just reap any pending
3764          * mcc completions */
3765         if (!netif_running(adapter->netdev)) {
3766                 local_bh_disable();
3767                 be_process_mcc(adapter);
3768                 local_bh_enable();
3769                 goto reschedule;
3770         }
3771
3772         if (!adapter->stats_cmd_sent) {
3773                 if (lancer_chip(adapter))
3774                         lancer_cmd_get_pport_stats(adapter,
3775                                                 &adapter->stats_cmd);
3776                 else
3777                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3778         }
3779
3780         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3781                 be_cmd_get_die_temperature(adapter);
3782
3783         for_all_rx_queues(adapter, rxo, i) {
3784                 if (rxo->rx_post_starved) {
3785                         rxo->rx_post_starved = false;
3786                         be_post_rx_frags(rxo, GFP_KERNEL);
3787                 }
3788         }
3789
3790         for_all_evt_queues(adapter, eqo, i)
3791                 be_eqd_update(adapter, eqo);
3792
3793 reschedule:
3794         adapter->work_counter++;
3795         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3796 }
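/*
 * Editor's note: the rate limiting above depends on MODULO() and on
 * be_get_temp_freq staying a power of two (see the comment in
 * be_get_initial_config()). Assuming MODULO reduces to a masked remainder,
 * an equivalent checked helper would be (illustration only; requires
 * is_power_of_2() from <linux/log2.h>):
 */
static inline u32 modulo_pow2(u32 num, u32 base)
{
        BUG_ON(!is_power_of_2(base));   /* mirrors the power-of-2 rule */
        return num & (base - 1);
}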
3797
3798 static bool be_reset_required(struct be_adapter *adapter)
3799 {
3800         return be_find_vfs(adapter, ENABLED) <= 0;
3801 }
3802
3803 static char *mc_name(struct be_adapter *adapter)
3804 {
3805         if (adapter->function_mode & FLEX10_MODE)
3806                 return "FLEX10";
3807         else if (adapter->function_mode & VNIC_MODE)
3808                 return "vNIC";
3809         else if (adapter->function_mode & UMC_ENABLED)
3810                 return "UMC";
3811         else
3812                 return "";
3813 }
3814
3815 static inline char *func_name(struct be_adapter *adapter)
3816 {
3817         return be_physfn(adapter) ? "PF" : "VF";
3818 }
3819
3820 static int __devinit be_probe(struct pci_dev *pdev,
3821                         const struct pci_device_id *pdev_id)
3822 {
3823         int status = 0;
3824         struct be_adapter *adapter;
3825         struct net_device *netdev;
3826         char port_name;
3827
3828         status = pci_enable_device(pdev);
3829         if (status)
3830                 goto do_none;
3831
3832         status = pci_request_regions(pdev, DRV_NAME);
3833         if (status)
3834                 goto disable_dev;
3835         pci_set_master(pdev);
3836
3837         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3838         if (netdev == NULL) {
3839                 status = -ENOMEM;
3840                 goto rel_reg;
3841         }
3842         adapter = netdev_priv(netdev);
3843         adapter->pdev = pdev;
3844         pci_set_drvdata(pdev, adapter);
3845
3846         status = be_dev_type_check(adapter);
3847         if (status)
3848                 goto free_netdev;
3849
3850         adapter->netdev = netdev;
3851         SET_NETDEV_DEV(netdev, &pdev->dev);
3852
3853         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3854         if (!status) {
3855                 netdev->features |= NETIF_F_HIGHDMA;
3856         } else {
3857                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3858                 if (status) {
3859                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3860                         goto free_netdev;
3861                 }
3862         }
3863
3864         status = pci_enable_pcie_error_reporting(pdev);
3865         if (status)
3866                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
3867
3868         status = be_ctrl_init(adapter);
3869         if (status)
3870                 goto free_netdev;
3871
3872         /* sync up with fw's ready state */
3873         if (be_physfn(adapter)) {
3874                 status = be_fw_wait_ready(adapter);
3875                 if (status)
3876                         goto ctrl_clean;
3877         }
3878
3879         /* tell fw we're ready to fire cmds */
3880         status = be_cmd_fw_init(adapter);
3881         if (status)
3882                 goto ctrl_clean;
3883
3884         if (be_reset_required(adapter)) {
3885                 status = be_cmd_reset_function(adapter);
3886                 if (status)
3887                         goto ctrl_clean;
3888         }
3889
3890         /* The INTR bit may be set in the card when probed by a kdump kernel
3891          * after a crash.
3892          */
3893         if (!lancer_chip(adapter))
3894                 be_intr_set(adapter, false);
3895
3896         status = be_stats_init(adapter);
3897         if (status)
3898                 goto ctrl_clean;
3899
3900         status = be_get_initial_config(adapter);
3901         if (status)
3902                 goto stats_clean;
3903
3904         INIT_DELAYED_WORK(&adapter->work, be_worker);
3905         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
3906         adapter->rx_fc = adapter->tx_fc = true;
3907
3908         status = be_setup(adapter);
3909         if (status)
3910                 goto stats_clean;
3911
3912         be_netdev_init(netdev);
3913         status = register_netdev(netdev);
3914         if (status != 0)
3915                 goto unsetup;
3916
3917         be_roce_dev_add(adapter);
3918
3919         schedule_delayed_work(&adapter->func_recovery_work,
3920                               msecs_to_jiffies(1000));
3921
3922         be_cmd_query_port_name(adapter, &port_name);
3923
3924         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
3925                  func_name(adapter), mc_name(adapter), port_name);
3926
3927         return 0;
3928
3929 unsetup:
3930         be_clear(adapter);
3931 stats_clean:
3932         be_stats_cleanup(adapter);
3933 ctrl_clean:
3934         be_ctrl_cleanup(adapter);
3935 free_netdev:
3936         free_netdev(netdev);
3937         pci_set_drvdata(pdev, NULL);
3938 rel_reg:
3939         pci_release_regions(pdev);
3940 disable_dev:
3941         pci_disable_device(pdev);
3942 do_none:
3943         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3944         return status;
3945 }
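/*
 * Editor's note: the error labels in be_probe() unwind in strict reverse
 * order of setup (be_setup -> stats -> ctrl -> netdev -> regions ->
 * device), so any new init step added here should fail through the label
 * matching the last successful step.
 */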
3946
3947 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3948 {
3949         struct be_adapter *adapter = pci_get_drvdata(pdev);
3950         struct net_device *netdev =  adapter->netdev;
3951
3952         if (adapter->wol)
3953                 be_setup_wol(adapter, true);
3954
3955         cancel_delayed_work_sync(&adapter->func_recovery_work);
3956
3957         netif_device_detach(netdev);
3958         if (netif_running(netdev)) {
3959                 rtnl_lock();
3960                 be_close(netdev);
3961                 rtnl_unlock();
3962         }
3963         be_clear(adapter);
3964
3965         pci_save_state(pdev);
3966         pci_disable_device(pdev);
3967         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3968         return 0;
3969 }
3970
3971 static int be_resume(struct pci_dev *pdev)
3972 {
3973         int status = 0;
3974         struct be_adapter *adapter = pci_get_drvdata(pdev);
3975         struct net_device *netdev =  adapter->netdev;
3976
3977         netif_device_detach(netdev);
3978
3979         status = pci_enable_device(pdev);
3980         if (status)
3981                 return status;
3982
3983         pci_set_power_state(pdev, PCI_D0);
3984         pci_restore_state(pdev);
3985
3986         /* tell fw we're ready to fire cmds */
3987         status = be_cmd_fw_init(adapter);
3988         if (status)
3989                 return status;
3990
3991         be_setup(adapter);
3992         if (netif_running(netdev)) {
3993                 rtnl_lock();
3994                 be_open(netdev);
3995                 rtnl_unlock();
3996         }
3997
3998         schedule_delayed_work(&adapter->func_recovery_work,
3999                               msecs_to_jiffies(1000));
4000         netif_device_attach(netdev);
4001
4002         if (adapter->wol)
4003                 be_setup_wol(adapter, false);
4004
4005         return 0;
4006 }
4007
4008 /*
4009  * An FLR will stop BE from DMAing any data.
4010  */
4011 static void be_shutdown(struct pci_dev *pdev)
4012 {
4013         struct be_adapter *adapter = pci_get_drvdata(pdev);
4014
4015         if (!adapter)
4016                 return;
4017
4018         cancel_delayed_work_sync(&adapter->work);
4019         cancel_delayed_work_sync(&adapter->func_recovery_work);
4020
4021         netif_device_detach(adapter->netdev);
4022
4023         if (adapter->wol)
4024                 be_setup_wol(adapter, true);
4025
4026         be_cmd_reset_function(adapter);
4027
4028         pci_disable_device(pdev);
4029 }
4030
4031 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4032                                 pci_channel_state_t state)
4033 {
4034         struct be_adapter *adapter = pci_get_drvdata(pdev);
4035         struct net_device *netdev =  adapter->netdev;
4036
4037         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4038
4039         adapter->eeh_error = true;
4040
4041         cancel_delayed_work_sync(&adapter->func_recovery_work);
4042
4043         rtnl_lock();
4044         netif_device_detach(netdev);
4045         rtnl_unlock();
4046
4047         if (netif_running(netdev)) {
4048                 rtnl_lock();
4049                 be_close(netdev);
4050                 rtnl_unlock();
4051         }
4052         be_clear(adapter);
4053
4054         if (state == pci_channel_io_perm_failure)
4055                 return PCI_ERS_RESULT_DISCONNECT;
4056
4057         pci_disable_device(pdev);
4058
4059         /* The error could cause the FW to trigger a flash debug dump.
4060          * Resetting the card while flash dump is in progress
4061          * can cause it not to recover; wait for it to finish
4062          */
4063         ssleep(30);
4064         return PCI_ERS_RESULT_NEED_RESET;
4065 }
4066
4067 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4068 {
4069         struct be_adapter *adapter = pci_get_drvdata(pdev);
4070         int status;
4071
4072         dev_info(&adapter->pdev->dev, "EEH reset\n");
4073         be_clear_all_error(adapter);
4074
4075         status = pci_enable_device(pdev);
4076         if (status)
4077                 return PCI_ERS_RESULT_DISCONNECT;
4078
4079         pci_set_master(pdev);
4080         pci_set_power_state(pdev, PCI_D0);
4081         pci_restore_state(pdev);
4082
4083         /* Check if card is ok and fw is ready */
4084         status = be_fw_wait_ready(adapter);
4085         if (status)
4086                 return PCI_ERS_RESULT_DISCONNECT;
4087
4088         pci_cleanup_aer_uncorrect_error_status(pdev);
4089         return PCI_ERS_RESULT_RECOVERED;
4090 }
4091
4092 static void be_eeh_resume(struct pci_dev *pdev)
4093 {
4094         int status = 0;
4095         struct be_adapter *adapter = pci_get_drvdata(pdev);
4096         struct net_device *netdev =  adapter->netdev;
4097
4098         dev_info(&adapter->pdev->dev, "EEH resume\n");
4099
4100         pci_save_state(pdev);
4101
4102         /* tell fw we're ready to fire cmds */
4103         status = be_cmd_fw_init(adapter);
4104         if (status)
4105                 goto err;
4106
4107         status = be_cmd_reset_function(adapter);
4108         if (status)
4109                 goto err;
4110
4111         status = be_setup(adapter);
4112         if (status)
4113                 goto err;
4114
4115         if (netif_running(netdev)) {
4116                 status = be_open(netdev);
4117                 if (status)
4118                         goto err;
4119         }
4120
4121         schedule_delayed_work(&adapter->func_recovery_work,
4122                               msecs_to_jiffies(1000));
4123         netif_device_attach(netdev);
4124         return;
4125 err:
4126         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4127 }
4128
4129 static struct pci_error_handlers be_eeh_handlers = {
4130         .error_detected = be_eeh_err_detected,
4131         .slot_reset = be_eeh_reset,
4132         .resume = be_eeh_resume,
4133 };
4134
4135 static struct pci_driver be_driver = {
4136         .name = DRV_NAME,
4137         .id_table = be_dev_ids,
4138         .probe = be_probe,
4139         .remove = be_remove,
4140         .suspend = be_suspend,
4141         .resume = be_resume,
4142         .shutdown = be_shutdown,
4143         .err_handler = &be_eeh_handlers
4144 };
4145
4146 static int __init be_init_module(void)
4147 {
4148         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4149             rx_frag_size != 2048) {
4150                 printk(KERN_WARNING DRV_NAME
4151                         " : Module param rx_frag_size must be 2048/4096/8192."
4152                         " Using 2048\n");
4153                 rx_frag_size = 2048;
4154         }
4155
4156         return pci_register_driver(&be_driver);
4157 }
4158 module_init(be_init_module);
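/*
 * Editor's note: the rx_frag_size check in be_init_module() accepts
 * exactly 2048, 4096 and 8192. An equivalent predicate using the
 * power-of-two helper (hypothetical name, illustration only; requires
 * <linux/log2.h>):
 */
static inline bool rx_frag_size_valid(ushort size)
{
        return size >= 2048 && size <= 8192 && is_power_of_2(size);
}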
4159
4160 static void __exit be_exit_module(void)
4161 {
4162         pci_unregister_driver(&be_driver);
4163 }
4164 module_exit(be_exit_module);