drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac, false,
				       adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

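/* Worked example (illustration only, not part of the original source):
 * suppose *acc = 0x0001FFFE, i.e. the HW counter has wrapped once
 * (hi word 0x0001) and the last reading folded in was 0xFFFE. If the
 * next HW reading is val = 3, then val < lo(*acc), so the 16-bit
 * counter wrapped again: newacc = 0x00010000 + 3 + 65536 = 0x00020003,
 * which correctly records two wraps plus the current reading.
 */
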
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

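/* Example (illustrative): an skb with linear data and two page frags
 * needs 1 (headlen) + 2 (frags) + 1 (hdr wrb) = 4 WRBs; 4 is already
 * even, so no dummy WRB is needed. With one page frag the count would
 * be 3, and a dummy WRB is appended on non-Lancer chips to make it 4.
 */
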
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

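/* Example (illustrative): for vlan_tag = 0x6005, the priority field is
 * (0x6005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT = 3. If bit 3 is not set
 * in adapter->vlan_prio_bmap, the tag is rewritten to carry
 * adapter->recommended_prio while preserving the VID bits (0x005).
 */
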
static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

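/* Resulting ring layout (illustration): for an skb with linear data and
 * one page frag on a BE2/BE3 chip, make_tx_wrbs() posts
 *	[hdr wrb][frag0: headlen][frag1: page frag][dummy wrb]
 * i.e. wrb_cnt = 4 descriptors. The header wrb slot is reserved first
 * but filled last, once the total copied length is known.
 */
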
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
		skb->vlan_tci = 0;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

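/* Example (illustrative, assuming max_vlans reflects the 64 HW filters
 * noted above): once a 65th VLAN is added, vlans_added exceeds
 * max_vlans and be_vid_config() falls back to VLAN promiscuous mode
 * rather than programming a filter table that no longer fits.
 */
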
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn &&
			dev->bus->number == pdev->bus->number) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

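/* Example (illustrative): SR-IOV routing IDs are derived from the PF's
 * devfn plus the VF offset and stride read from the SR-IOV capability.
 * With offset = 4 and stride = 2, a PF at devfn 0 owns VFs at devfn
 * 4, 6, 8, ... which is exactly the vf_fn sequence matched in the loop
 * above.
 */
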
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

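/* Example (illustrative): a 6000-byte frame with rx_frag_size = 2048
 * spans num_rcvd = 3 completions (2048 + 2048 + 1904). The first frag
 * supplies the copied Ethernet header plus skb frag[0]; each later frag
 * that starts at page_offset 0 opens a new skb frag slot, while frags
 * carved from the same physical page are coalesced into the current
 * slot.
 */
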
1288 /* Process the RX completion indicated by rxcp when GRO is disabled */
1289 static void be_rx_compl_process(struct be_rx_obj *rxo,
1290                                 struct be_rx_compl_info *rxcp)
1291 {
1292         struct be_adapter *adapter = rxo->adapter;
1293         struct net_device *netdev = adapter->netdev;
1294         struct sk_buff *skb;
1295
1296         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1297         if (unlikely(!skb)) {
1298                 rx_stats(rxo)->rx_drops_no_skbs++;
1299                 be_rx_compl_discard(rxo, rxcp);
1300                 return;
1301         }
1302
1303         skb_fill_rx_data(rxo, skb, rxcp);
1304
1305         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1306                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1307         else
1308                 skb_checksum_none_assert(skb);
1309
1310         skb->protocol = eth_type_trans(skb, netdev);
1311         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1312         if (netdev->features & NETIF_F_RXHASH)
1313                 skb->rxhash = rxcp->rss_hash;
1314
1315
1316         if (rxcp->vlanf)
1317                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1318
1319         netif_receive_skb(skb);
1320 }
1321
1322 /* Process the RX completion indicated by rxcp when GRO is enabled */
1323 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1324                              struct be_rx_compl_info *rxcp)
1325 {
1326         struct be_adapter *adapter = rxo->adapter;
1327         struct be_rx_page_info *page_info;
1328         struct sk_buff *skb = NULL;
1329         struct be_queue_info *rxq = &rxo->q;
1330         u16 remaining, curr_frag_len;
1331         u16 i, j;
1332
1333         skb = napi_get_frags(napi);
1334         if (!skb) {
1335                 be_rx_compl_discard(rxo, rxcp);
1336                 return;
1337         }
1338
1339         remaining = rxcp->pkt_size;
1340         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1341                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1342
1343                 curr_frag_len = min(remaining, rx_frag_size);
1344
1345                 /* Coalesce all frags from the same physical page in one slot */
1346                 if (i == 0 || page_info->page_offset == 0) {
1347                         /* First frag or Fresh page */
1348                         j++;
1349                         skb_frag_set_page(skb, j, page_info->page);
1350                         skb_shinfo(skb)->frags[j].page_offset =
1351                                                         page_info->page_offset;
1352                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1353                 } else {
1354                         put_page(page_info->page);
1355                 }
1356                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1357                 skb->truesize += rx_frag_size;
1358                 remaining -= curr_frag_len;
1359                 index_inc(&rxcp->rxq_idx, rxq->len);
1360                 memset(page_info, 0, sizeof(*page_info));
1361         }
1362         BUG_ON(j > MAX_SKB_FRAGS);
1363
1364         skb_shinfo(skb)->nr_frags = j + 1;
1365         skb->len = rxcp->pkt_size;
1366         skb->data_len = rxcp->pkt_size;
1367         skb->ip_summed = CHECKSUM_UNNECESSARY;
1368         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1369         if (adapter->netdev->features & NETIF_F_RXHASH)
1370                 skb->rxhash = rxcp->rss_hash;
1371
1372         if (rxcp->vlanf)
1373                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1374
1375         napi_gro_frags(napi);
1376 }
1377
1378 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1379                                  struct be_rx_compl_info *rxcp)
1380 {
1381         rxcp->pkt_size =
1382                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1383         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1384         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1385         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1386         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1387         rxcp->ip_csum =
1388                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1389         rxcp->l4_csum =
1390                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1391         rxcp->ipv6 =
1392                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1393         rxcp->rxq_idx =
1394                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1395         rxcp->num_rcvd =
1396                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1397         rxcp->pkt_type =
1398                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1399         rxcp->rss_hash =
1400                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1401         if (rxcp->vlanf) {
1402                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1403                                           compl);
1404                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1405                                                compl);
1406         }
1407         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1408 }
1409
1410 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1411                                  struct be_rx_compl_info *rxcp)
1412 {
1413         rxcp->pkt_size =
1414                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1415         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1416         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1417         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1418         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1419         rxcp->ip_csum =
1420                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1421         rxcp->l4_csum =
1422                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1423         rxcp->ipv6 =
1424                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1425         rxcp->rxq_idx =
1426                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1427         rxcp->num_rcvd =
1428                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1429         rxcp->pkt_type =
1430                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1431         rxcp->rss_hash =
1432                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1433         if (rxcp->vlanf) {
1434                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1435                                           compl);
1436                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1437                                                compl);
1438         }
1439         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1440 }
1441
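/* Pop one completion off the RX CQ. An entry is consumed only if its valid
 * bit is set; the rmb() keeps the rest of the descriptor from being read
 * before that bit, and the bit is cleared after parsing so a recycled CQ
 * entry is not mistaken for a new completion.
 */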
1442 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1443 {
1444         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1445         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1446         struct be_adapter *adapter = rxo->adapter;
1447
1448         /* When checking the valid bit, either definition is OK to use, as the
1449          * valid bit is at the same position in both the v0 and v1 Rx compls */
1450         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1451                 return NULL;
1452
1453         rmb();
1454         be_dws_le_to_cpu(compl, sizeof(*compl));
1455
1456         if (adapter->be3_native)
1457                 be_parse_rx_compl_v1(compl, rxcp);
1458         else
1459                 be_parse_rx_compl_v0(compl, rxcp);
1460
1461         if (rxcp->vlanf) {
1462                 /* vlanf can be wrongly set on some cards;
1463                  * ignore it if vtm is not set */
1464                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1465                         rxcp->vlanf = 0;
1466
1467                 if (!lancer_chip(adapter))
1468                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1469
1470                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1471                     !adapter->vlan_tag[rxcp->vlan_tag])
1472                         rxcp->vlanf = 0;
1473         }
1474
1475         /* As the compl has been parsed, reset it; we won't touch it again */
1476         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1477
1478         queue_tail_inc(&rxo->cq);
1479         return rxcp;
1480 }
1481
1482 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1483 {
1484         u32 order = get_order(size);
1485
1486         if (order > 0)
1487                 gfp |= __GFP_COMP;
1488         return  alloc_pages(gfp, order);
1489 }
1490
1491 /*
1492  * Allocate a page, split it into fragments of size rx_frag_size and post
1493  * them as receive buffers to BE
1494  */
1495 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1496 {
1497         struct be_adapter *adapter = rxo->adapter;
1498         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1499         struct be_queue_info *rxq = &rxo->q;
1500         struct page *pagep = NULL;
1501         struct be_eth_rx_d *rxd;
1502         u64 page_dmaaddr = 0, frag_dmaaddr;
1503         u32 posted, page_offset = 0;
1504
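        /* Carve each big (compound) page into rx_frag_size chunks: the first
         * chunk keeps the original page reference and a DMA mapping of the
         * whole page, while every further chunk takes an extra reference via
         * get_page() and reuses the mapping at its own offset. last_page_user
         * marks the final chunk carved from a page.
         */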
1505         page_info = &rxo->page_info_tbl[rxq->head];
1506         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1507                 if (!pagep) {
1508                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1509                         if (unlikely(!pagep)) {
1510                                 rx_stats(rxo)->rx_post_fail++;
1511                                 break;
1512                         }
1513                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1514                                                     0, adapter->big_page_size,
1515                                                     DMA_FROM_DEVICE);
1516                         page_info->page_offset = 0;
1517                 } else {
1518                         get_page(pagep);
1519                         page_info->page_offset = page_offset + rx_frag_size;
1520                 }
1521                 page_offset = page_info->page_offset;
1522                 page_info->page = pagep;
1523                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1524                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1525
1526                 rxd = queue_head_node(rxq);
1527                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1528                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1529
1530                 /* Any space left in the current big page for another frag? */
1531                 if ((page_offset + rx_frag_size + rx_frag_size) >
1532                                         adapter->big_page_size) {
1533                         pagep = NULL;
1534                         page_info->last_page_user = true;
1535                 }
1536
1537                 prev_page_info = page_info;
1538                 queue_head_inc(rxq);
1539                 page_info = &rxo->page_info_tbl[rxq->head];
1540         }
1541         if (pagep)
1542                 prev_page_info->last_page_user = true;
1543
1544         if (posted) {
1545                 atomic_add(posted, &rxq->used);
1546                 be_rxq_notify(adapter, rxq->id, posted);
1547         } else if (atomic_read(&rxq->used) == 0) {
1548                 /* Let be_worker replenish when memory is available */
1549                 rxo->rx_post_starved = true;
1550         }
1551 }
1552
1553 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1554 {
1555         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1556
1557         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1558                 return NULL;
1559
1560         rmb();
1561         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1562
1563         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1564
1565         queue_tail_inc(tx_cq);
1566         return txcp;
1567 }
1568
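/* Reclaim the wrbs of one transmitted skb: skip the header wrb, unmap each
 * data wrb up to and including last_index (the first unmap also covers the
 * skb's linear header, if any), free the skb, and return the number of wrbs
 * consumed so the caller can credit them back to the TX queue.
 */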
1569 static u16 be_tx_compl_process(struct be_adapter *adapter,
1570                 struct be_tx_obj *txo, u16 last_index)
1571 {
1572         struct be_queue_info *txq = &txo->q;
1573         struct be_eth_wrb *wrb;
1574         struct sk_buff **sent_skbs = txo->sent_skb_list;
1575         struct sk_buff *sent_skb;
1576         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1577         bool unmap_skb_hdr = true;
1578
1579         sent_skb = sent_skbs[txq->tail];
1580         BUG_ON(!sent_skb);
1581         sent_skbs[txq->tail] = NULL;
1582
1583         /* skip header wrb */
1584         queue_tail_inc(txq);
1585
1586         do {
1587                 cur_index = txq->tail;
1588                 wrb = queue_tail_node(txq);
1589                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1590                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1591                 unmap_skb_hdr = false;
1592
1593                 num_wrbs++;
1594                 queue_tail_inc(txq);
1595         } while (cur_index != last_index);
1596
1597         kfree_skb(sent_skb);
1598         return num_wrbs;
1599 }
1600
1601 /* Consume the events in the event queue and return how many there were */
1602 static inline int events_get(struct be_eq_obj *eqo)
1603 {
1604         struct be_eq_entry *eqe;
1605         int num = 0;
1606
1607         do {
1608                 eqe = queue_tail_node(&eqo->q);
1609                 if (eqe->evt == 0)
1610                         break;
1611
1612                 rmb();
1613                 eqe->evt = 0;
1614                 num++;
1615                 queue_tail_inc(&eqo->q);
1616         } while (true);
1617
1618         return num;
1619 }
1620
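/* Interrupt-context half of event processing: count and ack the pending
 * events and kick NAPI when there is work. A spurious interrupt with no
 * events only re-arms the EQ (under MSI-x) instead of scheduling NAPI.
 */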
1621 static int event_handle(struct be_eq_obj *eqo)
1622 {
1623         bool rearm = false;
1624         int num = events_get(eqo);
1625
1626         /* Deal with any spurious interrupts that come without events */
1627         if (!num)
1628                 rearm = true;
1629
1630         if (num || msix_enabled(eqo->adapter))
1631                 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1632
1633         if (num)
1634                 napi_schedule(&eqo->napi);
1635
1636         return num;
1637 }
1638
1639 /* Leaves the EQ in a disarmed state */
1640 static void be_eq_clean(struct be_eq_obj *eqo)
1641 {
1642         int num = events_get(eqo);
1643
1644         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1645 }
1646
1647 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1648 {
1649         struct be_rx_page_info *page_info;
1650         struct be_queue_info *rxq = &rxo->q;
1651         struct be_queue_info *rx_cq = &rxo->cq;
1652         struct be_rx_compl_info *rxcp;
1653         u16 tail;
1654
1655         /* First clean up any pending rx completions */
1656         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1657                 be_rx_compl_discard(rxo, rxcp);
1658                 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1659         }
1660
1661         /* Then free posted rx buffers that were not used */
1662         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1663         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1664                 page_info = get_rx_page_info(rxo, tail);
1665                 put_page(page_info->page);
1666                 memset(page_info, 0, sizeof(*page_info));
1667         }
1668         BUG_ON(atomic_read(&rxq->used));
1669         rxq->tail = rxq->head = 0;
1670 }
1671
1672 static void be_tx_compl_clean(struct be_adapter *adapter)
1673 {
1674         struct be_tx_obj *txo;
1675         struct be_queue_info *txq;
1676         struct be_eth_tx_compl *txcp;
1677         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1678         struct sk_buff *sent_skb;
1679         bool dummy_wrb;
1680         int i, pending_txqs;
1681
1682         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1683         do {
1684                 pending_txqs = adapter->num_tx_qs;
1685
1686                 for_all_tx_queues(adapter, txo, i) {
1687                         txq = &txo->q;
1688                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1689                                 end_idx =
1690                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1691                                                       wrb_index, txcp);
1692                                 num_wrbs += be_tx_compl_process(adapter, txo,
1693                                                                 end_idx);
1694                                 cmpl++;
1695                         }
1696                         if (cmpl) {
1697                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1698                                 atomic_sub(num_wrbs, &txq->used);
1699                                 cmpl = 0;
1700                                 num_wrbs = 0;
1701                         }
1702                         if (atomic_read(&txq->used) == 0)
1703                                 pending_txqs--;
1704                 }
1705
1706                 if (pending_txqs == 0 || ++timeo > 200)
1707                         break;
1708
1709                 mdelay(1);
1710         } while (true);
1711
1712         for_all_tx_queues(adapter, txo, i) {
1713                 txq = &txo->q;
1714                 if (atomic_read(&txq->used))
1715                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1716                                 atomic_read(&txq->used));
1717
1718                 /* free posted tx for which compls will never arrive */
1719                 while (atomic_read(&txq->used)) {
1720                         sent_skb = txo->sent_skb_list[txq->tail];
1721                         end_idx = txq->tail;
1722                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1723                                                    &dummy_wrb);
1724                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1725                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1726                         atomic_sub(num_wrbs, &txq->used);
1727                 }
1728         }
1729 }
1730
1731 static void be_evt_queues_destroy(struct be_adapter *adapter)
1732 {
1733         struct be_eq_obj *eqo;
1734         int i;
1735
1736         for_all_evt_queues(adapter, eqo, i) {
1737                 if (eqo->q.created) {
1738                         be_eq_clean(eqo);
1739                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1740                 }
1741                 be_queue_free(adapter, &eqo->q);
1742         }
1743 }
1744
1745 static int be_evt_queues_create(struct be_adapter *adapter)
1746 {
1747         struct be_queue_info *eq;
1748         struct be_eq_obj *eqo;
1749         int i, rc;
1750
1751         adapter->num_evt_qs = num_irqs(adapter);
1752
1753         for_all_evt_queues(adapter, eqo, i) {
1754                 eqo->adapter = adapter;
1755                 eqo->tx_budget = BE_TX_BUDGET;
1756                 eqo->idx = i;
1757                 eqo->max_eqd = BE_MAX_EQD;
1758                 eqo->enable_aic = true;
1759
1760                 eq = &eqo->q;
1761                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1762                                         sizeof(struct be_eq_entry));
1763                 if (rc)
1764                         return rc;
1765
1766                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1767                 if (rc)
1768                         return rc;
1769         }
1770         return 0;
1771 }
1772
1773 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1774 {
1775         struct be_queue_info *q;
1776
1777         q = &adapter->mcc_obj.q;
1778         if (q->created)
1779                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1780         be_queue_free(adapter, q);
1781
1782         q = &adapter->mcc_obj.cq;
1783         if (q->created)
1784                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1785         be_queue_free(adapter, q);
1786 }
1787
1788 /* Must be called only after TX qs are created as MCC shares TX EQ */
1789 static int be_mcc_queues_create(struct be_adapter *adapter)
1790 {
1791         struct be_queue_info *q, *cq;
1792
1793         cq = &adapter->mcc_obj.cq;
1794         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1795                         sizeof(struct be_mcc_compl)))
1796                 goto err;
1797
1798         /* Use the default EQ for MCC completions */
1799         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1800                 goto mcc_cq_free;
1801
1802         q = &adapter->mcc_obj.q;
1803         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1804                 goto mcc_cq_destroy;
1805
1806         if (be_cmd_mccq_create(adapter, q, cq))
1807                 goto mcc_q_free;
1808
1809         return 0;
1810
1811 mcc_q_free:
1812         be_queue_free(adapter, q);
1813 mcc_cq_destroy:
1814         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1815 mcc_cq_free:
1816         be_queue_free(adapter, cq);
1817 err:
1818         return -1;
1819 }
1820
1821 static void be_tx_queues_destroy(struct be_adapter *adapter)
1822 {
1823         struct be_queue_info *q;
1824         struct be_tx_obj *txo;
1825         u8 i;
1826
1827         for_all_tx_queues(adapter, txo, i) {
1828                 q = &txo->q;
1829                 if (q->created)
1830                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1831                 be_queue_free(adapter, q);
1832
1833                 q = &txo->cq;
1834                 if (q->created)
1835                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1836                 be_queue_free(adapter, q);
1837         }
1838 }
1839
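/* MAX_TX_QS TX queues are used only for a BE3 PF in single-channel mode;
 * SR-IOV, multi-channel, Lancer, VF and GEN2 configurations all use a
 * single TX queue.
 */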
1840 static int be_num_txqs_want(struct be_adapter *adapter)
1841 {
1842         if (sriov_want(adapter) || be_is_mc(adapter) ||
1843             lancer_chip(adapter) || !be_physfn(adapter) ||
1844             adapter->generation == BE_GEN2)
1845                 return 1;
1846         else
1847                 return MAX_TX_QS;
1848 }
1849
1850 static int be_tx_cqs_create(struct be_adapter *adapter)
1851 {
1852         struct be_queue_info *cq, *eq;
1853         int status;
1854         struct be_tx_obj *txo;
1855         u8 i;
1856
1857         adapter->num_tx_qs = be_num_txqs_want(adapter);
1858         if (adapter->num_tx_qs != MAX_TX_QS) {
1859                 rtnl_lock();
1860                 netif_set_real_num_tx_queues(adapter->netdev,
1861                         adapter->num_tx_qs);
1862                 rtnl_unlock();
1863         }
1864
1865         for_all_tx_queues(adapter, txo, i) {
1866                 cq = &txo->cq;
1867                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1868                                         sizeof(struct be_eth_tx_compl));
1869                 if (status)
1870                         return status;
1871
1872                 /* If num_evt_qs is less than num_tx_qs, then more than
1873                  * one txq shares an eq
1874                  */
1875                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1876                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1877                 if (status)
1878                         return status;
1879         }
1880         return 0;
1881 }
1882
1883 static int be_tx_qs_create(struct be_adapter *adapter)
1884 {
1885         struct be_tx_obj *txo;
1886         int i, status;
1887
1888         for_all_tx_queues(adapter, txo, i) {
1889                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1890                                         sizeof(struct be_eth_wrb));
1891                 if (status)
1892                         return status;
1893
1894                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1895                 if (status)
1896                         return status;
1897         }
1898
1899         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1900                  adapter->num_tx_qs);
1901         return 0;
1902 }
1903
1904 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1905 {
1906         struct be_queue_info *q;
1907         struct be_rx_obj *rxo;
1908         int i;
1909
1910         for_all_rx_queues(adapter, rxo, i) {
1911                 q = &rxo->cq;
1912                 if (q->created)
1913                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1914                 be_queue_free(adapter, q);
1915         }
1916 }
1917
1918 static int be_rx_cqs_create(struct be_adapter *adapter)
1919 {
1920         struct be_queue_info *eq, *cq;
1921         struct be_rx_obj *rxo;
1922         int rc, i;
1923
1924         /* We'll create as many RSS rings as there are irqs.
1925          * But when there's only one irq, there's no use creating RSS rings
1926          */
1927         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1928                                 num_irqs(adapter) + 1 : 1;
1929         if (adapter->num_rx_qs != MAX_RX_QS) {
1930                 rtnl_lock();
1931                 netif_set_real_num_rx_queues(adapter->netdev,
1932                                              adapter->num_rx_qs);
1933                 rtnl_unlock();
1934         }
1935
1936         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1937         for_all_rx_queues(adapter, rxo, i) {
1938                 rxo->adapter = adapter;
1939                 cq = &rxo->cq;
1940                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1941                                 sizeof(struct be_eth_rx_compl));
1942                 if (rc)
1943                         return rc;
1944
1945                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1946                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1947                 if (rc)
1948                         return rc;
1949         }
1950
1951         dev_info(&adapter->pdev->dev,
1952                  "created %d RSS queue(s) and 1 default RX queue\n",
1953                  adapter->num_rx_qs - 1);
1954         return 0;
1955 }
1956
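/* Legacy INTx handler; only EQ0 exists in this mode. Returning IRQ_NONE
 * when no events are found lets the kernel attribute the interrupt to
 * another device sharing the line.
 */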
1957 static irqreturn_t be_intx(int irq, void *dev)
1958 {
1959         struct be_adapter *adapter = dev;
1960         int num_evts;
1961
1962         /* With INTx only one EQ is used */
1963         num_evts = event_handle(&adapter->eq_obj[0]);
1964         if (num_evts)
1965                 return IRQ_HANDLED;
1966         else
1967                 return IRQ_NONE;
1968 }
1969
1970 static irqreturn_t be_msix(int irq, void *dev)
1971 {
1972         struct be_eq_obj *eqo = dev;
1973
1974         event_handle(eqo);
1975         return IRQ_HANDLED;
1976 }
1977
1978 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1979 {
1980         return rxcp->tcpf && !rxcp->err;
1981 }
1982
1983 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1984                         int budget)
1985 {
1986         struct be_adapter *adapter = rxo->adapter;
1987         struct be_queue_info *rx_cq = &rxo->cq;
1988         struct be_rx_compl_info *rxcp;
1989         u32 work_done;
1990
1991         for (work_done = 0; work_done < budget; work_done++) {
1992                 rxcp = be_rx_compl_get(rxo);
1993                 if (!rxcp)
1994                         break;
1995
1996                 /* Is it a flush compl that has no data? */
1997                 if (unlikely(rxcp->num_rcvd == 0))
1998                         goto loop_continue;
1999
2000                 /* Discard a compl with partial DMA (Lancer B0) */
2001                 if (unlikely(!rxcp->pkt_size)) {
2002                         be_rx_compl_discard(rxo, rxcp);
2003                         goto loop_continue;
2004                 }
2005
2006                 /* On BE drop pkts that arrive due to imperfect filtering in
2007                  * promiscuous mode on some SKUs
2008                  */
2009                 if (unlikely(rxcp->port != adapter->port_num &&
2010                                 !lancer_chip(adapter))) {
2011                         be_rx_compl_discard(rxo, rxcp);
2012                         goto loop_continue;
2013                 }
2014
2015                 if (do_gro(rxcp))
2016                         be_rx_compl_process_gro(rxo, napi, rxcp);
2017                 else
2018                         be_rx_compl_process(rxo, rxcp);
2019 loop_continue:
2020                 be_rx_stats_update(rxo, rxcp);
2021         }
2022
2023         if (work_done) {
2024                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2025
2026                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2027                         be_post_rx_frags(rxo, GFP_ATOMIC);
2028         }
2029
2030         return work_done;
2031 }
2032
2033 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2034                           int budget, int idx)
2035 {
2036         struct be_eth_tx_compl *txcp;
2037         int num_wrbs = 0, work_done;
2038
2039         for (work_done = 0; work_done < budget; work_done++) {
2040                 txcp = be_tx_compl_get(&txo->cq);
2041                 if (!txcp)
2042                         break;
2043                 num_wrbs += be_tx_compl_process(adapter, txo,
2044                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2045                                         wrb_index, txcp));
2046         }
2047
2048         if (work_done) {
2049                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2050                 atomic_sub(num_wrbs, &txo->q.used);
2051
2052                 /* As Tx wrbs have been freed up, wake up netdev queue
2053                  * if it was stopped due to lack of tx wrbs.  */
2054                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2055                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2056                         netif_wake_subqueue(adapter->netdev, idx);
2057                 }
2058
2059                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2060                 tx_stats(txo)->tx_compl += work_done;
2061                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2062         }
2063         return (work_done < budget); /* Done */
2064 }
2065
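/* NAPI poll handler. Each EQ services the TX and RX queues whose index maps
 * onto it (i % num_evt_qs == eqo->idx). The EQ is re-armed only when all of
 * its queues finish under budget; otherwise the accumulated events are
 * cleared and polling continues.
 */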
2066 int be_poll(struct napi_struct *napi, int budget)
2067 {
2068         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2069         struct be_adapter *adapter = eqo->adapter;
2070         int max_work = 0, work, i;
2071         bool tx_done;
2072
2073         /* Process all TXQs serviced by this EQ */
2074         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2075                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2076                                         eqo->tx_budget, i);
2077                 if (!tx_done)
2078                         max_work = budget;
2079         }
2080
2081         /* This loop will iterate twice for EQ0, in which
2082          * completions of the last RXQ (the default one) are also processed.
2083          * For other EQs the loop iterates only once.
2084          */
2085         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2086                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2087                 max_work = max(work, max_work);
2088         }
2089
2090         if (is_mcc_eqo(eqo))
2091                 be_process_mcc(adapter);
2092
2093         if (max_work < budget) {
2094                 napi_complete(napi);
2095                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2096         } else {
2097                 /* As we'll continue in polling mode, count and clear events */
2098                 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2099         }
2100         return max_work;
2101 }
2102
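/* Check for fatal adapter errors: Lancer reports them through the SLIPORT
 * status/error registers, other chips through the UE (unrecoverable error)
 * status words in PCI config space, where the mask registers filter out
 * bits that do not indicate a real error.
 */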
2103 void be_detect_error(struct be_adapter *adapter)
2104 {
2105         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2106         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2107         u32 i;
2108
2109         if (be_crit_error(adapter))
2110                 return;
2111
2112         if (lancer_chip(adapter)) {
2113                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2114                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2115                         sliport_err1 = ioread32(adapter->db +
2116                                         SLIPORT_ERROR1_OFFSET);
2117                         sliport_err2 = ioread32(adapter->db +
2118                                         SLIPORT_ERROR2_OFFSET);
2119                 }
2120         } else {
2121                 pci_read_config_dword(adapter->pdev,
2122                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2123                 pci_read_config_dword(adapter->pdev,
2124                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2125                 pci_read_config_dword(adapter->pdev,
2126                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2127                 pci_read_config_dword(adapter->pdev,
2128                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2129
2130                 ue_lo = (ue_lo & ~ue_lo_mask);
2131                 ue_hi = (ue_hi & ~ue_hi_mask);
2132         }
2133
2134         if (ue_lo || ue_hi ||
2135                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2136                 adapter->hw_error = true;
2137                 dev_err(&adapter->pdev->dev,
2138                         "Error detected in the card\n");
2139         }
2140
2141         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2142                 dev_err(&adapter->pdev->dev,
2143                         "ERR: sliport status 0x%x\n", sliport_status);
2144                 dev_err(&adapter->pdev->dev,
2145                         "ERR: sliport error1 0x%x\n", sliport_err1);
2146                 dev_err(&adapter->pdev->dev,
2147                         "ERR: sliport error2 0x%x\n", sliport_err2);
2148         }
2149
2150         if (ue_lo) {
2151                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2152                         if (ue_lo & 1)
2153                                 dev_err(&adapter->pdev->dev,
2154                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2155                 }
2156         }
2157
2158         if (ue_hi) {
2159                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2160                         if (ue_hi & 1)
2161                                 dev_err(&adapter->pdev->dev,
2162                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2163                 }
2164         }
2166 }
2167
2168 static void be_msix_disable(struct be_adapter *adapter)
2169 {
2170         if (msix_enabled(adapter)) {
2171                 pci_disable_msix(adapter->pdev);
2172                 adapter->num_msix_vec = 0;
2173         }
2174 }
2175
2176 static uint be_num_rss_want(struct be_adapter *adapter)
2177 {
2178         u32 num = 0;
2179         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2180              !sriov_want(adapter) && be_physfn(adapter)) {
2181                 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2182                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2183         }
2184         return num;
2185 }
2186
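/* Ask for one MSI-x vector per desired RSS queue, plus the RoCE vectors
 * when RoCE is supported. pci_enable_msix() returns a positive count when
 * fewer vectors are available, in which case the request is retried with
 * that smaller count.
 */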
2187 static void be_msix_enable(struct be_adapter *adapter)
2188 {
2189 #define BE_MIN_MSIX_VECTORS             1
2190         int i, status, num_vec, num_roce_vec = 0;
2191         struct device *dev = &adapter->pdev->dev;
2192
2193         /* If RSS queues are not used, need a vec for default RX Q */
2194         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2195         if (be_roce_supported(adapter)) {
2196                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2197                                         (num_online_cpus() + 1));
2198                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2199                 num_vec += num_roce_vec;
2200                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2201         }
2202         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2203
2204         for (i = 0; i < num_vec; i++)
2205                 adapter->msix_entries[i].entry = i;
2206
2207         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2208         if (status == 0) {
2209                 goto done;
2210         } else if (status >= BE_MIN_MSIX_VECTORS) {
2211                 num_vec = status;
2212                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2213                                 num_vec) == 0)
2214                         goto done;
2215         }
2216
2217         dev_warn(dev, "MSIx enable failed\n");
2218         return;
2219 done:
2220         if (be_roce_supported(adapter)) {
2221                 if (num_vec > num_roce_vec) {
2222                         adapter->num_msix_vec = num_vec - num_roce_vec;
2223                         adapter->num_msix_roce_vec =
2224                                 num_vec - adapter->num_msix_vec;
2225                 } else {
2226                         adapter->num_msix_vec = num_vec;
2227                         adapter->num_msix_roce_vec = 0;
2228                 }
2229         } else
2230                 adapter->num_msix_vec = num_vec;
2231         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2232         return;
2233 }
2234
2235 static inline int be_msix_vec_get(struct be_adapter *adapter,
2236                                 struct be_eq_obj *eqo)
2237 {
2238         return adapter->msix_entries[eqo->idx].vector;
2239 }
2240
2241 static int be_msix_register(struct be_adapter *adapter)
2242 {
2243         struct net_device *netdev = adapter->netdev;
2244         struct be_eq_obj *eqo;
2245         int status, i, vec;
2246
2247         for_all_evt_queues(adapter, eqo, i) {
2248                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2249                 vec = be_msix_vec_get(adapter, eqo);
2250                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2251                 if (status)
2252                         goto err_msix;
2253         }
2254
2255         return 0;
2256 err_msix:
2257         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2258                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2259         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2260                 status);
2261         be_msix_disable(adapter);
2262         return status;
2263 }
2264
2265 static int be_irq_register(struct be_adapter *adapter)
2266 {
2267         struct net_device *netdev = adapter->netdev;
2268         int status;
2269
2270         if (msix_enabled(adapter)) {
2271                 status = be_msix_register(adapter);
2272                 if (status == 0)
2273                         goto done;
2274                 /* INTx is not supported for VF */
2275                 if (!be_physfn(adapter))
2276                         return status;
2277         }
2278
2279         /* INTx */
2280         netdev->irq = adapter->pdev->irq;
2281         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2282                         adapter);
2283         if (status) {
2284                 dev_err(&adapter->pdev->dev,
2285                         "INTx request IRQ failed - err %d\n", status);
2286                 return status;
2287         }
2288 done:
2289         adapter->isr_registered = true;
2290         return 0;
2291 }
2292
2293 static void be_irq_unregister(struct be_adapter *adapter)
2294 {
2295         struct net_device *netdev = adapter->netdev;
2296         struct be_eq_obj *eqo;
2297         int i;
2298
2299         if (!adapter->isr_registered)
2300                 return;
2301
2302         /* INTx */
2303         if (!msix_enabled(adapter)) {
2304                 free_irq(netdev->irq, adapter);
2305                 goto done;
2306         }
2307
2308         /* MSIx */
2309         for_all_evt_queues(adapter, eqo, i)
2310                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2311
2312 done:
2313         adapter->isr_registered = false;
2314 }
2315
2316 static void be_rx_qs_destroy(struct be_adapter *adapter)
2317 {
2318         struct be_queue_info *q;
2319         struct be_rx_obj *rxo;
2320         int i;
2321
2322         for_all_rx_queues(adapter, rxo, i) {
2323                 q = &rxo->q;
2324                 if (q->created) {
2325                         be_cmd_rxq_destroy(adapter, q);
2326                         /* After the rxq is invalidated, wait for a grace time
2327                          * of 1ms for all dma to end and the flush compl to
2328                          * arrive
2329                          */
2330                         mdelay(1);
2331                         be_rx_cq_clean(rxo);
2332                 }
2333                 be_queue_free(adapter, q);
2334         }
2335 }
2336
2337 static int be_close(struct net_device *netdev)
2338 {
2339         struct be_adapter *adapter = netdev_priv(netdev);
2340         struct be_eq_obj *eqo;
2341         int i;
2342
2343         be_roce_dev_close(adapter);
2344
2345         be_async_mcc_disable(adapter);
2346
2347         if (!lancer_chip(adapter))
2348                 be_intr_set(adapter, false);
2349
2350         for_all_evt_queues(adapter, eqo, i) {
2351                 napi_disable(&eqo->napi);
2352                 if (msix_enabled(adapter))
2353                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2354                 else
2355                         synchronize_irq(netdev->irq);
2356                 be_eq_clean(eqo);
2357         }
2358
2359         be_irq_unregister(adapter);
2360
2361         /* Wait for all pending tx completions to arrive so that
2362          * all tx skbs are freed.
2363          */
2364         be_tx_compl_clean(adapter);
2365
2366         be_rx_qs_destroy(adapter);
2367         return 0;
2368 }
2369
2370 static int be_rx_qs_create(struct be_adapter *adapter)
2371 {
2372         struct be_rx_obj *rxo;
2373         int rc, i, j;
2374         u8 rsstable[128];
2375
2376         for_all_rx_queues(adapter, rxo, i) {
2377                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2378                                     sizeof(struct be_eth_rx_d));
2379                 if (rc)
2380                         return rc;
2381         }
2382
2383         /* The FW would like the default RXQ to be created first */
2384         rxo = default_rxo(adapter);
2385         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2386                                adapter->if_handle, false, &rxo->rss_id);
2387         if (rc)
2388                 return rc;
2389
2390         for_all_rss_queues(adapter, rxo, i) {
2391                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2392                                        rx_frag_size, adapter->if_handle,
2393                                        true, &rxo->rss_id);
2394                 if (rc)
2395                         return rc;
2396         }
2397
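        /* Fill the 128-entry RSS indirection table round-robin with the
         * rss_ids of the RSS rings; with 3 RSS rings, for example, the table
         * becomes id0, id1, id2, id0, id1, id2, ...
         */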
2398         if (be_multi_rxq(adapter)) {
2399                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2400                         for_all_rss_queues(adapter, rxo, i) {
2401                                 if ((j + i) >= 128)
2402                                         break;
2403                                 rsstable[j + i] = rxo->rss_id;
2404                         }
2405                 }
2406                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2407                 if (rc)
2408                         return rc;
2409         }
2410
2411         /* First time posting */
2412         for_all_rx_queues(adapter, rxo, i)
2413                 be_post_rx_frags(rxo, GFP_KERNEL);
2414         return 0;
2415 }
2416
2417 static int be_open(struct net_device *netdev)
2418 {
2419         struct be_adapter *adapter = netdev_priv(netdev);
2420         struct be_eq_obj *eqo;
2421         struct be_rx_obj *rxo;
2422         struct be_tx_obj *txo;
2423         u8 link_status;
2424         int status, i;
2425
2426         status = be_rx_qs_create(adapter);
2427         if (status)
2428                 goto err;
2429
2430         be_irq_register(adapter);
2431
2432         if (!lancer_chip(adapter))
2433                 be_intr_set(adapter, true);
2434
2435         for_all_rx_queues(adapter, rxo, i)
2436                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2437
2438         for_all_tx_queues(adapter, txo, i)
2439                 be_cq_notify(adapter, txo->cq.id, true, 0);
2440
2441         be_async_mcc_enable(adapter);
2442
2443         for_all_evt_queues(adapter, eqo, i) {
2444                 napi_enable(&eqo->napi);
2445                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2446         }
2447
2448         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2449         if (!status)
2450                 be_link_status_update(adapter, link_status);
2451
2452         be_roce_dev_open(adapter);
2453         return 0;
2454 err:
2455         be_close(adapter->netdev);
2456         return -EIO;
2457 }
2458
2459 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2460 {
2461         struct be_dma_mem cmd;
2462         int status = 0;
2463         u8 mac[ETH_ALEN];
2464
2465         memset(mac, 0, ETH_ALEN);
2466
2467         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2468         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2469                                     GFP_KERNEL);
2470         if (cmd.va == NULL)
2471                 return -1;
2472         memset(cmd.va, 0, cmd.size);
2473
2474         if (enable) {
2475                 status = pci_write_config_dword(adapter->pdev,
2476                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2477                 if (status) {
2478                         dev_err(&adapter->pdev->dev,
2479                                 "Could not enable Wake-on-lan\n");
2480                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2481                                           cmd.dma);
2482                         return status;
2483                 }
2484                 status = be_cmd_enable_magic_wol(adapter,
2485                                 adapter->netdev->dev_addr, &cmd);
2486                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2487                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2488         } else {
2489                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2490                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2491                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2492         }
2493
2494         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2495         return status;
2496 }
2497
2498 /*
2499  * Generate a seed MAC address from the PF MAC Address using jhash.
2500  * MAC Addresses for VFs are assigned incrementally starting from the seed.
2501  * These addresses are programmed in the ASIC by the PF and the VF driver
2502  * queries for the MAC address during its probe.
2503  */
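/* For illustration: with a generated seed of 02:aa:bb:cc:dd:00, VF0 gets
 * the seed itself, VF1 gets ...:01, and so on. Only mac[5] is incremented,
 * which assumes the VF count stays below 256.
 */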
2504 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2505 {
2506         u32 vf;
2507         int status = 0;
2508         u8 mac[ETH_ALEN];
2509         struct be_vf_cfg *vf_cfg;
2510
2511         be_vf_eth_addr_generate(adapter, mac);
2512
2513         for_all_vfs(adapter, vf_cfg, vf) {
2514                 if (lancer_chip(adapter)) {
2515                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2516                 } else {
2517                         status = be_cmd_pmac_add(adapter, mac,
2518                                                  vf_cfg->if_handle,
2519                                                  &vf_cfg->pmac_id, vf + 1);
2520                 }
2521
2522                 if (status)
2523                         dev_err(&adapter->pdev->dev,
2524                         "MAC address assignment failed for VF %d\n", vf);
2525                 else
2526                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2527
2528                 mac[5] += 1;
2529         }
2530         return status;
2531 }
2532
2533 static void be_vf_clear(struct be_adapter *adapter)
2534 {
2535         struct be_vf_cfg *vf_cfg;
2536         u32 vf;
2537
2538         if (be_find_vfs(adapter, ASSIGNED)) {
2539                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2540                 goto done;
2541         }
2542
2543         for_all_vfs(adapter, vf_cfg, vf) {
2544                 if (lancer_chip(adapter))
2545                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2546                 else
2547                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2548                                         vf_cfg->pmac_id, vf + 1);
2549
2550                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2551         }
2552         pci_disable_sriov(adapter->pdev);
2553 done:
2554         kfree(adapter->vf_cfg);
2555         adapter->num_vfs = 0;
2556 }
2557
2558 static int be_clear(struct be_adapter *adapter)
2559 {
2560         int i = 1;
2561
2562         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2563                 cancel_delayed_work_sync(&adapter->work);
2564                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2565         }
2566
2567         if (sriov_enabled(adapter))
2568                 be_vf_clear(adapter);
2569
2570         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2571                 be_cmd_pmac_del(adapter, adapter->if_handle,
2572                         adapter->pmac_id[i], 0);
2573
2574         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2575
2576         be_mcc_queues_destroy(adapter);
2577         be_rx_cqs_destroy(adapter);
2578         be_tx_queues_destroy(adapter);
2579         be_evt_queues_destroy(adapter);
2580
2581         be_msix_disable(adapter);
2582         return 0;
2583 }
2584
2585 static int be_vf_setup_init(struct be_adapter *adapter)
2586 {
2587         struct be_vf_cfg *vf_cfg;
2588         int vf;
2589
2590         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2591                                   GFP_KERNEL);
2592         if (!adapter->vf_cfg)
2593                 return -ENOMEM;
2594
2595         for_all_vfs(adapter, vf_cfg, vf) {
2596                 vf_cfg->if_handle = -1;
2597                 vf_cfg->pmac_id = -1;
2598         }
2599         return 0;
2600 }
2601
2602 static int be_vf_setup(struct be_adapter *adapter)
2603 {
2604         struct be_vf_cfg *vf_cfg;
2605         struct device *dev = &adapter->pdev->dev;
2606         u32 cap_flags, en_flags, vf;
2607         u16 def_vlan, lnk_speed;
2608         int status, enabled_vfs;
2609
2610         enabled_vfs = be_find_vfs(adapter, ENABLED);
2611         if (enabled_vfs) {
2612                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2613                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2614                 return 0;
2615         }
2616
2617         if (num_vfs > adapter->dev_num_vfs) {
2618                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2619                          adapter->dev_num_vfs, num_vfs);
2620                 num_vfs = adapter->dev_num_vfs;
2621         }
2622
2623         status = pci_enable_sriov(adapter->pdev, num_vfs);
2624         if (!status) {
2625                 adapter->num_vfs = num_vfs;
2626         } else {
2627                 /* Platform doesn't support SRIOV though device supports it */
2628                 dev_warn(dev, "SRIOV enable failed\n");
2629                 return 0;
2630         }
2631
2632         status = be_vf_setup_init(adapter);
2633         if (status)
2634                 goto err;
2635
2636         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2637                                 BE_IF_FLAGS_MULTICAST;
2638         for_all_vfs(adapter, vf_cfg, vf) {
2639                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2640                                           &vf_cfg->if_handle, vf + 1);
2641                 if (status)
2642                         goto err;
2643         }
2644
2645         if (!enabled_vfs) {
2646                 status = be_vf_eth_addr_config(adapter);
2647                 if (status)
2648                         goto err;
2649         }
2650
2651         for_all_vfs(adapter, vf_cfg, vf) {
2652                 lnk_speed = 1000;
2653                 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
2654                 if (status)
2655                         goto err;
2656                 vf_cfg->tx_rate = lnk_speed * 10;
2657
2658                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2659                                 vf + 1, vf_cfg->if_handle);
2660                 if (status)
2661                         goto err;
2662                 vf_cfg->def_vid = def_vlan;
2663         }
2664         return 0;
2665 err:
2666         return status;
2667 }
2668
2669 static void be_setup_init(struct be_adapter *adapter)
2670 {
2671         adapter->vlan_prio_bmap = 0xff;
2672         adapter->phy.link_speed = -1;
2673         adapter->if_handle = -1;
2674         adapter->be3_native = false;
2675         adapter->promiscuous = false;
2676         adapter->eq_next_idx = 0;
2677 }
2678
2679 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2680                            bool *active_mac, u32 *pmac_id)
2681 {
2682         int status = 0;
2683
2684         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2685                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2686                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2687                         *active_mac = true;
2688                 else
2689                         *active_mac = false;
2690
2691                 return status;
2692         }
2693
2694         if (lancer_chip(adapter)) {
2695                 status = be_cmd_get_mac_from_list(adapter, mac,
2696                                                   active_mac, pmac_id, 0);
2697                 if (*active_mac) {
2698                         status = be_cmd_mac_addr_query(adapter, mac, false,
2699                                                        if_handle, *pmac_id);
2700                 }
2701         } else if (be_physfn(adapter)) {
2702                 /* For BE3, for PF get permanent MAC */
2703                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2704                 *active_mac = false;
2705         } else {
2706                 /* For BE3, for VF get soft MAC assigned by PF */
2707                 status = be_cmd_mac_addr_query(adapter, mac, false,
2708                                                if_handle, 0);
2709                 *active_mac = true;
2710         }
2711         return status;
2712 }
2713
2714 /* Routine to query per function resource limits */
2715 static int be_get_config(struct be_adapter *adapter)
2716 {
2717         int pos;
2718         u16 dev_num_vfs;
2719
2720         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2721         if (pos) {
2722                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2723                                      &dev_num_vfs);
2724                 if (!lancer_chip(adapter))
2725                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2726                 adapter->dev_num_vfs = dev_num_vfs;
2727         }
2728         return 0;
2729 }
2730
2731 static int be_setup(struct be_adapter *adapter)
2732 {
2733         struct device *dev = &adapter->pdev->dev;
2734         u32 cap_flags, en_flags;
2735         u32 tx_fc, rx_fc;
2736         int status;
2737         u8 mac[ETH_ALEN];
2738         bool active_mac;
2739
2740         be_setup_init(adapter);
2741
2742         be_get_config(adapter);
2743
2744         be_cmd_req_native_mode(adapter);
2745
2746         be_msix_enable(adapter);
2747
2748         status = be_evt_queues_create(adapter);
2749         if (status)
2750                 goto err;
2751
2752         status = be_tx_cqs_create(adapter);
2753         if (status)
2754                 goto err;
2755
2756         status = be_rx_cqs_create(adapter);
2757         if (status)
2758                 goto err;
2759
2760         status = be_mcc_queues_create(adapter);
2761         if (status)
2762                 goto err;
2763
2764         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2765                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2766         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2767                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2768
2769         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2770                 cap_flags |= BE_IF_FLAGS_RSS;
2771                 en_flags |= BE_IF_FLAGS_RSS;
2772         }
2773
2774         if (lancer_chip(adapter) && !be_physfn(adapter)) {
2775                 en_flags = BE_IF_FLAGS_UNTAGGED |
2776                             BE_IF_FLAGS_BROADCAST |
2777                             BE_IF_FLAGS_MULTICAST;
2778                 cap_flags = en_flags;
2779         }
2780
2781         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2782                                   &adapter->if_handle, 0);
2783         if (status != 0)
2784                 goto err;
2785
2786         memset(mac, 0, ETH_ALEN);
2787         active_mac = false;
2788         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2789                                  &active_mac, &adapter->pmac_id[0]);
2790         if (status != 0)
2791                 goto err;
2792
2793         if (!active_mac) {
2794                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2795                                          &adapter->pmac_id[0], 0);
2796                 if (status != 0)
2797                         goto err;
2798         }
2799
2800         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2801                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2802                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2803         }
2804
2805         status = be_tx_qs_create(adapter);
2806         if (status)
2807                 goto err;
2808
2809         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2810
2811         if (adapter->vlans_added)
2812                 be_vid_config(adapter);
2813
2814         be_set_rx_mode(adapter->netdev);
2815
2816         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2817
2818         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2819                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2820                                         adapter->rx_fc);
2821
2822         if (be_physfn(adapter) && num_vfs) {
2823                 if (adapter->dev_num_vfs)
2824                         be_vf_setup(adapter);
2825                 else
2826                         dev_warn(dev, "device doesn't support SRIOV\n");
2827         }
2828
2829         be_cmd_get_phy_info(adapter);
2830         if (be_pause_supported(adapter))
2831                 adapter->phy.fc_autoneg = 1;
2832
2833         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2834         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2835         return 0;
2836 err:
2837         be_clear(adapter);
2838         return status;
2839 }
2840
2841 #ifdef CONFIG_NET_POLL_CONTROLLER
2842 static void be_netpoll(struct net_device *netdev)
2843 {
2844         struct be_adapter *adapter = netdev_priv(netdev);
2845         struct be_eq_obj *eqo;
2846         int i;
2847
2848         for_all_evt_queues(adapter, eqo, i)
2849                 event_handle(eqo);
2850
2851         return;
2852 }
2853 #endif
2854
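/* Firmware flashing support. A UFI image is a file header followed by
 * flash-section descriptors; the two 16-byte halves of flash_cookie below
 * form the 32-byte signature that get_fsec_info() scans for, at 32-byte
 * strides, to locate a section header.
 */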
2855 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2856 char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
2857
2858 static bool be_flash_redboot(struct be_adapter *adapter,
2859                         const u8 *p, u32 img_start, int image_size,
2860                         int hdr_size)
2861 {
2862         u32 crc_offset;
2863         u8 flashed_crc[4];
2864         int status;
2865
2866         crc_offset = hdr_size + img_start + image_size - 4;
2867
2868         p += crc_offset;
2869
2870         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2871                         (image_size - 4));
2872         if (status) {
2873                 dev_err(&adapter->pdev->dev,
2874                 "could not get crc from flash, not flashing redboot\n");
2875                 return false;
2876         }
2877
2878         /* update redboot only if crc does not match */
2879         if (!memcmp(flashed_crc, p, 4))
2880                 return false;
2881         else
2882                 return true;
2883 }
2884
2885 static bool phy_flashing_required(struct be_adapter *adapter)
2886 {
2887         return (adapter->phy.phy_type == TN_8022 &&
2888                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2889 }
2890
2891 static bool is_comp_in_ufi(struct be_adapter *adapter,
2892                            struct flash_section_info *fsec, int type)
2893 {
2894         int i = 0, img_type = 0;
2895         struct flash_section_info_g2 *fsec_g2 = NULL;
2896
2897         if (adapter->generation != BE_GEN3)
2898                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2899
2900         for (i = 0; i < MAX_FLASH_COMP; i++) {
2901                 if (fsec_g2)
2902                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2903                 else
2904                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2905
2906                 if (img_type == type)
2907                         return true;
2908         }
2909         return false;
2911 }
2912
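     /* Scan past the file and image headers in 32-byte steps, looking
      * for the flash directory cookie that marks the start of the
      * section-info table.
      */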
2913 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2914                                                 int header_size,
2915                                                 const struct firmware *fw)
2916 {
2917         struct flash_section_info *fsec = NULL;
2918         const u8 *p = fw->data;
2919
2920         p += header_size;
2921         while (p < (fw->data + fw->size)) {
2922                 fsec = (struct flash_section_info *)p;
2923                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2924                         return fsec;
2925                 p += 32;
2926         }
2927         return NULL;
2928 }
2929
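     /* Flash every component present in the UFI file, in 32KB chunks.
      * The tables below give the flash offset, op-type, max size and
      * image type of each GEN3/GEN2 component.  NC-SI firmware is
      * skipped when the running FW is older than 3.102.148.0, PHY
      * firmware is flashed only for the TN_8022 10GBase-T PHY, and
      * boot code only when its CRC differs from what is in flash.
      */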
2930 static int be_flash_data(struct be_adapter *adapter,
2931                          const struct firmware *fw,
2932                          struct be_dma_mem *flash_cmd,
2933                          int num_of_images)
2935 {
2936         int status = 0, i, filehdr_size = 0;
2937         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2938         u32 total_bytes = 0, flash_op;
2939         int num_bytes;
2940         const u8 *p = fw->data;
2941         struct be_cmd_write_flashrom *req = flash_cmd->va;
2942         const struct flash_comp *pflashcomp;
2943         int num_comp, hdr_size;
2944         struct flash_section_info *fsec = NULL;
2945
2946         struct flash_comp gen3_flash_types[] = {
2947                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2948                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2949                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2950                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2951                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2952                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2953                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2954                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2955                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2956                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2957                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2958                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2959                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2960                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2961                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2962                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2963                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2964                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2965                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2966                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2967         };
2968
2969         struct flash_comp gen2_flash_types[] = {
2970                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2971                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2972                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2973                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2974                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2975                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2976                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2977                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2978                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2979                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2980                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2981                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2982                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2983                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2984                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2985                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2986         };
2987
2988         if (adapter->generation == BE_GEN3) {
2989                 pflashcomp = gen3_flash_types;
2990                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2991                 num_comp = ARRAY_SIZE(gen3_flash_types);
2992         } else {
2993                 pflashcomp = gen2_flash_types;
2994                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2995                 num_comp = ARRAY_SIZE(gen2_flash_types);
2996         }
2997         /* Get flash section info */
2998         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2999         if (!fsec) {
3000                 dev_err(&adapter->pdev->dev,
3001                         "Invalid cookie; UFI file may be corrupted\n");
3002                 return -EINVAL;
3003         }
3004         for (i = 0; i < num_comp; i++) {
3005                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3006                         continue;
3007
3008                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3009                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3010                         continue;
3011
3012                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3013                     !phy_flashing_required(adapter))
3014                         continue;
3015 
3016                 hdr_size = filehdr_size + img_hdrs_size;
3019
3020                 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3021                     (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3022                                        pflashcomp[i].size, hdr_size)))
3023                         continue;
3024
3025                 /* Flash the component */
3026                 p = fw->data;
3027                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3028                 if (p + pflashcomp[i].size > fw->data + fw->size)
3029                         return -EINVAL;
3030                 total_bytes = pflashcomp[i].size;
3031                 while (total_bytes) {
3032                         num_bytes = min_t(u32, total_bytes, 32 * 1024);
3036                         total_bytes -= num_bytes;
3037                         if (!total_bytes) {
3038                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3039                                         flash_op = FLASHROM_OPER_PHY_FLASH;
3040                                 else
3041                                         flash_op = FLASHROM_OPER_FLASH;
3042                         } else {
3043                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3044                                         flash_op = FLASHROM_OPER_PHY_SAVE;
3045                                 else
3046                                         flash_op = FLASHROM_OPER_SAVE;
3047                         }
3048                         memcpy(req->params.data_buf, p, num_bytes);
3049                         p += num_bytes;
3050                         status = be_cmd_write_flashrom(adapter, flash_cmd,
3051                                 pflashcomp[i].optype, flash_op, num_bytes);
3052                         if (status) {
3053                                 if (status == ILLEGAL_IOCTL_REQ &&
3054                                     pflashcomp[i].optype == OPTYPE_PHY_FW)
3055                                         break;
3056                                 dev_err(&adapter->pdev->dev,
3057                                         "cmd to write to flash rom failed.\n");
3058                                 return -EIO;
3060                         }
3061                 }
3062         }
3063         return 0;
3064 }
3065
3066 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3067 {
3068         if (fhdr == NULL)
3069                 return 0;
3070         if (fhdr->build[0] == '3')
3071                 return BE_GEN3;
3072         else if (fhdr->build[0] == '2')
3073                 return BE_GEN2;
3074         else
3075                 return 0;
3076 }
3077
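     /* Poll the SLIPORT physdev-control register until the in-progress
      * (INP) bit clears, waiting at most SLIPORT_IDLE_TIMEOUT seconds.
      */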
3078 static int lancer_wait_idle(struct be_adapter *adapter)
3079 {
3080 #define SLIPORT_IDLE_TIMEOUT 30
3081         u32 reg_val;
3082         int status = 0, i;
3083
3084         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3085                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3086                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3087                         break;
3088
3089                 ssleep(1);
3090         }
3091
3092         if (i == SLIPORT_IDLE_TIMEOUT)
3093                 status = -ETIMEDOUT;
3094
3095         return status;
3096 }
3097
3098 static int lancer_fw_reset(struct be_adapter *adapter)
3099 {
3100         int status = 0;
3101
3102         status = lancer_wait_idle(adapter);
3103         if (status)
3104                 return status;
3105
3106         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3107                   PHYSDEV_CONTROL_OFFSET);
3108
3109         return status;
3110 }
3111
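     /* Stream a Lancer FW image to the "/prg" object in 32KB chunks
      * via WRITE_OBJECT, then issue a zero-length write to commit it.
      * Depending on the change-status that FW reports, a SLIPORT
      * function reset (done here) or a full system reboot may be
      * needed before the new image becomes active.
      */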
3112 static int lancer_fw_download(struct be_adapter *adapter,
3113                                 const struct firmware *fw)
3114 {
3115 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3116 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3117         struct be_dma_mem flash_cmd;
3118         const u8 *data_ptr = NULL;
3119         u8 *dest_image_ptr = NULL;
3120         size_t image_size = 0;
3121         u32 chunk_size = 0;
3122         u32 data_written = 0;
3123         u32 offset = 0;
3124         int status = 0;
3125         u8 add_status = 0;
3126         u8 change_status;
3127
3128         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3129                 dev_err(&adapter->pdev->dev,
3130                         "FW image not properly aligned; length must be 4-byte aligned\n");
3132                 status = -EINVAL;
3133                 goto lancer_fw_exit;
3134         }
3135
3136         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3137                                 + LANCER_FW_DOWNLOAD_CHUNK;
3138         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3139                                                 &flash_cmd.dma, GFP_KERNEL);
3140         if (!flash_cmd.va) {
3141                 status = -ENOMEM;
3142                 dev_err(&adapter->pdev->dev,
3143                         "Memory allocation failure while flashing\n");
3144                 goto lancer_fw_exit;
3145         }
3146
3147         dest_image_ptr = flash_cmd.va +
3148                                 sizeof(struct lancer_cmd_req_write_object);
3149         image_size = fw->size;
3150         data_ptr = fw->data;
3151
3152         while (image_size) {
3153                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3154
3155                 /* Copy the image chunk content. */
3156                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3157
3158                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3159                                                  chunk_size, offset,
3160                                                  LANCER_FW_DOWNLOAD_LOCATION,
3161                                                  &data_written, &change_status,
3162                                                  &add_status);
3163                 if (status)
3164                         break;
3165
3166                 offset += data_written;
3167                 data_ptr += data_written;
3168                 image_size -= data_written;
3169         }
3170
3171         if (!status) {
3172                 /* Commit the FW written */
3173                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3174                                                  0, offset,
3175                                                  LANCER_FW_DOWNLOAD_LOCATION,
3176                                                  &data_written, &change_status,
3177                                                  &add_status);
3178         }
3179
3180         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3181                                 flash_cmd.dma);
3182         if (status) {
3183                 dev_err(&adapter->pdev->dev,
3184                         "Firmware load error; status: 0x%x, additional status: 0x%x\n",
3185                         status, add_status);
3187                 goto lancer_fw_exit;
3188         }
3189
3190         if (change_status == LANCER_FW_RESET_NEEDED) {
3191                 status = lancer_fw_reset(adapter);
3192                 if (status) {
3193                         dev_err(&adapter->pdev->dev,
3194                                 "Adapter busy for FW reset; new FW will not be active\n");
3196                         goto lancer_fw_exit;
3197                 }
3198         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3199                 dev_err(&adapter->pdev->dev,
3200                         "System reboot required for new FW to be active\n");
3202         }
3203
3204         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3205 lancer_fw_exit:
3206         return status;
3207 }
3208
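     /* Flash a BE2/BE3 UFI file.  The UFI generation encoded in the
      * file header must match the adapter generation; a GEN3 file may
      * carry several image headers, of which only images with
      * imageid 1 are flashed.
      */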
3209 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3210 {
3211         struct flash_file_hdr_g2 *fhdr;
3212         struct flash_file_hdr_g3 *fhdr3;
3213         struct image_hdr *img_hdr_ptr = NULL;
3214         struct be_dma_mem flash_cmd;
3215         const u8 *p;
3216         int status = 0, i = 0, num_imgs = 0;
3217
3218         p = fw->data;
3219         fhdr = (struct flash_file_hdr_g2 *) p;
3220
3221         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3222         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3223                                           &flash_cmd.dma, GFP_KERNEL);
3224         if (!flash_cmd.va) {
3225                 status = -ENOMEM;
3226                 dev_err(&adapter->pdev->dev,
3227                         "Memory allocation failure while flashing\n");
3228                 goto be_fw_exit;
3229         }
3230
3231         if ((adapter->generation == BE_GEN3) &&
3232                         (get_ufigen_type(fhdr) == BE_GEN3)) {
3233                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3234                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3235                 for (i = 0; i < num_imgs; i++) {
3236                         img_hdr_ptr = (struct image_hdr *) (fw->data +
3237                                         (sizeof(struct flash_file_hdr_g3) +
3238                                          i * sizeof(struct image_hdr)));
3239                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3240                                 status = be_flash_data(adapter, fw, &flash_cmd,
3241                                                         num_imgs);
3242                 }
3243         } else if ((adapter->generation == BE_GEN2) &&
3244                         (get_ufigen_type(fhdr) == BE_GEN2)) {
3245                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3246         } else {
3247                 dev_err(&adapter->pdev->dev,
3248                         "UFI and Interface are not compatible for flashing\n");
3249                 status = -EINVAL;
3250         }
3251
3252         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3253                           flash_cmd.dma);
3254         if (status) {
3255                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3256                 goto be_fw_exit;
3257         }
3258
3259         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3260
3261 be_fw_exit:
3262         return status;
3263 }
3264
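     /* Entry point for FW flashing, typically reached via ethtool's
      * flash-device op.  The interface must be up, presumably so that
      * the periodic error-recovery logic keeps running while the flash
      * operation is in progress.
      */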
3265 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3266 {
3267         const struct firmware *fw;
3268         int status;
3269
3270         if (!netif_running(adapter->netdev)) {
3271                 dev_err(&adapter->pdev->dev,
3272                         "Firmware load not allowed (interface is down)\n");
3273                 return -ENETDOWN;
3274         }
3275
3276         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3277         if (status)
3278                 goto fw_exit;
3279
3280         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3281
3282         if (lancer_chip(adapter))
3283                 status = lancer_fw_download(adapter, fw);
3284         else
3285                 status = be_fw_download(adapter, fw);
3286
3287 fw_exit:
3288         release_firmware(fw);
3289         return status;
3290 }
3291
3292 static const struct net_device_ops be_netdev_ops = {
3293         .ndo_open               = be_open,
3294         .ndo_stop               = be_close,
3295         .ndo_start_xmit         = be_xmit,
3296         .ndo_set_rx_mode        = be_set_rx_mode,
3297         .ndo_set_mac_address    = be_mac_addr_set,
3298         .ndo_change_mtu         = be_change_mtu,
3299         .ndo_get_stats64        = be_get_stats64,
3300         .ndo_validate_addr      = eth_validate_addr,
3301         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3302         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3303         .ndo_set_vf_mac         = be_set_vf_mac,
3304         .ndo_set_vf_vlan        = be_set_vf_vlan,
3305         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3306         .ndo_get_vf_config      = be_get_vf_config,
3307 #ifdef CONFIG_NET_POLL_CONTROLLER
3308         .ndo_poll_controller    = be_netpoll,
3309 #endif
3310 };
3311
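     /* Set up netdev features (checksum, TSO and VLAN offloads, plus
      * RX hashing on multi-RXQ adapters), install the netdev/ethtool
      * ops and register one NAPI context per event queue.
      */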
3312 static void be_netdev_init(struct net_device *netdev)
3313 {
3314         struct be_adapter *adapter = netdev_priv(netdev);
3315         struct be_eq_obj *eqo;
3316         int i;
3317
3318         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3319                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3320                 NETIF_F_HW_VLAN_TX;
3321         if (be_multi_rxq(adapter))
3322                 netdev->hw_features |= NETIF_F_RXHASH;
3323
3324         netdev->features |= netdev->hw_features |
3325                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3326
3327         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3328                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3329
3330         netdev->priv_flags |= IFF_UNICAST_FLT;
3331
3332         netdev->flags |= IFF_MULTICAST;
3333
3334         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3335
3336         netdev->netdev_ops = &be_netdev_ops;
3337
3338         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3339
3340         for_all_evt_queues(adapter, eqo, i)
3341                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3342 }
3343
3344 static void be_unmap_pci_bars(struct be_adapter *adapter)
3345 {
3346         if (adapter->csr)
3347                 iounmap(adapter->csr);
3348         if (adapter->db)
3349                 iounmap(adapter->db);
3350         if (adapter->roce_db.base)
3351                 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3352 }
3353
3354 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3355 {
3356         struct pci_dev *pdev = adapter->pdev;
3357         u8 __iomem *addr;
3358
3359         addr = pci_iomap(pdev, 2, 0);
3360         if (addr == NULL)
3361                 return -ENOMEM;
3362
3363         adapter->roce_db.base = addr;
3364         adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3365         adapter->roce_db.size = 8192;
3366         adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3367         return 0;
3368 }
3369
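     /* Map the PCI BARs the driver uses.  On Lancer, BAR 0 holds the
      * doorbells and SLI type-3 functions additionally map the RoCE
      * BAR.  On BE2/BE3 the PF maps the CSR BAR (2); the doorbell BAR
      * is 4 except for GEN3 VFs, which use BAR 0.
      */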
3370 static int be_map_pci_bars(struct be_adapter *adapter)
3371 {
3372         u8 __iomem *addr;
3373         int db_reg;
3374
3375         if (lancer_chip(adapter)) {
3376                 if (be_type_2_3(adapter)) {
3377                         addr = ioremap_nocache(
3378                                         pci_resource_start(adapter->pdev, 0),
3379                                         pci_resource_len(adapter->pdev, 0));
3380                         if (addr == NULL)
3381                                 return -ENOMEM;
3382                         adapter->db = addr;
3383                 }
3384                 if (adapter->if_type == SLI_INTF_TYPE_3) {
3385                         if (lancer_roce_map_pci_bars(adapter))
3386                                 goto pci_map_err;
3387                 }
3388                 return 0;
3389         }
3390
3391         if (be_physfn(adapter)) {
3392                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3393                                 pci_resource_len(adapter->pdev, 2));
3394                 if (addr == NULL)
3395                         return -ENOMEM;
3396                 adapter->csr = addr;
3397         }
3398
3399         if (adapter->generation == BE_GEN2 || be_physfn(adapter))
3400                 db_reg = 4;
3401         else
3402                 db_reg = 0;
3407         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3408                                 pci_resource_len(adapter->pdev, db_reg));
3409         if (addr == NULL)
3410                 goto pci_map_err;
3411         adapter->db = addr;
3412         if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3413                 adapter->roce_db.size = 4096;
3414                 adapter->roce_db.io_addr =
3415                                 pci_resource_start(adapter->pdev, db_reg);
3416                 adapter->roce_db.total_size =
3417                                 pci_resource_len(adapter->pdev, db_reg);
3418         }
3419         return 0;
3420 pci_map_err:
3421         be_unmap_pci_bars(adapter);
3422         return -ENOMEM;
3423 }
3424
3425 static void be_ctrl_cleanup(struct be_adapter *adapter)
3426 {
3427         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3428
3429         be_unmap_pci_bars(adapter);
3430
3431         if (mem->va)
3432                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3433                                   mem->dma);
3434
3435         mem = &adapter->rx_filter;
3436         if (mem->va)
3437                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3438                                   mem->dma);
3439         kfree(adapter->pmac_id);
3440 }
3441
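     /* Map BARs and set up the control path: a 16-byte-aligned mailbox
      * for bootstrap commands, a DMA buffer for RX-filter commands and
      * the pmac_id table (one extra entry for the primary MAC).
      */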
3442 static int be_ctrl_init(struct be_adapter *adapter)
3443 {
3444         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3445         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3446         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3447         int status;
3448
3449         status = be_map_pci_bars(adapter);
3450         if (status)
3451                 goto done;
3452
3453         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3454         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3455                                                 mbox_mem_alloc->size,
3456                                                 &mbox_mem_alloc->dma,
3457                                                 GFP_KERNEL);
3458         if (!mbox_mem_alloc->va) {
3459                 status = -ENOMEM;
3460                 goto unmap_pci_bars;
3461         }
3462         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3463         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3464         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3465         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3466
3467         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3468         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3469                                         &rx_filter->dma, GFP_KERNEL);
3470         if (rx_filter->va == NULL) {
3471                 status = -ENOMEM;
3472                 goto free_mbox;
3473         }
3474         memset(rx_filter->va, 0, rx_filter->size);
3475
3476         /* primary mac needs 1 pmac entry */
3477         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3478                                    sizeof(*adapter->pmac_id), GFP_KERNEL);
3479         if (!adapter->pmac_id) {
3480                 status = -ENOMEM;
                     goto free_rx_filter;
             }
3481
3482         mutex_init(&adapter->mbox_lock);
3483         spin_lock_init(&adapter->mcc_lock);
3484         spin_lock_init(&adapter->mcc_cq_lock);
3485
3486         init_completion(&adapter->flash_compl);
3487         pci_save_state(adapter->pdev);
3488         return 0;
3489
     free_rx_filter:
             dma_free_coherent(&adapter->pdev->dev, rx_filter->size,
                               rx_filter->va, rx_filter->dma);

3490 free_mbox:
3491         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3492                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3493
3494 unmap_pci_bars:
3495         be_unmap_pci_bars(adapter);
3496
3497 done:
3498         return status;
3499 }
3500
3501 static void be_stats_cleanup(struct be_adapter *adapter)
3502 {
3503         struct be_dma_mem *cmd = &adapter->stats_cmd;
3504
3505         if (cmd->va)
3506                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3507                                   cmd->va, cmd->dma);
3508 }
3509
3510 static int be_stats_init(struct be_adapter *adapter)
3511 {
3512         struct be_dma_mem *cmd = &adapter->stats_cmd;
3513
3514         if (adapter->generation == BE_GEN2) {
3515                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3516         } else {
3517                 if (lancer_chip(adapter))
3518                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3519                 else
3520                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3521         }
3522         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3523                                      GFP_KERNEL);
3524         if (cmd->va == NULL)
3525                 return -ENOMEM;
3526         memset(cmd->va, 0, cmd->size);
3527         return 0;
3528 }
3529
3530 static void __devexit be_remove(struct pci_dev *pdev)
3531 {
3532         struct be_adapter *adapter = pci_get_drvdata(pdev);
3533
3534         if (!adapter)
3535                 return;
3536
3537         be_roce_dev_remove(adapter);
3538
3539         cancel_delayed_work_sync(&adapter->func_recovery_work);
3540
3541         unregister_netdev(adapter->netdev);
3542
3543         be_clear(adapter);
3544
3545         /* tell fw we're done with firing cmds */
3546         be_cmd_fw_clean(adapter);
3547
3548         be_stats_cleanup(adapter);
3549
3550         be_ctrl_cleanup(adapter);
3551
3552         pci_disable_pcie_error_reporting(pdev);
3553
3554         pci_set_drvdata(pdev, NULL);
3555         pci_release_regions(pdev);
3556         pci_disable_device(pdev);
3557
3558         free_netdev(adapter->netdev);
3559 }
3560
3561 bool be_is_wol_supported(struct be_adapter *adapter)
3562 {
3563         return (adapter->wol_cap & BE_WOL_CAP) &&
3564                 !be_is_wol_excluded(adapter);
3565 }
3566
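     /* Query the FW's extended FAT capabilities and return the debug
      * level configured for UART tracing, or 0 on any failure.
      */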
3567 u32 be_get_fw_log_level(struct be_adapter *adapter)
3568 {
3569         struct be_dma_mem extfat_cmd;
3570         struct be_fat_conf_params *cfgs;
3571         int status;
3572         u32 level = 0;
3573         int j;
3574
3575         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3576         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3577         extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
3578                                            extfat_cmd.size, &extfat_cmd.dma,
                                                GFP_KERNEL);
3579
3580         if (!extfat_cmd.va) {
3581                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3582                         __func__);
3583                 goto err;
3584         }
3585
3586         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3587         if (!status) {
3588                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3589                                                 sizeof(struct be_cmd_resp_hdr));
3590                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3591                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3592                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3593                 }
3594         }
3595         dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size,
3596                           extfat_cmd.va, extfat_cmd.dma);
3597 err:
3598         return level;
3599 }

3600 static int be_get_initial_config(struct be_adapter *adapter)
3601 {
3602         int status;
3603         u32 level;
3604
3605         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3606                         &adapter->function_mode, &adapter->function_caps);
3607         if (status)
3608                 return status;
3609
3610         if (adapter->function_mode & FLEX10_MODE)
3611                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3612         else
3613                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3614
3615         if (be_physfn(adapter))
3616                 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3617         else
3618                 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3619
3620         status = be_cmd_get_cntl_attributes(adapter);
3621         if (status)
3622                 return status;
3623
3624         status = be_cmd_get_acpi_wol_cap(adapter);
3625         if (status) {
3626                 /* in case of a failure to get wol capabilities
3627                  * check the exclusion list to determine WOL capability */
3628                 if (!be_is_wol_excluded(adapter))
3629                         adapter->wol_cap |= BE_WOL_CAP;
3630         }
3631
3632         if (be_is_wol_supported(adapter))
3633                 adapter->wol = true;
3634
3635         /* Must be a power of 2 or else MODULO will BUG_ON */
3636         adapter->be_get_temp_freq = 64;
3637
3638         level = be_get_fw_log_level(adapter);
3639         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3640
3641         return 0;
3642 }
3643
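     /* Derive the adapter generation from the PCI device id.  For the
      * SLI-based parts, validate the SLI_INTF register and record the
      * SLI family and interface type; also note whether this function
      * is a VF.
      */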
3644 static int be_dev_type_check(struct be_adapter *adapter)
3645 {
3646         struct pci_dev *pdev = adapter->pdev;
3647         u32 sli_intf = 0;
3648
3649         switch (pdev->device) {
3650         case BE_DEVICE_ID1:
3651         case OC_DEVICE_ID1:
3652                 adapter->generation = BE_GEN2;
3653                 break;
3654         case BE_DEVICE_ID2:
3655         case OC_DEVICE_ID2:
3656                 adapter->generation = BE_GEN3;
3657                 break;
3658         case OC_DEVICE_ID3:
3659         case OC_DEVICE_ID4:
3660                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3661                 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3662                                                 SLI_INTF_IF_TYPE_SHIFT;
3665                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3666                         !be_type_2_3(adapter)) {
3667                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3668                         return -EINVAL;
3669                 }
3670                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3671                                          SLI_INTF_FAMILY_SHIFT);
3672                 adapter->generation = BE_GEN3;
3673                 break;
3674         case OC_DEVICE_ID5:
3675                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3676                 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3677                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3678                         return -EINVAL;
3679                 }
3680                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3681                                          SLI_INTF_FAMILY_SHIFT);
3682                 adapter->generation = BE_GEN3;
3683                 break;
3684         default:
3685                 adapter->generation = 0;
3686         }
3687
3688         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3689         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3690         return 0;
3691 }
3692
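     /* Recover a Lancer function after a SLIPORT error: wait for the
      * ready state, tear the function down, clear the error flags and
      * set everything up again, reopening the netdev if it was running.
      */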
3693 static int lancer_recover_func(struct be_adapter *adapter)
3694 {
3695         int status;
3696
3697         status = lancer_test_and_set_rdy_state(adapter);
3698         if (status)
3699                 goto err;
3700
3701         if (netif_running(adapter->netdev))
3702                 be_close(adapter->netdev);
3703
3704         be_clear(adapter);
3705
3706         adapter->hw_error = false;
3707         adapter->fw_timeout = false;
3708
3709         status = be_setup(adapter);
3710         if (status)
3711                 goto err;
3712
3713         if (netif_running(adapter->netdev)) {
3714                 status = be_open(adapter->netdev);
3715                 if (status)
3716                         goto err;
3717         }
3718
3719         dev_info(&adapter->pdev->dev,
3720                  "Adapter SLIPORT recovery succeeded\n");
3721         return 0;
3722 err:
3723         dev_err(&adapter->pdev->dev,
3724                 "Adapter SLIPORT recovery failed\n");
3725
3726         return status;
3727 }
3728
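     /* Runs every second to detect adapter errors.  On a Lancer HW
      * error (with no EEH error pending) the netdev is detached and
      * SLIPORT recovery is attempted; the netdev is re-attached only
      * if recovery succeeds.
      */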
3729 static void be_func_recovery_task(struct work_struct *work)
3730 {
3731         struct be_adapter *adapter =
3732                 container_of(work, struct be_adapter, func_recovery_work.work);
3733         int status;
3734
3735         be_detect_error(adapter);
3736
3737         if (adapter->hw_error && lancer_chip(adapter)) {
3739                 if (adapter->eeh_error)
3740                         goto out;
3741
3742                 rtnl_lock();
3743                 netif_device_detach(adapter->netdev);
3744                 rtnl_unlock();
3745
3746                 status = lancer_recover_func(adapter);
3747
3748                 if (!status)
3749                         netif_device_attach(adapter->netdev);
3750         }
3751
3752 out:
3753         schedule_delayed_work(&adapter->func_recovery_work,
3754                               msecs_to_jiffies(1000));
3755 }
3756
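     /* Periodic (1 second) housekeeping: reap MCC completions while
      * the interface is down, refresh HW stats and (every 64th run)
      * the die temperature, replenish starved RX queues and update the
      * adaptive EQ delay for each event queue.
      */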
3757 static void be_worker(struct work_struct *work)
3758 {
3759         struct be_adapter *adapter =
3760                 container_of(work, struct be_adapter, work.work);
3761         struct be_rx_obj *rxo;
3762         struct be_eq_obj *eqo;
3763         int i;
3764
3765         /* when interrupts are not yet enabled, just reap any pending
3766          * mcc completions */
3767         if (!netif_running(adapter->netdev)) {
3768                 local_bh_disable();
3769                 be_process_mcc(adapter);
3770                 local_bh_enable();
3771                 goto reschedule;
3772         }
3773
3774         if (!adapter->stats_cmd_sent) {
3775                 if (lancer_chip(adapter))
3776                         lancer_cmd_get_pport_stats(adapter,
3777                                                 &adapter->stats_cmd);
3778                 else
3779                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3780         }
3781
3782         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3783                 be_cmd_get_die_temperature(adapter);
3784
3785         for_all_rx_queues(adapter, rxo, i) {
3786                 if (rxo->rx_post_starved) {
3787                         rxo->rx_post_starved = false;
3788                         be_post_rx_frags(rxo, GFP_KERNEL);
3789                 }
3790         }
3791
3792         for_all_evt_queues(adapter, eqo, i)
3793                 be_eqd_update(adapter, eqo);
3794
3795 reschedule:
3796         adapter->work_counter++;
3797         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3798 }
3799
3800 static bool be_reset_required(struct be_adapter *adapter)
3801 {
3802         return be_find_vfs(adapter, ENABLED) <= 0;
3803 }
3804
3805 static char *mc_name(struct be_adapter *adapter)
3806 {
3807         if (adapter->function_mode & FLEX10_MODE)
3808                 return "FLEX10";
3809         else if (adapter->function_mode & VNIC_MODE)
3810                 return "vNIC";
3811         else if (adapter->function_mode & UMC_ENABLED)
3812                 return "UMC";
3813         else
3814                 return "";
3815 }
3816
3817 static inline char *func_name(struct be_adapter *adapter)
3818 {
3819         return be_physfn(adapter) ? "PF" : "VF";
3820 }
3821
3822 static int __devinit be_probe(struct pci_dev *pdev,
3823                         const struct pci_device_id *pdev_id)
3824 {
3825         int status = 0;
3826         struct be_adapter *adapter;
3827         struct net_device *netdev;
3828         char port_name;
3829
3830         status = pci_enable_device(pdev);
3831         if (status)
3832                 goto do_none;
3833
3834         status = pci_request_regions(pdev, DRV_NAME);
3835         if (status)
3836                 goto disable_dev;
3837         pci_set_master(pdev);
3838
3839         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3840         if (netdev == NULL) {
3841                 status = -ENOMEM;
3842                 goto rel_reg;
3843         }
3844         adapter = netdev_priv(netdev);
3845         adapter->pdev = pdev;
3846         pci_set_drvdata(pdev, adapter);
3847
3848         status = be_dev_type_check(adapter);
3849         if (status)
3850                 goto free_netdev;
3851
3852         adapter->netdev = netdev;
3853         SET_NETDEV_DEV(netdev, &pdev->dev);
3854
3855         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3856         if (!status) {
3857                 netdev->features |= NETIF_F_HIGHDMA;
3858         } else {
3859                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3860                 if (status) {
3861                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3862                         goto free_netdev;
3863                 }
3864         }
3865
3866         status = pci_enable_pcie_error_reporting(pdev);
3867         if (status)
3868                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
3869
3870         status = be_ctrl_init(adapter);
3871         if (status)
3872                 goto free_netdev;
3873
3874         /* sync up with fw's ready state */
3875         if (be_physfn(adapter)) {
3876                 status = be_fw_wait_ready(adapter);
3877                 if (status)
3878                         goto ctrl_clean;
3879         }
3880
3881         /* tell fw we're ready to fire cmds */
3882         status = be_cmd_fw_init(adapter);
3883         if (status)
3884                 goto ctrl_clean;
3885
3886         if (be_reset_required(adapter)) {
3887                 status = be_cmd_reset_function(adapter);
3888                 if (status)
3889                         goto ctrl_clean;
3890         }
3891
3892         /* The INTR bit may be set in the card when probed by a kdump kernel
3893          * after a crash.
3894          */
3895         if (!lancer_chip(adapter))
3896                 be_intr_set(adapter, false);
3897
3898         status = be_stats_init(adapter);
3899         if (status)
3900                 goto ctrl_clean;
3901
3902         status = be_get_initial_config(adapter);
3903         if (status)
3904                 goto stats_clean;
3905
3906         INIT_DELAYED_WORK(&adapter->work, be_worker);
3907         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
3908         adapter->rx_fc = adapter->tx_fc = true;
3909
3910         status = be_setup(adapter);
3911         if (status)
3912                 goto stats_clean;
3913
3914         be_netdev_init(netdev);
3915         status = register_netdev(netdev);
3916         if (status != 0)
3917                 goto unsetup;
3918
3919         be_roce_dev_add(adapter);
3920
3921         schedule_delayed_work(&adapter->func_recovery_work,
3922                               msecs_to_jiffies(1000));
3923
3924         be_cmd_query_port_name(adapter, &port_name);
3925
3926         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
3927                  func_name(adapter), mc_name(adapter), port_name);
3928
3929         return 0;
3930
3931 unsetup:
3932         be_clear(adapter);
3933 stats_clean:
3934         be_stats_cleanup(adapter);
3935 ctrl_clean:
3936         be_ctrl_cleanup(adapter);
3937 free_netdev:
3938         free_netdev(netdev);
3939         pci_set_drvdata(pdev, NULL);
3940 rel_reg:
3941         pci_release_regions(pdev);
3942 disable_dev:
3943         pci_disable_device(pdev);
3944 do_none:
3945         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3946         return status;
3947 }
3948
3949 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3950 {
3951         struct be_adapter *adapter = pci_get_drvdata(pdev);
3952         struct net_device *netdev = adapter->netdev;
3953
3954         if (adapter->wol)
3955                 be_setup_wol(adapter, true);
3956
3957         cancel_delayed_work_sync(&adapter->func_recovery_work);
3958
3959         netif_device_detach(netdev);
3960         if (netif_running(netdev)) {
3961                 rtnl_lock();
3962                 be_close(netdev);
3963                 rtnl_unlock();
3964         }
3965         be_clear(adapter);
3966
3967         pci_save_state(pdev);
3968         pci_disable_device(pdev);
3969         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3970         return 0;
3971 }
3972
3973 static int be_resume(struct pci_dev *pdev)
3974 {
3975         int status = 0;
3976         struct be_adapter *adapter = pci_get_drvdata(pdev);
3977         struct net_device *netdev = adapter->netdev;
3978
3979         netif_device_detach(netdev);
3980
3981         status = pci_enable_device(pdev);
3982         if (status)
3983                 return status;
3984
3985         pci_set_power_state(pdev, PCI_D0);
3986         pci_restore_state(pdev);
3987
3988         /* tell fw we're ready to fire cmds */
3989         status = be_cmd_fw_init(adapter);
3990         if (status)
3991                 return status;
3992
3993         be_setup(adapter);
3994         if (netif_running(netdev)) {
3995                 rtnl_lock();
3996                 be_open(netdev);
3997                 rtnl_unlock();
3998         }
3999
4000         schedule_delayed_work(&adapter->func_recovery_work,
4001                               msecs_to_jiffies(1000));
4002         netif_device_attach(netdev);
4003
4004         if (adapter->wol)
4005                 be_setup_wol(adapter, false);
4006
4007         return 0;
4008 }
4009
4010 /*
4011  * An FLR will stop BE from DMAing any data.
4012  */
4013 static void be_shutdown(struct pci_dev *pdev)
4014 {
4015         struct be_adapter *adapter = pci_get_drvdata(pdev);
4016
4017         if (!adapter)
4018                 return;
4019
4020         cancel_delayed_work_sync(&adapter->work);
4021         cancel_delayed_work_sync(&adapter->func_recovery_work);
4022
4023         netif_device_detach(adapter->netdev);
4024
4025         if (adapter->wol)
4026                 be_setup_wol(adapter, true);
4027
4028         be_cmd_reset_function(adapter);
4029
4030         pci_disable_device(pdev);
4031 }
4032
4033 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4034                                 pci_channel_state_t state)
4035 {
4036         struct be_adapter *adapter = pci_get_drvdata(pdev);
4037         struct net_device *netdev = adapter->netdev;
4038
4039         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4040
4041         adapter->eeh_error = true;
4042
4043         cancel_delayed_work_sync(&adapter->func_recovery_work);
4044
4045         rtnl_lock();
4046         netif_device_detach(netdev);
4047         rtnl_unlock();
4048
4049         if (netif_running(netdev)) {
4050                 rtnl_lock();
4051                 be_close(netdev);
4052                 rtnl_unlock();
4053         }
4054         be_clear(adapter);
4055
4056         if (state == pci_channel_io_perm_failure)
4057                 return PCI_ERS_RESULT_DISCONNECT;
4058
4059         pci_disable_device(pdev);
4060
4061         /* The error could cause the FW to trigger a flash debug dump.
4062          * Resetting the card while flash dump is in progress
4063          * can cause it not to recover; wait for it to finish
4064          */
4065         ssleep(30);
4066         return PCI_ERS_RESULT_NEED_RESET;
4067 }
4068
4069 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4070 {
4071         struct be_adapter *adapter = pci_get_drvdata(pdev);
4072         int status;
4073
4074         dev_info(&adapter->pdev->dev, "EEH reset\n");
4075         be_clear_all_error(adapter);
4076
4077         status = pci_enable_device(pdev);
4078         if (status)
4079                 return PCI_ERS_RESULT_DISCONNECT;
4080
4081         pci_set_master(pdev);
4082         pci_set_power_state(pdev, PCI_D0);
4083         pci_restore_state(pdev);
4084
4085         /* Check if card is ok and fw is ready */
4086         status = be_fw_wait_ready(adapter);
4087         if (status)
4088                 return PCI_ERS_RESULT_DISCONNECT;
4089
4090         pci_cleanup_aer_uncorrect_error_status(pdev);
4091         return PCI_ERS_RESULT_RECOVERED;
4092 }
4093
4094 static void be_eeh_resume(struct pci_dev *pdev)
4095 {
4096         int status = 0;
4097         struct be_adapter *adapter = pci_get_drvdata(pdev);
4098         struct net_device *netdev = adapter->netdev;
4099
4100         dev_info(&adapter->pdev->dev, "EEH resume\n");
4101
4102         pci_save_state(pdev);
4103
4104         /* tell fw we're ready to fire cmds */
4105         status = be_cmd_fw_init(adapter);
4106         if (status)
4107                 goto err;
4108
4109         status = be_cmd_reset_function(adapter);
4110         if (status)
4111                 goto err;
4112
4113         status = be_setup(adapter);
4114         if (status)
4115                 goto err;
4116
4117         if (netif_running(netdev)) {
4118                 status = be_open(netdev);
4119                 if (status)
4120                         goto err;
4121         }
4122
4123         schedule_delayed_work(&adapter->func_recovery_work,
4124                               msecs_to_jiffies(1000));
4125         netif_device_attach(netdev);
4126         return;
4127 err:
4128         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4129 }
4130
4131 static struct pci_error_handlers be_eeh_handlers = {
4132         .error_detected = be_eeh_err_detected,
4133         .slot_reset = be_eeh_reset,
4134         .resume = be_eeh_resume,
4135 };
4136
4137 static struct pci_driver be_driver = {
4138         .name = DRV_NAME,
4139         .id_table = be_dev_ids,
4140         .probe = be_probe,
4141         .remove = be_remove,
4142         .suspend = be_suspend,
4143         .resume = be_resume,
4144         .shutdown = be_shutdown,
4145         .err_handler = &be_eeh_handlers
4146 };
4147
4148 static int __init be_init_module(void)
4149 {
4150         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4151             rx_frag_size != 2048) {
4152                 pr_warn(DRV_NAME ": Module param rx_frag_size must be 2048/4096/8192; using 2048\n");
4155                 rx_frag_size = 2048;
4156         }
4157
4158         return pci_register_driver(&be_driver);
4159 }
4160 module_init(be_init_module);
4161
4162 static void __exit be_exit_module(void)
4163 {
4164         pci_unregister_driver(&be_driver);
4165 }
4166 module_exit(be_exit_module);