cxgb4: Add support for dynamic allocation of resources for ULD
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>
#include <linux/crash_dump.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
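
/* The per-interface message level starts at DFLT_MSG_ENABLE; assuming the
 * standard ethtool msglvl hooks are wired up, it can be adjusted at
 * runtime, e.g.:
 *
 *      ethtool -s eth0 msglvl 0x<bitmap>
 */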

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
                {PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { 0, } \
        }

#include "t4_pci_id_tbl.h"
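
/* With the macros above, each CH_PCI_ID_TABLE_ENTRY(devid) pulled in from
 * t4_pci_id_tbl.h expands to an initializer of the form
 *
 *      { PCI_VDEVICE(CHELSIO, (devid)), 4 },
 *
 * so cxgb4_pci_tbl becomes an ordinary pci_device_id table terminated by
 * { 0, }.
 */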

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter, "
                 "deprecated parameter");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
                 "deprecated parameter");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
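
/* For example, "modprobe cxgb4 msi=0" would force legacy INTx interrupts;
 * the 0644 mode also exposes the parameter through sysfs, though a changed
 * value only matters the next time interrupt resources are allocated.
 */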

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
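
/* This is the same idea as the kernel's NET_IP_ALIGN: a 2-byte pad in
 * front of the 14-byte Ethernet header makes the IP header that follows
 * start on a 4-byte boundary.
 */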

#ifdef CONFIG_PCI_IOV
/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
#endif

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or the
 * driver's cxgb_select_queue() function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
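/* Note: uld_str[] is indexed by the CXGB4_ULD_* enum values (cxgb4_uld.h),
 * so its ordering must match that enum.
 */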

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s;
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case 10000:
                        s = "10Gbps";
                        break;
                case 1000:
                        s = "1000Mbps";
                        break;
                case 100:
                        s = "100Mbps";
                        break;
                case 40000:
                        s = "40Gbps";
                        break;
                default:
                        pr_info("%s: unsupported speed: %d\n",
                                dev->name, p->link_cfg.speed);
                        return;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
        int i;

        /* We use a simple mapping of Port TX Queue Index to DCB
         * Priority when we're enabling DCB.
         */
        for (i = 0; i < pi->nqsets; i++, txq++) {
                u32 name, value;
                int err;

                name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                        FW_PARAMS_PARAM_X_V(
                                FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
                        FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
                value = enable ? i : 0xffffffff;

                /* Since we can be called while atomic (from "interrupt
                 * level") we need to issue the Set Parameters Command
                 * without sleeping (timeout < 0).
                 */
                err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
                                            &name, &value,
                                            -FW_CMD_MAX_TIMEOUT);

                if (err)
                        dev_err(adap->pdev_dev,
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
                        txq->dcb_prio = value;
        }
}
#endif /* CONFIG_CHELSIO_T4_DCB */

int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
        struct port_info *pi = netdev_priv(dev);

        if (!pi->dcb.enabled)
                return 0;

        return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
                (pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
        return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else {
#ifdef CONFIG_CHELSIO_T4_DCB
                        if (cxgb4_dcb_enabled(dev)) {
                                cxgb4_dcb_state_init(dev);
                                dcb_tx_queue_prio_enable(dev, false);
                        }
#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
                }

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
                netdev_info(dev, "%s: unsupported port module inserted\n",
                            dev->name);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
                netdev_info(dev, "%s: unknown port module inserted\n",
                            dev->name);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
                netdev_info(dev, "%s: transceiver module error\n", dev->name);
        else
                netdev_info(dev, "%s: unknown module type %d inserted\n",
                            dev->name, pi->mod_type);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
        struct adapter *adap = pi->adapter;
        u64 vec = 0;
        bool ucast = false;
        struct hash_mac_addr *entry;

        /* Calculate the hash vector for the updated list and program it */
        list_for_each_entry(entry, &adap->mac_hlist, list) {
                ucast |= is_unicast_ether_addr(entry->addr);
                vec |= (1ULL << hash_mac_addr(entry->addr));
        }
        return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
                                vec, false);
}

static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adap = pi->adapter;
        int ret;
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = false;
        bool ucast = is_unicast_ether_addr(mac_addr);
        const u8 *maclist[1] = {mac_addr};
        struct hash_mac_addr *new_entry;

        ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
                                NULL, ucast ? &uhash : &mhash, false);
        if (ret < 0)
                goto out;
        /* if hash != 0, then add the addr to the hash addr list
         * so at the end we will calculate the hash for the
         * whole list and program it
         */
        if (uhash || mhash) {
                new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
                if (!new_entry)
                        return -ENOMEM;
                ether_addr_copy(new_entry->addr, mac_addr);
                list_add_tail(&new_entry->list, &adap->mac_hlist);
                ret = cxgb4_set_addr_hash(pi);
        }
out:
        return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adap = pi->adapter;
        int ret;
        const u8 *maclist[1] = {mac_addr};
        struct hash_mac_addr *entry, *tmp;

        /* If the MAC address to be removed is in the hash addr
         * list, delete it from the list and update hash vector
         */
        list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
                if (ether_addr_equal(entry->addr, mac_addr)) {
                        list_del(&entry->list);
                        kfree(entry);
                        return cxgb4_set_addr_hash(pi);
                }
        }

        ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
        return ret < 0 ? -EINVAL : 0;
}
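
/* Together, cxgb4_mac_sync()/cxgb4_mac_unsync() keep the hardware in step
 * with the stack's unicast/multicast address lists: exact-match filters
 * are used while they last, and addresses that overflow into the hash
 * (uhash/mhash non-zero above) are tracked on adap->mac_hlist so the
 * 64-bit hash vector can be recomputed whenever the list changes.
 */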

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
        __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
                             (dev->flags & IFF_PROMISC) ? 1 : 0,
                             (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                             sleep_ok);
}

/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->pf;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                local_bh_disable();
                ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
                                          true, CXGB4_DCB_ENABLED);
                local_bh_enable();
        }

        return ret;
}
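
/* The ordering in link_start() above is deliberate: Rx mode (MTU/VLAN
 * stripping) first, then the unicast MAC (t4_change_mac() returns the
 * index of the exact-match filter it used, cached in xact_addr_filt),
 * then link parameters, and only then is the Virtual Interface enabled.
 */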

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
        int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
        struct net_device *dev = adap->port[adap->chan_map[port]];
        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
        int new_dcb_enabled;

        cxgb4_dcb_handle_fw_update(adap, pcmd);
        new_dcb_enabled = cxgb4_dcb_enabled(dev);

        /* If the DCB has become enabled or disabled on the port then we're
         * going to need to set up/tear down DCB Priority parameters for the
         * TX Queues associated with the port.
         */
        if (new_dcb_enabled != old_dcb_enabled)
                dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter has loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base && nidx <
           (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = TCB_COOKIE_G(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}
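
/* The TCB cookie in the reply carries the firmware's verdict on the
 * original work request: FW_FILTER_WR_FLT_ADDED/FW_FILTER_WR_FLT_DELETED
 * on success, FW_FILTER_WR_SMT_TBL_FULL when Source MAC rewriting could
 * not be set up, and anything else is treated as a hard failure that
 * clears the shadow entry.
 */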

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
           ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
                const struct fw_port_cmd *pcmd = (const void *)p->data;
                unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
                unsigned int action =
                        FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_GET_PORT_INFO) {
                        int port = FW_PORT_CMD_PORTID_G(
                                        be32_to_cpu(pcmd->op_to_portid));
                        struct net_device *dev =
                                q->adap->port[q->adap->chan_map[port]];
                        int state_input = ((pcmd->u.info.dcbxdis_pkd &
                                            FW_PORT_CMD_DCBXDIS_F)
                                           ? CXGB4_DCB_INPUT_FW_DISABLED
                                           : CXGB4_DCB_INPUT_FW_ENABLED);

                        cxgb4_dcb_state_fsm(dev, state_input);
                }

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_L2_DCB_CFG)
                        dcb_rpl(q->adap, pcmd);
                else
#endif
                        if (p->type == 0)
                                t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
        if (ulds[q->uld].lro_flush)
                ulds[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
        int ret;

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
         */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (q->flush_handler)
                ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
                                                  rsp, gl, &q->lro_mgr,
                                                  &q->napi);
        else
                ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
                                              rsp, gl);

        if (ret) {
                rxq->stats.nomem++;
                return -1;
        }

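        /* Update Rx statistics: a NULL gather list means the message
         * arrived as immediate data in the response descriptor, while
         * CXGB4_MSG_AN is a sentinel for an asynchronous notification
         * with no payload.
         */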
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;
        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

        if (v & PFSW_F) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
        }
        if (adap->flags & MASTER_PF)
                t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_iscsirxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
                         adap->port[0]->name, i);

        for_each_iscsitrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);

        for_each_rdmaciq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
                         adap->port[0]->name, i);
}
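
/* The naming order above must stay in lockstep with the vector allocation
 * order in request_msix_queue_irqs() below: vector 0 is the non-data
 * interrupt, vector 1 the FW event queue, then the Ethernet queues
 * followed by the iSCSI, iSCSIT, RDMA and RDMA CIQ queues.
 */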

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
        int iscsitqidx = 0;
        int msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_iscsirxq(s, iscsiqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->iscsirxq[iscsiqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_iscsitrxq(s, iscsitqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->iscsitrxq[iscsitqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmaciq(s, rdmaciqqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmaciq[rdmaciqqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaciqqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmaciq[rdmaciqqidx].rspq);
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--iscsitqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->iscsitrxq[iscsitqidx].rspq);
        while (--iscsiqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->iscsirxq[iscsiqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_iscsirxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec,
                         &s->iscsirxq[i].rspq);
        for_each_iscsitrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec,
                         &s->iscsitrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
        for_each_rdmaciq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}

/**
 *      cxgb4_write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 *      Should never be called before setting up sge eth rx queues.
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        struct adapter *adapter = pi->adapter;
        const struct sge_eth_rxq *rxq;

        rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = rxq[*queues].rspq.abs_id;

        err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        /* If Tunnel All Lookup isn't specified in the global RSS
         * Configuration, then we need to specify a default Ingress
         * Queue for any ingress packets which aren't hashed.  We'll
         * use our first ingress queue ...
         */
        if (!err)
                err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
                                       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
                                       rss[0]);
        kfree(rss);
        return err;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, j, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                /* Fill default values with equal distribution */
                for (j = 0; j < pi->rss_size; j++)
                        pi->rss[j] = j % pi->nqsets;

                err = cxgb4_write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}
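
/* For example, a port with nqsets = 4 and rss_size = 128 gets the default
 * mapping 0,1,2,3,0,1,2,3,... across its 128 RSS slots, i.e. an equal
 * spread of hashed ingress traffic over its four Rx queue sets.
 */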

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler) {
                        napi_disable(&q->napi);
                        local_bh_disable();
                        while (!cxgb_poll_lock_napi(q))
                                mdelay(1);
                        local_bh_enable();
                }
        }
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
        if (adap->flags & FULL_INIT_DONE) {
                t4_intr_disable(adap);
                if (adap->flags & USING_MSIX) {
                        free_msix_queue_irqs(adap);
                        free_irq(adap->msix_info[0].vec, adap);
                } else {
                        free_irq(adap->pdev->irq, adap);
                }
                quiesce_rx(adap);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler) {
                        cxgb_busy_poll_init_lock(q);
                        napi_enable(&q->napi);
                }
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                             SEINTARM_V(q->intr_params) |
                             INGRESSQID_V(q->cntxt_id));
        }
}

static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
                           unsigned int nq, unsigned int per_chan, int msi_idx,
                           u16 *ids, bool lro)
{
        int i, err;

        for (i = 0; i < nq; i++, q++) {
                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
                                       adap->port[i / per_chan],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler,
                                       lro ? uldrx_flush_handler : NULL,
                                       0);
                if (err)
                        return err;
                memset(&q->stats, 0, sizeof(q->stats));
                if (ids)
                        ids[i] = q->rspq.abs_id;
        }
        return 0;
}

/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, s->egr_sz);
        bitmap_zero(s->txq_maperr, s->egr_sz);

        if (adap->flags & USING_MSIX)
                adap->msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL, NULL, -1);
                if (err)
                        return err;
                adap->msi_idx = -((int)s->intrq.abs_id + 1);
        }

        /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
         * don't forget to update the following which need to be
         * synchronized to any changes here.
         *
         * 1. The calculations of MAX_INGQ in cxgb4.h.
         *
         * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
         *    to accommodate any new/deleted Ingress Queues
         *    which need MSI-X Vectors.
         *
         * 3. Update sge_qinfo_show() to include information on the
         *    new/deleted queues.
         */
        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (adap->msi_idx > 0)
                                adap->msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               adap->msi_idx, &q->fl,
                                               t4_ethrx_handler,
                                               NULL,
                                               t4_get_mps_bg_map(adap,
                                                                 pi->tx_chan));
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
        for_each_iscsirxq(s, i) {
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
                                            adap->port[i / j],
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
        err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, adap->msi_idx, ids, lro); \
        if (err) \
                goto freeout; \
        if (adap->msi_idx > 0) \
                adap->msi_idx += nq; \
} while (0)
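
/* Each ALLOC_OFLD_RXQS() invocation hands the current adap->msi_idx to
 * alloc_ofld_rxqs() (which assigns MSI-X vectors relative to it) and then
 * advances adap->msi_idx past the nq vectors just consumed.
 */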

        ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
        ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
        ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
        j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
        ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);

#undef ALLOC_OFLD_RXQS

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, is_t4(adap->params.chip) ?
                                MPS_TRC_RSS_CONTROL_A :
                                MPS_T5_TRC_RSS_CONTROL_A,
                     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
        return 0;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
        kvfree(addr);
}
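
/* Since kvfree() copes with both kmalloc()ed and vmalloc()ed pointers,
 * t4_free_mem() works no matter which path t4_alloc_mem() took.
 */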

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
                                                f->fs.eport, f->fs.dmac);
                if (f->l2t == NULL) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
        }

        ftid = adapter->tids.ftid_base + fidx;

        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
        fwr->tid_to_iq =
                htonl(FW_FILTER_WR_TID_V(ftid) |
                      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
                      FW_FILTER_WR_NOREPLY_V(0) |
                      FW_FILTER_WR_IQ_V(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
                      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
                      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
                      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
                      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
                      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
                      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
                      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
                      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
                      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
                      FW_FILTER_WR_PRIO_V(f->fs.prio) |
                      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
                 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
                 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
                 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
                 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
                 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(FW_FILTER_WR_RX_CHAN_V(0) |
                      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
                      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
                      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
                      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
                      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
                      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
                      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
                      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);
        if (f->fs.newsmac)
                memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int len, ftid;

        len = sizeof(*fwr);
        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}
1344
1345 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1346                              void *accel_priv, select_queue_fallback_t fallback)
1347 {
1348         int txq;
1349
1350 #ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
1356         if (cxgb4_dcb_enabled(dev)) {
1357                 u16 vlan_tci;
1358                 int err;
1359
1360                 err = vlan_get_tag(skb, &vlan_tci);
1361                 if (unlikely(err)) {
1362                         if (net_ratelimit())
1363                                 netdev_warn(dev,
1364                                             "TX Packet without VLAN Tag on DCB Link\n");
1365                         txq = 0;
1366                 } else {
1367                         txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1368 #ifdef CONFIG_CHELSIO_T4_FCOE
1369                         if (skb->protocol == htons(ETH_P_FCOE))
1370                                 txq = skb->priority & 0x7;
1371 #endif /* CONFIG_CHELSIO_T4_FCOE */
1372                 }
1373                 return txq;
1374         }
1375 #endif /* CONFIG_CHELSIO_T4_DCB */
1376
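	/* When the "select_queue" module parameter is set, derive the Tx
	 * queue from the recorded Rx queue (or from the submitting CPU if
	 * none was recorded) and fold it back into the device's Tx queue
	 * range.
	 */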
1377         if (select_queue) {
1378                 txq = (skb_rx_queue_recorded(skb)
1379                         ? skb_get_rx_queue(skb)
1380                         : smp_processor_id());
1381
1382                 while (unlikely(txq >= dev->real_num_tx_queues))
1383                         txq -= dev->real_num_tx_queues;
1384
1385                 return txq;
1386         }
1387
1388         return fallback(dev, skb) % dev->real_num_tx_queues;
1389 }
1390
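/* Return the index of the SGE interrupt hold-off timer value closest to
 * the requested time, by linear scan of the small timer_val[] array.
 * closest_thres() below does the same for the packet-count thresholds.
 */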
1391 static int closest_timer(const struct sge *s, int time)
1392 {
1393         int i, delta, match = 0, min_delta = INT_MAX;
1394
1395         for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1396                 delta = time - s->timer_val[i];
1397                 if (delta < 0)
1398                         delta = -delta;
1399                 if (delta < min_delta) {
1400                         min_delta = delta;
1401                         match = i;
1402                 }
1403         }
1404         return match;
1405 }
1406
1407 static int closest_thres(const struct sge *s, int thres)
1408 {
1409         int i, delta, match = 0, min_delta = INT_MAX;
1410
1411         for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1412                 delta = thres - s->counter_val[i];
1413                 if (delta < 0)
1414                         delta = -delta;
1415                 if (delta < min_delta) {
1416                         min_delta = delta;
1417                         match = i;
1418                 }
1419         }
1420         return match;
1421 }
1422
1423 /**
1424  *      cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
1425  *      @q: the Rx queue
1426  *      @us: the hold-off time in us, or 0 to disable timer
1427  *      @cnt: the hold-off packet count, or 0 to disable counter
1428  *
1429  *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
1430  *      one of the two needs to be enabled for the queue to generate interrupts.
1431  */
1432 int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
1433                                unsigned int us, unsigned int cnt)
1434 {
1435         struct adapter *adap = q->adap;
1436
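	/* If both the timer and the counter are disabled, fall back to a
	 * packet count of 1 so the queue still generates interrupts.
	 */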
1437         if ((us | cnt) == 0)
1438                 cnt = 1;
1439
1440         if (cnt) {
1441                 int err;
1442                 u32 v, new_idx;
1443
1444                 new_idx = closest_thres(&adap->sge, cnt);
1445                 if (q->desc && q->pktcnt_idx != new_idx) {
1446                         /* the queue has already been created, update it */
1447                         v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1448                             FW_PARAMS_PARAM_X_V(
1449                                         FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1450                             FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
1451                         err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1452                                             &v, &new_idx);
1453                         if (err)
1454                                 return err;
1455                 }
1456                 q->pktcnt_idx = new_idx;
1457         }
1458
1459         us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1460         q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
1461         return 0;
1462 }
1463
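/* A minimal usage sketch (hypothetical caller code, not part of this file;
 * "rspq" and "err" are placeholders): coalesce interrupts on an Rx queue
 * to roughly 5us or 8 packets, whichever is reached first:
 *
 *	err = cxgb4_set_rspq_intr_params(&rspq, 5, 8);
 *	if (err)
 *		goto out;
 */
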
1464 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
1465 {
1466         const struct port_info *pi = netdev_priv(dev);
1467         netdev_features_t changed = dev->features ^ features;
1468         int err;
1469
1470         if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
1471                 return 0;
1472
1473         err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
1474                             -1, -1, -1,
1475                             !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
1476         if (unlikely(err))
1477                 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
1478         return err;
1479 }
1480
1481 static int setup_debugfs(struct adapter *adap)
1482 {
1483         if (IS_ERR_OR_NULL(adap->debugfs_root))
1484                 return -1;
1485
1486 #ifdef CONFIG_DEBUG_FS
1487         t4_setup_debugfs(adap);
1488 #endif
1489         return 0;
1490 }
1491
1492 /*
1493  * upper-layer driver support
1494  */
1495
1496 /*
1497  * Allocate an active-open TID and set it to the supplied value.
1498  */
1499 int cxgb4_alloc_atid(struct tid_info *t, void *data)
1500 {
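	/* Free entries of atid_tab form a singly linked list: while an
	 * entry is unused, its "next" member (which overlays "data" in the
	 * aopen_entry union) points at the next free entry.
	 */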
1501         int atid = -1;
1502
1503         spin_lock_bh(&t->atid_lock);
1504         if (t->afree) {
1505                 union aopen_entry *p = t->afree;
1506
1507                 atid = (p - t->atid_tab) + t->atid_base;
1508                 t->afree = p->next;
1509                 p->data = data;
1510                 t->atids_in_use++;
1511         }
1512         spin_unlock_bh(&t->atid_lock);
1513         return atid;
1514 }
1515 EXPORT_SYMBOL(cxgb4_alloc_atid);
1516
1517 /*
1518  * Release an active-open TID.
1519  */
1520 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1521 {
1522         union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
1523
1524         spin_lock_bh(&t->atid_lock);
1525         p->next = t->afree;
1526         t->afree = p;
1527         t->atids_in_use--;
1528         spin_unlock_bh(&t->atid_lock);
1529 }
1530 EXPORT_SYMBOL(cxgb4_free_atid);
1531
1532 /*
1533  * Allocate a server TID and set it to the supplied value.
1534  */
1535 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1536 {
1537         int stid;
1538
1539         spin_lock_bh(&t->stid_lock);
1540         if (family == PF_INET) {
1541                 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1542                 if (stid < t->nstids)
1543                         __set_bit(stid, t->stid_bmap);
1544                 else
1545                         stid = -1;
1546         } else {
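		/* An IPv6 server occupies an order-1 (two-stid) region of
		 * the bitmap, hence bitmap_find_free_region() with order 1.
		 */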
1547                 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
1548                 if (stid < 0)
1549                         stid = -1;
1550         }
1551         if (stid >= 0) {
1552                 t->stid_tab[stid].data = data;
1553                 stid += t->stid_base;
		/* An IPv6 server needs up to 520 match bits, i.e. 16 TCAM
		 * cells or 4 TIDs.  With CLIP enabled only the compressed
		 * IP index is matched, so it needs just 2 TIDs.
		 */
1558                 if (family == PF_INET)
1559                         t->stids_in_use++;
1560                 else
1561                         t->stids_in_use += 2;
1562         }
1563         spin_unlock_bh(&t->stid_lock);
1564         return stid;
1565 }
1566 EXPORT_SYMBOL(cxgb4_alloc_stid);
1567
1568 /* Allocate a server filter TID and set it to the supplied value.
1569  */
1570 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
1571 {
1572         int stid;
1573
1574         spin_lock_bh(&t->stid_lock);
1575         if (family == PF_INET) {
1576                 stid = find_next_zero_bit(t->stid_bmap,
1577                                 t->nstids + t->nsftids, t->nstids);
1578                 if (stid < (t->nstids + t->nsftids))
1579                         __set_bit(stid, t->stid_bmap);
1580                 else
1581                         stid = -1;
1582         } else {
1583                 stid = -1;
1584         }
1585         if (stid >= 0) {
1586                 t->stid_tab[stid].data = data;
1587                 stid -= t->nstids;
1588                 stid += t->sftid_base;
1589                 t->sftids_in_use++;
1590         }
1591         spin_unlock_bh(&t->stid_lock);
1592         return stid;
1593 }
1594 EXPORT_SYMBOL(cxgb4_alloc_sftid);
1595
1596 /* Release a server TID.
1597  */
1598 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1599 {
1600         /* Is it a server filter TID? */
1601         if (t->nsftids && (stid >= t->sftid_base)) {
1602                 stid -= t->sftid_base;
1603                 stid += t->nstids;
1604         } else {
1605                 stid -= t->stid_base;
1606         }
1607
1608         spin_lock_bh(&t->stid_lock);
1609         if (family == PF_INET)
1610                 __clear_bit(stid, t->stid_bmap);
1611         else
1612                 bitmap_release_region(t->stid_bmap, stid, 1);
1613         t->stid_tab[stid].data = NULL;
1614         if (stid < t->nstids) {
1615                 if (family == PF_INET)
1616                         t->stids_in_use--;
1617                 else
1618                         t->stids_in_use -= 2;
1619         } else {
1620                 t->sftids_in_use--;
1621         }
1622         spin_unlock_bh(&t->stid_lock);
1623 }
1624 EXPORT_SYMBOL(cxgb4_free_stid);
1625
1626 /*
1627  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
1628  */
1629 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1630                            unsigned int tid)
1631 {
1632         struct cpl_tid_release *req;
1633
1634         set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1635         req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
1636         INIT_TP_WR(req, tid);
1637         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1638 }
1639
1640 /*
1641  * Queue a TID release request and if necessary schedule a work queue to
1642  * process it.
1643  */
1644 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1645                                     unsigned int tid)
1646 {
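	/* While a release is deferred, the tid_tab[] slot itself is reused
	 * as the "next" link of a singly linked list of pending releases.
	 */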
1647         void **p = &t->tid_tab[tid];
1648         struct adapter *adap = container_of(t, struct adapter, tids);
1649
1650         spin_lock_bh(&adap->tid_release_lock);
1651         *p = adap->tid_release_head;
1652         /* Low 2 bits encode the Tx channel number */
1653         adap->tid_release_head = (void **)((uintptr_t)p | chan);
1654         if (!adap->tid_release_task_busy) {
1655                 adap->tid_release_task_busy = true;
1656                 queue_work(adap->workq, &adap->tid_release_task);
1657         }
1658         spin_unlock_bh(&adap->tid_release_lock);
1659 }
1660
1661 /*
1662  * Process the list of pending TID release requests.
1663  */
1664 static void process_tid_release_list(struct work_struct *work)
1665 {
1666         struct sk_buff *skb;
1667         struct adapter *adap;
1668
1669         adap = container_of(work, struct adapter, tid_release_task);
1670
1671         spin_lock_bh(&adap->tid_release_lock);
1672         while (adap->tid_release_head) {
1673                 void **p = adap->tid_release_head;
1674                 unsigned int chan = (uintptr_t)p & 3;
1675                 p = (void *)p - chan;
1676
1677                 adap->tid_release_head = *p;
1678                 *p = NULL;
1679                 spin_unlock_bh(&adap->tid_release_lock);
1680
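		/* The release must eventually go out, so keep retrying the
		 * allocation, sleeping a jiffy between attempts.
		 */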
1681                 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1682                                          GFP_KERNEL)))
1683                         schedule_timeout_uninterruptible(1);
1684
1685                 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1686                 t4_ofld_send(adap, skb);
1687                 spin_lock_bh(&adap->tid_release_lock);
1688         }
1689         adap->tid_release_task_busy = false;
1690         spin_unlock_bh(&adap->tid_release_lock);
1691 }
1692
1693 /*
1694  * Release a TID and inform HW.  If we are unable to allocate the release
1695  * message we defer to a work queue.
1696  */
1697 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
1698 {
1699         struct sk_buff *skb;
1700         struct adapter *adap = container_of(t, struct adapter, tids);
1701
1702         WARN_ON(tid >= t->ntids);
1703
1704         if (t->tid_tab[tid]) {
1705                 t->tid_tab[tid] = NULL;
1706                 if (t->hash_base && (tid >= t->hash_base))
1707                         atomic_dec(&t->hash_tids_in_use);
1708                 else
1709                         atomic_dec(&t->tids_in_use);
1710         }
1711
1712         skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
1713         if (likely(skb)) {
1714                 mk_tid_release(skb, chan, tid);
1715                 t4_ofld_send(adap, skb);
	} else {
		cxgb4_queue_tid_release(t, chan, tid);
	}
1718 }
1719 EXPORT_SYMBOL(cxgb4_remove_tid);
1720
1721 /*
1722  * Allocate and initialize the TID tables.  Returns 0 on success.
1723  */
1724 static int tid_init(struct tid_info *t)
1725 {
1726         size_t size;
1727         unsigned int stid_bmap_size;
1728         unsigned int natids = t->natids;
1729         struct adapter *adap = container_of(t, struct adapter, tids);
1730
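	/* All of the TID tables share one contiguous allocation, laid out
	 * as: tid_tab | atid_tab | stid_tab (incl. sftids) | stid_bmap |
	 * ftid_tab (incl. sftids), matching the carving below.
	 */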
1731         stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
1732         size = t->ntids * sizeof(*t->tid_tab) +
1733                natids * sizeof(*t->atid_tab) +
1734                t->nstids * sizeof(*t->stid_tab) +
1735                t->nsftids * sizeof(*t->stid_tab) +
1736                stid_bmap_size * sizeof(long) +
1737                t->nftids * sizeof(*t->ftid_tab) +
1738                t->nsftids * sizeof(*t->ftid_tab);
1739
1740         t->tid_tab = t4_alloc_mem(size);
1741         if (!t->tid_tab)
1742                 return -ENOMEM;
1743
1744         t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1745         t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
1746         t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1747         t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1748         spin_lock_init(&t->stid_lock);
1749         spin_lock_init(&t->atid_lock);
1750
1751         t->stids_in_use = 0;
1752         t->sftids_in_use = 0;
1753         t->afree = NULL;
1754         t->atids_in_use = 0;
1755         atomic_set(&t->tids_in_use, 0);
1756         atomic_set(&t->hash_tids_in_use, 0);
1757
	/* Set up the free list for atid_tab and clear the stid bitmap. */
1759         if (natids) {
1760                 while (--natids)
1761                         t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1762                 t->afree = t->atid_tab;
1763         }
1764         bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
1765         /* Reserve stid 0 for T4/T5 adapters */
1766         if (!t->stid_base &&
1767             (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
1768                 __set_bit(0, t->stid_bmap);
1769
1770         return 0;
1771 }
1772
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: VLAN tag to match (currently unused here)
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
1784 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
1785                         __be32 sip, __be16 sport, __be16 vlan,
1786                         unsigned int queue)
1787 {
1788         unsigned int chan;
1789         struct sk_buff *skb;
1790         struct adapter *adap;
1791         struct cpl_pass_open_req *req;
1792         int ret;
1793
1794         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1795         if (!skb)
1796                 return -ENOMEM;
1797
1798         adap = netdev2adap(dev);
1799         req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
1800         INIT_TP_WR(req, 0);
1801         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1802         req->local_port = sport;
1803         req->peer_port = htons(0);
1804         req->local_ip = sip;
1805         req->peer_ip = htonl(0);
1806         chan = rxq_to_chan(&adap->sge, queue);
1807         req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1808         req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1809                                 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1810         ret = t4_mgmt_tx(adap, skb);
1811         return net_xmit_eval(ret);
1812 }
1813 EXPORT_SYMBOL(cxgb4_create_server);
1814
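/* A minimal usage sketch (hypothetical ULD code; "data", "sip" and
 * "rxq_id" are placeholders): allocate a server TID and open a hardware
 * listener on TCP port 80, steering incoming SYNs to Rx queue "rxq_id":
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET, data);
 *	if (stid >= 0)
 *		ret = cxgb4_create_server(dev, stid, sip, htons(80), 0,
 *					  rxq_id);
 */
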
/**
 *	cxgb4_create_server6 - create an IPv6 server
1816  *      @dev: the device
1817  *      @stid: the server TID
1818  *      @sip: local IPv6 address to bind server to
1819  *      @sport: the server's TCP port
1820  *      @queue: queue to direct messages from this server to
1821  *
1822  *      Create an IPv6 server for the given port and address.
1823  *      Returns <0 on error and one of the %NET_XMIT_* values on success.
1824  */
1825 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1826                          const struct in6_addr *sip, __be16 sport,
1827                          unsigned int queue)
1828 {
1829         unsigned int chan;
1830         struct sk_buff *skb;
1831         struct adapter *adap;
1832         struct cpl_pass_open_req6 *req;
1833         int ret;
1834
1835         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1836         if (!skb)
1837                 return -ENOMEM;
1838
1839         adap = netdev2adap(dev);
1840         req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
1841         INIT_TP_WR(req, 0);
1842         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1843         req->local_port = sport;
1844         req->peer_port = htons(0);
1845         req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1846         req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1847         req->peer_ip_hi = cpu_to_be64(0);
1848         req->peer_ip_lo = cpu_to_be64(0);
1849         chan = rxq_to_chan(&adap->sge, queue);
1850         req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1851         req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1852                                 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1853         ret = t4_mgmt_tx(adap, skb);
1854         return net_xmit_eval(ret);
1855 }
1856 EXPORT_SYMBOL(cxgb4_create_server6);
1857
1858 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1859                         unsigned int queue, bool ipv6)
1860 {
1861         struct sk_buff *skb;
1862         struct adapter *adap;
1863         struct cpl_close_listsvr_req *req;
1864         int ret;
1865
1866         adap = netdev2adap(dev);
1867
1868         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1869         if (!skb)
1870                 return -ENOMEM;
1871
1872         req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
1873         INIT_TP_WR(req, 0);
1874         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
1875         req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
1876                                 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
1877         ret = t4_mgmt_tx(adap, skb);
1878         return net_xmit_eval(ret);
1879 }
1880 EXPORT_SYMBOL(cxgb4_remove_server);
1881
1882 /**
1883  *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
1884  *      @mtus: the HW MTU table
1885  *      @mtu: the target MTU
1886  *      @idx: index of selected entry in the MTU table
1887  *
1888  *      Returns the index and the value in the HW MTU table that is closest to
1889  *      but does not exceed @mtu, unless @mtu is smaller than any value in the
1890  *      table, in which case that smallest available value is selected.
1891  */
1892 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
1893                             unsigned int *idx)
1894 {
1895         unsigned int i = 0;
1896
1897         while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
1898                 ++i;
1899         if (idx)
1900                 *idx = i;
1901         return mtus[i];
1902 }
1903 EXPORT_SYMBOL(cxgb4_best_mtu);
1904
1905 /**
1906  *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
1907  *     @mtus: the HW MTU table
1908  *     @header_size: Header Size
1909  *     @data_size_max: maximum Data Segment Size
1910  *     @data_size_align: desired Data Segment Size Alignment (2^N)
1911  *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
1912  *
1913  *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
1914  *     MTU Table based solely on a Maximum MTU parameter, we break that
1915  *     parameter up into a Header Size and Maximum Data Segment Size, and
1916  *     provide a desired Data Segment Size Alignment.  If we find an MTU in
1917  *     the Hardware MTU Table which will result in a Data Segment Size with
1918  *     the requested alignment _and_ that MTU isn't "too far" from the
1919  *     closest MTU, then we'll return that rather than the closest MTU.
1920  */
1921 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
1922                                     unsigned short header_size,
1923                                     unsigned short data_size_max,
1924                                     unsigned short data_size_align,
1925                                     unsigned int *mtu_idxp)
1926 {
1927         unsigned short max_mtu = header_size + data_size_max;
1928         unsigned short data_size_align_mask = data_size_align - 1;
1929         int mtu_idx, aligned_mtu_idx;
1930
1931         /* Scan the MTU Table till we find an MTU which is larger than our
1932          * Maximum MTU or we reach the end of the table.  Along the way,
1933          * record the last MTU found, if any, which will result in a Data
1934          * Segment Length matching the requested alignment.
1935          */
1936         for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
1937                 unsigned short data_size = mtus[mtu_idx] - header_size;
1938
1939                 /* If this MTU minus the Header Size would result in a
1940                  * Data Segment Size of the desired alignment, remember it.
1941                  */
1942                 if ((data_size & data_size_align_mask) == 0)
1943                         aligned_mtu_idx = mtu_idx;
1944
1945                 /* If we're not at the end of the Hardware MTU Table and the
1946                  * next element is larger than our Maximum MTU, drop out of
1947                  * the loop.
1948                  */
1949                 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
1950                         break;
1951         }
1952
1953         /* If we fell out of the loop because we ran to the end of the table,
1954          * then we just have to use the last [largest] entry.
1955          */
1956         if (mtu_idx == NMTUS)
1957                 mtu_idx--;
1958
1959         /* If we found an MTU which resulted in the requested Data Segment
1960          * Length alignment and that's "not far" from the largest MTU which is
1961          * less than or equal to the maximum MTU, then use that.
1962          */
1963         if (aligned_mtu_idx >= 0 &&
1964             mtu_idx - aligned_mtu_idx <= 1)
1965                 mtu_idx = aligned_mtu_idx;
1966
1967         /* If the caller has passed in an MTU Index pointer, pass the
1968          * MTU Index back.  Return the MTU value.
1969          */
1970         if (mtu_idxp)
1971                 *mtu_idxp = mtu_idx;
1972         return mtus[mtu_idx];
1973 }
1974 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
1975
1976 /**
1977  *      cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
1978  *      @chip: chip type
1979  *      @viid: VI id of the given port
1980  *
1981  *      Return the SMT index for this VI.
1982  */
1983 unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
1984 {
1985         /* In T4/T5, SMT contains 256 SMAC entries organized in
1986          * 128 rows of 2 entries each.
1987          * In T6, SMT contains 256 SMAC entries in 256 rows.
1988          * TODO: The below code needs to be updated when we add support
1989          * for 256 VFs.
1990          */
1991         if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1992                 return ((viid & 0x7f) << 1);
1993         else
1994                 return (viid & 0x7f);
1995 }
1996 EXPORT_SYMBOL(cxgb4_tp_smt_idx);
1997
1998 /**
1999  *      cxgb4_port_chan - get the HW channel of a port
2000  *      @dev: the net device for the port
2001  *
2002  *      Return the HW Tx channel of the given port.
2003  */
2004 unsigned int cxgb4_port_chan(const struct net_device *dev)
2005 {
2006         return netdev2pinfo(dev)->tx_chan;
2007 }
2008 EXPORT_SYMBOL(cxgb4_port_chan);
2009
2010 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2011 {
2012         struct adapter *adap = netdev2adap(dev);
2013         u32 v1, v2, lp_count, hp_count;
2014
2015         v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2016         v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2017         if (is_t4(adap->params.chip)) {
2018                 lp_count = LP_COUNT_G(v1);
2019                 hp_count = HP_COUNT_G(v1);
2020         } else {
2021                 lp_count = LP_COUNT_T5_G(v1);
2022                 hp_count = HP_COUNT_T5_G(v2);
2023         }
2024         return lpfifo ? lp_count : hp_count;
2025 }
2026 EXPORT_SYMBOL(cxgb4_dbfifo_count);
2027
2028 /**
2029  *      cxgb4_port_viid - get the VI id of a port
2030  *      @dev: the net device for the port
2031  *
2032  *      Return the VI id of the given port.
2033  */
2034 unsigned int cxgb4_port_viid(const struct net_device *dev)
2035 {
2036         return netdev2pinfo(dev)->viid;
2037 }
2038 EXPORT_SYMBOL(cxgb4_port_viid);
2039
2040 /**
2041  *      cxgb4_port_idx - get the index of a port
2042  *      @dev: the net device for the port
2043  *
2044  *      Return the index of the given port.
2045  */
2046 unsigned int cxgb4_port_idx(const struct net_device *dev)
2047 {
2048         return netdev2pinfo(dev)->port_id;
2049 }
2050 EXPORT_SYMBOL(cxgb4_port_idx);
2051
2052 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2053                          struct tp_tcp_stats *v6)
2054 {
2055         struct adapter *adap = pci_get_drvdata(pdev);
2056
2057         spin_lock(&adap->stats_lock);
2058         t4_tp_get_tcp_stats(adap, v4, v6);
2059         spin_unlock(&adap->stats_lock);
2060 }
2061 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2062
2063 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2064                       const unsigned int *pgsz_order)
2065 {
2066         struct adapter *adap = netdev2adap(dev);
2067
2068         t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
2069         t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
2070                      HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
2071                      HPZ3_V(pgsz_order[3]));
2072 }
2073 EXPORT_SYMBOL(cxgb4_iscsi_init);
2074
2075 int cxgb4_flush_eq_cache(struct net_device *dev)
2076 {
2077         struct adapter *adap = netdev2adap(dev);
2078
2079         return t4_sge_ctxt_flush(adap, adap->mbox);
2080 }
2081 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2082
2083 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2084 {
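	/* Each egress queue context occupies 24 bytes of EDC0; the hardware
	 * cidx/pidx pair lives in the big-endian doubleword at offset 8
	 * (cidx at bit 25, pidx at bit 9), matching the extraction below.
	 */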
2085         u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
2086         __be64 indices;
2087         int ret;
2088
2089         spin_lock(&adap->win0_lock);
2090         ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
2091                            sizeof(indices), (__be32 *)&indices,
2092                            T4_MEMORY_READ);
2093         spin_unlock(&adap->win0_lock);
2094         if (!ret) {
2095                 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2096                 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
2097         }
2098         return ret;
2099 }
2100
2101 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2102                         u16 size)
2103 {
2104         struct adapter *adap = netdev2adap(dev);
2105         u16 hw_pidx, hw_cidx;
2106         int ret;
2107
2108         ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2109         if (ret)
2110                 goto out;
2111
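	/* Compute how far software's pidx has advanced past the hardware
	 * copy, wrapping modulo the ring size, and replay that many
	 * increments through the kernel doorbell.
	 */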
2112         if (pidx != hw_pidx) {
2113                 u16 delta;
2114                 u32 val;
2115
2116                 if (pidx >= hw_pidx)
2117                         delta = pidx - hw_pidx;
2118                 else
2119                         delta = size - hw_pidx + pidx;
2120
2121                 if (is_t4(adap->params.chip))
2122                         val = PIDX_V(delta);
2123                 else
2124                         val = PIDX_T5_V(delta);
2125                 wmb();
2126                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2127                              QID_V(qid) | val);
2128         }
2129 out:
2130         return ret;
2131 }
2132 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
2133
2134 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
2135 {
2136         struct adapter *adap;
2137         u32 offset, memtype, memaddr;
2138         u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
2139         u32 edc0_end, edc1_end, mc0_end, mc1_end;
2140         int ret;
2141
2142         adap = netdev2adap(dev);
2143
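	/* The upper 24 bits of the STag are its index; each TPT entry is
	 * 32 bytes into the ULP TPT region starting at vres.stag.start.
	 */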
2144         offset = ((stag >> 8) * 32) + adap->vres.stag.start;
2145
2146         /* Figure out where the offset lands in the Memory Type/Address scheme.
2147          * This code assumes that the memory is laid out starting at offset 0
2148          * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
2149          * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
2150          * MC0, and some have both MC0 and MC1.
2151          */
2152         size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
2153         edc0_size = EDRAM0_SIZE_G(size) << 20;
2154         size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
2155         edc1_size = EDRAM1_SIZE_G(size) << 20;
2156         size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
2157         mc0_size = EXT_MEM0_SIZE_G(size) << 20;
2158
2159         edc0_end = edc0_size;
2160         edc1_end = edc0_end + edc1_size;
2161         mc0_end = edc1_end + mc0_size;
2162
2163         if (offset < edc0_end) {
2164                 memtype = MEM_EDC0;
2165                 memaddr = offset;
2166         } else if (offset < edc1_end) {
2167                 memtype = MEM_EDC1;
2168                 memaddr = offset - edc0_end;
2169         } else {
2170                 if (offset < mc0_end) {
2171                         memtype = MEM_MC0;
2172                         memaddr = offset - edc1_end;
2173                 } else if (is_t5(adap->params.chip)) {
2174                         size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2175                         mc1_size = EXT_MEM1_SIZE_G(size) << 20;
2176                         mc1_end = mc0_end + mc1_size;
2177                         if (offset < mc1_end) {
2178                                 memtype = MEM_MC1;
2179                                 memaddr = offset - mc0_end;
2180                         } else {
2181                                 /* offset beyond the end of any memory */
2182                                 goto err;
2183                         }
2184                 } else {
2185                         /* T4/T6 only has a single memory channel */
2186                         goto err;
2187                 }
2188         }
2189
2190         spin_lock(&adap->win0_lock);
2191         ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
2192         spin_unlock(&adap->win0_lock);
2193         return ret;
2194
2195 err:
2196         dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2197                 stag, offset);
2198         return -EINVAL;
2199 }
2200 EXPORT_SYMBOL(cxgb4_read_tpte);
2201
2202 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
2203 {
2204         u32 hi, lo;
2205         struct adapter *adap;
2206
2207         adap = netdev2adap(dev);
2208         lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
2209         hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
2210
2211         return ((u64)hi << 32) | (u64)lo;
2212 }
2213 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
2214
2215 int cxgb4_bar2_sge_qregs(struct net_device *dev,
2216                          unsigned int qid,
2217                          enum cxgb4_bar2_qtype qtype,
2218                          int user,
2219                          u64 *pbar2_qoffset,
2220                          unsigned int *pbar2_qid)
2221 {
2222         return t4_bar2_sge_qregs(netdev2adap(dev),
2223                                  qid,
2224                                  (qtype == CXGB4_BAR2_QTYPE_EGRESS
2225                                   ? T4_BAR2_QTYPE_EGRESS
2226                                   : T4_BAR2_QTYPE_INGRESS),
2227                                  user,
2228                                  pbar2_qoffset,
2229                                  pbar2_qid);
2230 }
2231 EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
2232
2233 static struct pci_driver cxgb4_driver;
2234
2235 static void check_neigh_update(struct neighbour *neigh)
2236 {
2237         const struct device *parent;
2238         const struct net_device *netdev = neigh->dev;
2239
2240         if (netdev->priv_flags & IFF_802_1Q_VLAN)
2241                 netdev = vlan_dev_real_dev(netdev);
2242         parent = netdev->dev.parent;
2243         if (parent && parent->driver == &cxgb4_driver.driver)
2244                 t4_l2t_update(dev_get_drvdata(parent), neigh);
2245 }
2246
2247 static int netevent_cb(struct notifier_block *nb, unsigned long event,
2248                        void *data)
2249 {
2250         switch (event) {
2251         case NETEVENT_NEIGH_UPDATE:
2252                 check_neigh_update(data);
2253                 break;
2254         case NETEVENT_REDIRECT:
2255         default:
2256                 break;
2257         }
2258         return 0;
2259 }
2260
2261 static bool netevent_registered;
2262 static struct notifier_block cxgb4_netevent_nb = {
2263         .notifier_call = netevent_cb
2264 };
2265
2266 static void drain_db_fifo(struct adapter *adap, int usecs)
2267 {
2268         u32 v1, v2, lp_count, hp_count;
2269
2270         do {
2271                 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2272                 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2273                 if (is_t4(adap->params.chip)) {
2274                         lp_count = LP_COUNT_G(v1);
2275                         hp_count = HP_COUNT_G(v1);
2276                 } else {
2277                         lp_count = LP_COUNT_T5_G(v1);
2278                         hp_count = HP_COUNT_T5_G(v2);
2279                 }
2280
2281                 if (lp_count == 0 && hp_count == 0)
2282                         break;
2283                 set_current_state(TASK_UNINTERRUPTIBLE);
2284                 schedule_timeout(usecs_to_jiffies(usecs));
2285         } while (1);
2286 }
2287
2288 static void disable_txq_db(struct sge_txq *q)
2289 {
2290         unsigned long flags;
2291
2292         spin_lock_irqsave(&q->db_lock, flags);
2293         q->db_disabled = 1;
2294         spin_unlock_irqrestore(&q->db_lock, flags);
2295 }
2296
2297 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
2298 {
2299         spin_lock_irq(&q->db_lock);
2300         if (q->db_pidx_inc) {
2301                 /* Make sure that all writes to the TX descriptors
2302                  * are committed before we tell HW about them.
2303                  */
2304                 wmb();
2305                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2306                              QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
2307                 q->db_pidx_inc = 0;
2308         }
2309         q->db_disabled = 0;
2310         spin_unlock_irq(&q->db_lock);
2311 }
2312
2313 static void disable_dbs(struct adapter *adap)
2314 {
2315         int i;
2316
2317         for_each_ethrxq(&adap->sge, i)
2318                 disable_txq_db(&adap->sge.ethtxq[i].q);
2319         for_each_iscsirxq(&adap->sge, i)
2320                 disable_txq_db(&adap->sge.ofldtxq[i].q);
2321         for_each_port(adap, i)
2322                 disable_txq_db(&adap->sge.ctrlq[i].q);
2323 }
2324
2325 static void enable_dbs(struct adapter *adap)
2326 {
2327         int i;
2328
2329         for_each_ethrxq(&adap->sge, i)
2330                 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
2331         for_each_iscsirxq(&adap->sge, i)
2332                 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
2333         for_each_port(adap, i)
2334                 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2335 }
2336
2337 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2338 {
2339         if (adap->uld_handle[CXGB4_ULD_RDMA])
2340                 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
2341                                 cmd);
2342 }
2343
2344 static void process_db_full(struct work_struct *work)
2345 {
2346         struct adapter *adap;
2347
2348         adap = container_of(work, struct adapter, db_full_task);
2349
2350         drain_db_fifo(adap, dbfifo_drain_delay);
2351         enable_dbs(adap);
2352         notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2353         if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2354                 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2355                                  DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
2356                                  DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
2357         else
2358                 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2359                                  DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
2360 }
2361
2362 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2363 {
2364         u16 hw_pidx, hw_cidx;
2365         int ret;
2366
2367         spin_lock_irq(&q->db_lock);
2368         ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2369         if (ret)
2370                 goto out;
2371         if (q->db_pidx != hw_pidx) {
2372                 u16 delta;
2373                 u32 val;
2374
2375                 if (q->db_pidx >= hw_pidx)
2376                         delta = q->db_pidx - hw_pidx;
2377                 else
2378                         delta = q->size - hw_pidx + q->db_pidx;
2379
2380                 if (is_t4(adap->params.chip))
2381                         val = PIDX_V(delta);
2382                 else
2383                         val = PIDX_T5_V(delta);
2384                 wmb();
2385                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2386                              QID_V(q->cntxt_id) | val);
2387         }
2388 out:
2389         q->db_disabled = 0;
2390         q->db_pidx_inc = 0;
2391         spin_unlock_irq(&q->db_lock);
2392         if (ret)
2393                 CH_WARN(adap, "DB drop recovery failed.\n");
2394 }
2395 static void recover_all_queues(struct adapter *adap)
2396 {
2397         int i;
2398
2399         for_each_ethrxq(&adap->sge, i)
2400                 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2401         for_each_iscsirxq(&adap->sge, i)
2402                 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2403         for_each_port(adap, i)
2404                 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2405 }
2406
2407 static void process_db_drop(struct work_struct *work)
2408 {
2409         struct adapter *adap;
2410
2411         adap = container_of(work, struct adapter, db_drop_task);
2412
2413         if (is_t4(adap->params.chip)) {
2414                 drain_db_fifo(adap, dbfifo_drain_delay);
2415                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2416                 drain_db_fifo(adap, dbfifo_drain_delay);
2417                 recover_all_queues(adap);
2418                 drain_db_fifo(adap, dbfifo_drain_delay);
2419                 enable_dbs(adap);
2420                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2421         } else if (is_t5(adap->params.chip)) {
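		/* On T5 the dropped doorbell's queue id and pidx increment
		 * can be read back (raw register offsets here; this tree has
		 * no symbolic names for them) and the lost write replayed
		 * through the BAR2 user doorbell.
		 */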
2422                 u32 dropped_db = t4_read_reg(adap, 0x010ac);
2423                 u16 qid = (dropped_db >> 15) & 0x1ffff;
2424                 u16 pidx_inc = dropped_db & 0x1fff;
2425                 u64 bar2_qoffset;
2426                 unsigned int bar2_qid;
2427                 int ret;
2428
2429                 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2430                                         0, &bar2_qoffset, &bar2_qid);
2431                 if (ret)
			dev_err(adap->pdev_dev,
				"doorbell drop recovery: qid=%d, pidx_inc=%d\n",
				qid, pidx_inc);
2434                 else
2435                         writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2436                                adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2437
2438                 /* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
2440         }
2441
2442         if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2443                 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2444 }
2445
2446 void t4_db_full(struct adapter *adap)
2447 {
2448         if (is_t4(adap->params.chip)) {
2449                 disable_dbs(adap);
2450                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2451                 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2452                                  DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2453                 queue_work(adap->workq, &adap->db_full_task);
2454         }
2455 }
2456
2457 void t4_db_dropped(struct adapter *adap)
2458 {
2459         if (is_t4(adap->params.chip)) {
2460                 disable_dbs(adap);
2461                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2462         }
2463         queue_work(adap->workq, &adap->db_drop_task);
2464 }
2465
2466 static void uld_attach(struct adapter *adap, unsigned int uld)
2467 {
2468         void *handle;
2469         struct cxgb4_lld_info lli;
2470         unsigned short i;
2471
2472         lli.pdev = adap->pdev;
2473         lli.pf = adap->pf;
2474         lli.l2t = adap->l2t;
2475         lli.tids = &adap->tids;
2476         lli.ports = adap->port;
2477         lli.vr = &adap->vres;
2478         lli.mtus = adap->params.mtus;
2479         if (uld == CXGB4_ULD_RDMA) {
2480                 lli.rxq_ids = adap->sge.rdma_rxq;
2481                 lli.ciq_ids = adap->sge.rdma_ciq;
2482                 lli.nrxq = adap->sge.rdmaqs;
2483                 lli.nciq = adap->sge.rdmaciqs;
2484         } else if (uld == CXGB4_ULD_ISCSI) {
2485                 lli.rxq_ids = adap->sge.iscsi_rxq;
2486                 lli.nrxq = adap->sge.iscsiqsets;
2487         } else if (uld == CXGB4_ULD_ISCSIT) {
2488                 lli.rxq_ids = adap->sge.iscsit_rxq;
2489                 lli.nrxq = adap->sge.niscsitq;
2490         }
2491         lli.ntxq = adap->sge.iscsiqsets;
2492         lli.nchan = adap->params.nports;
2493         lli.nports = adap->params.nports;
2494         lli.wr_cred = adap->params.ofldq_wr_cred;
2495         lli.adapter_type = adap->params.chip;
2496         lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
2497         lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
2498         lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
2499         lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
2500         lli.iscsi_ppm = &adap->iscsi_ppm;
2501         lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
2502         lli.udb_density = 1 << adap->params.sge.eq_qpp;
2503         lli.ucq_density = 1 << adap->params.sge.iq_qpp;
2504         lli.filt_mode = adap->params.tp.vlan_pri_map;
2505         /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
2506         for (i = 0; i < NCHAN; i++)
2507                 lli.tx_modq[i] = i;
2508         lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
2509         lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
2510         lli.fw_vers = adap->params.fw_vers;
2511         lli.dbfifo_int_thresh = dbfifo_int_thresh;
2512         lli.sge_ingpadboundary = adap->sge.fl_align;
2513         lli.sge_egrstatuspagesize = adap->sge.stat_len;
2514         lli.sge_pktshift = adap->sge.pktshift;
2515         lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
2516         lli.max_ordird_qp = adap->params.max_ordird_qp;
2517         lli.max_ird_adapter = adap->params.max_ird_adapter;
2518         lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
2519         lli.nodeid = dev_to_node(adap->pdev_dev);
2520
2521         handle = ulds[uld].add(&lli);
2522         if (IS_ERR(handle)) {
2523                 dev_warn(adap->pdev_dev,
2524                          "could not attach to the %s driver, error %ld\n",
2525                          uld_str[uld], PTR_ERR(handle));
2526                 return;
2527         }
2528
2529         adap->uld_handle[uld] = handle;
2530
2531         if (!netevent_registered) {
2532                 register_netevent_notifier(&cxgb4_netevent_nb);
2533                 netevent_registered = true;
2534         }
2535
2536         if (adap->flags & FULL_INIT_DONE)
2537                 ulds[uld].state_change(handle, CXGB4_STATE_UP);
2538 }
2539
2540 static void attach_ulds(struct adapter *adap)
2541 {
2542         unsigned int i;
2543
2544         spin_lock(&adap_rcu_lock);
2545         list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
2546         spin_unlock(&adap_rcu_lock);
2547
2548         mutex_lock(&uld_mutex);
2549         list_add_tail(&adap->list_node, &adapter_list);
2550         for (i = 0; i < CXGB4_ULD_MAX; i++)
2551                 if (ulds[i].add)
2552                         uld_attach(adap, i);
2553         mutex_unlock(&uld_mutex);
2554 }
2555
2556 static void detach_ulds(struct adapter *adap)
2557 {
2558         unsigned int i;
2559
2560         mutex_lock(&uld_mutex);
2561         list_del(&adap->list_node);
2562         for (i = 0; i < CXGB4_ULD_MAX; i++)
2563                 if (adap->uld_handle[i]) {
2564                         ulds[i].state_change(adap->uld_handle[i],
2565                                              CXGB4_STATE_DETACH);
2566                         adap->uld_handle[i] = NULL;
2567                 }
2568         for (i = 0; i < CXGB4_PCI_ULD_MAX; i++)
2569                 if (adap->uld && adap->uld[i].handle) {
2570                         adap->uld[i].state_change(adap->uld[i].handle,
2571                                              CXGB4_STATE_DETACH);
2572                         adap->uld[i].handle = NULL;
2573                 }
2574         if (netevent_registered && list_empty(&adapter_list)) {
2575                 unregister_netevent_notifier(&cxgb4_netevent_nb);
2576                 netevent_registered = false;
2577         }
2578         mutex_unlock(&uld_mutex);
2579
2580         spin_lock(&adap_rcu_lock);
2581         list_del_rcu(&adap->rcu_node);
2582         spin_unlock(&adap_rcu_lock);
2583 }
2584
2585 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2586 {
2587         unsigned int i;
2588
2589         mutex_lock(&uld_mutex);
2590         for (i = 0; i < CXGB4_ULD_MAX; i++)
2591                 if (adap->uld_handle[i])
2592                         ulds[i].state_change(adap->uld_handle[i], new_state);
2593         for (i = 0; i < CXGB4_PCI_ULD_MAX; i++)
2594                 if (adap->uld && adap->uld[i].handle)
2595                         adap->uld[i].state_change(adap->uld[i].handle,
2596                                                   new_state);
2597         mutex_unlock(&uld_mutex);
2598 }
2599
2600 /**
2601  *      cxgb4_register_uld - register an upper-layer driver
2602  *      @type: the ULD type
2603  *      @p: the ULD methods
2604  *
2605  *      Registers an upper-layer driver with this driver and notifies the ULD
2606  *      about any presently available devices that support its type.  Returns
2607  *      %-EBUSY if a ULD of the same type is already registered.
2608  */
2609 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2610 {
2611         int ret = 0;
2612         struct adapter *adap;
2613
2614         if (type >= CXGB4_ULD_MAX)
2615                 return -EINVAL;
2616         mutex_lock(&uld_mutex);
2617         if (ulds[type].add) {
2618                 ret = -EBUSY;
2619                 goto out;
2620         }
2621         ulds[type] = *p;
2622         list_for_each_entry(adap, &adapter_list, list_node)
2623                 uld_attach(adap, type);
2624 out:    mutex_unlock(&uld_mutex);
2625         return ret;
2626 }
2627 EXPORT_SYMBOL(cxgb4_register_uld);
2628
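/* A minimal registration sketch (hypothetical ULD module init; only the
 * .add and .state_change methods are referenced in this file, any other
 * fields are assumptions):
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.add = my_add,
 *		.state_change = my_state_change,
 *	};
 *
 *	ret = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *	if (ret)
 *		return ret;
 */
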
2629 /**
2630  *      cxgb4_unregister_uld - unregister an upper-layer driver
2631  *      @type: the ULD type
2632  *
2633  *      Unregisters an existing upper-layer driver.
2634  */
2635 int cxgb4_unregister_uld(enum cxgb4_uld type)
2636 {
2637         struct adapter *adap;
2638
2639         if (type >= CXGB4_ULD_MAX)
2640                 return -EINVAL;
2641         mutex_lock(&uld_mutex);
2642         list_for_each_entry(adap, &adapter_list, list_node)
2643                 adap->uld_handle[type] = NULL;
2644         ulds[type].add = NULL;
2645         mutex_unlock(&uld_mutex);
2646         return 0;
2647 }
2648 EXPORT_SYMBOL(cxgb4_unregister_uld);
2649
2650 #if IS_ENABLED(CONFIG_IPV6)
2651 static int cxgb4_inet6addr_handler(struct notifier_block *this,
2652                                    unsigned long event, void *data)
2653 {
2654         struct inet6_ifaddr *ifa = data;
2655         struct net_device *event_dev = ifa->idev->dev;
2656         const struct device *parent = NULL;
2657 #if IS_ENABLED(CONFIG_BONDING)
2658         struct adapter *adap;
2659 #endif
2660         if (event_dev->priv_flags & IFF_802_1Q_VLAN)
2661                 event_dev = vlan_dev_real_dev(event_dev);
2662 #if IS_ENABLED(CONFIG_BONDING)
2663         if (event_dev->flags & IFF_MASTER) {
2664                 list_for_each_entry(adap, &adapter_list, list_node) {
2665                         switch (event) {
2666                         case NETDEV_UP:
2667                                 cxgb4_clip_get(adap->port[0],
2668                                                (const u32 *)ifa, 1);
2669                                 break;
2670                         case NETDEV_DOWN:
2671                                 cxgb4_clip_release(adap->port[0],
2672                                                    (const u32 *)ifa, 1);
2673                                 break;
2674                         default:
2675                                 break;
2676                         }
2677                 }
2678                 return NOTIFY_OK;
2679         }
2680 #endif
2681
2682         if (event_dev)
2683                 parent = event_dev->dev.parent;
2684
2685         if (parent && parent->driver == &cxgb4_driver.driver) {
2686                 switch (event) {
2687                 case NETDEV_UP:
2688                         cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2689                         break;
2690                 case NETDEV_DOWN:
2691                         cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2692                         break;
2693                 default:
2694                         break;
2695                 }
2696         }
2697         return NOTIFY_OK;
2698 }
2699
2700 static bool inet6addr_registered;
2701 static struct notifier_block cxgb4_inet6addr_notifier = {
2702         .notifier_call = cxgb4_inet6addr_handler
2703 };
2704
2705 static void update_clip(const struct adapter *adap)
2706 {
2707         int i;
2708         struct net_device *dev;
2709         int ret;
2710
2711         rcu_read_lock();
2712
2713         for (i = 0; i < MAX_NPORTS; i++) {
2714                 dev = adap->port[i];
2715                 ret = 0;
2716
2717                 if (dev)
2718                         ret = cxgb4_update_root_dev_clip(dev);
2719
2720                 if (ret < 0)
2721                         break;
2722         }
2723         rcu_read_unlock();
2724 }
2725 #endif /* IS_ENABLED(CONFIG_IPV6) */
2726
2727 /**
2728  *      cxgb_up - enable the adapter
2729  *      @adap: adapter being enabled
2730  *
2731  *      Called when the first port is enabled, this function performs the
2732  *      actions necessary to make an adapter operational, such as completing
2733  *      the initialization of HW modules, and enabling interrupts.
2734  *
2735  *      Must be called with the rtnl lock held.
2736  */
2737 static int cxgb_up(struct adapter *adap)
2738 {
2739         int err;
2740
2741         err = setup_sge_queues(adap);
2742         if (err)
2743                 goto out;
2744         err = setup_rss(adap);
2745         if (err)
2746                 goto freeq;
2747
2748         if (adap->flags & USING_MSIX) {
2749                 name_msix_vecs(adap);
2750                 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2751                                   adap->msix_info[0].desc, adap);
2752                 if (err)
2753                         goto irq_err;
2754
2755                 err = request_msix_queue_irqs(adap);
2756                 if (err) {
2757                         free_irq(adap->msix_info[0].vec, adap);
2758                         goto irq_err;
2759                 }
2760         } else {
2761                 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2762                                   (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2763                                   adap->port[0]->name, adap);
2764                 if (err)
2765                         goto irq_err;
2766         }
2767         enable_rx(adap);
2768         t4_sge_start(adap);
2769         t4_intr_enable(adap);
2770         adap->flags |= FULL_INIT_DONE;
2771         notify_ulds(adap, CXGB4_STATE_UP);
2772 #if IS_ENABLED(CONFIG_IPV6)
2773         update_clip(adap);
2774 #endif
	/* Initialize hash mac addr list */
2776         INIT_LIST_HEAD(&adap->mac_hlist);
2777  out:
2778         return err;
2779  irq_err:
2780         dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2781  freeq:
2782         t4_free_sge_resources(adap);
2783         goto out;
2784 }
2785
2786 static void cxgb_down(struct adapter *adapter)
2787 {
2788         cancel_work_sync(&adapter->tid_release_task);
2789         cancel_work_sync(&adapter->db_full_task);
2790         cancel_work_sync(&adapter->db_drop_task);
2791         adapter->tid_release_task_busy = false;
2792         adapter->tid_release_head = NULL;
2793
2794         t4_sge_stop(adapter);
2795         t4_free_sge_resources(adapter);
2796         adapter->flags &= ~FULL_INIT_DONE;
2797 }
2798
2799 /*
2800  * net_device operations
2801  */
2802 static int cxgb_open(struct net_device *dev)
2803 {
2804         int err;
2805         struct port_info *pi = netdev_priv(dev);
2806         struct adapter *adapter = pi->adapter;
2807
2808         netif_carrier_off(dev);
2809
2810         if (!(adapter->flags & FULL_INIT_DONE)) {
2811                 err = cxgb_up(adapter);
2812                 if (err < 0)
2813                         return err;
2814         }
2815
2816         err = link_start(dev);
2817         if (!err)
2818                 netif_tx_start_all_queues(dev);
2819         return err;
2820 }
2821
2822 static int cxgb_close(struct net_device *dev)
2823 {
2824         struct port_info *pi = netdev_priv(dev);
2825         struct adapter *adapter = pi->adapter;
2826
2827         netif_tx_stop_all_queues(dev);
2828         netif_carrier_off(dev);
2829         return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
2830 }
2831
2832 /* Return an error number if the indicated filter isn't writable ...
2833  */
2834 static int writable_filter(struct filter_entry *f)
2835 {
2836         if (f->locked)
2837                 return -EPERM;
2838         if (f->pending)
2839                 return -EBUSY;
2840
2841         return 0;
2842 }
2843
2844 /* Delete the filter at the specified index (if valid).  This checks for all
2845  * the common problems with doing this, like the filter being locked or
2846  * currently pending in another operation, etc.
2847  */
2848 static int delete_filter(struct adapter *adapter, unsigned int fidx)
2849 {
2850         struct filter_entry *f;
2851         int ret;
2852
2853         if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
2854                 return -EINVAL;
2855
2856         f = &adapter->tids.ftid_tab[fidx];
2857         ret = writable_filter(f);
2858         if (ret)
2859                 return ret;
2860         if (f->valid)
2861                 return del_filter_wr(adapter, fidx);
2862
2863         return 0;
2864 }
2865
2866 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2867                 __be32 sip, __be16 sport, __be16 vlan,
2868                 unsigned int queue, unsigned char port, unsigned char mask)
2869 {
2870         int ret;
2871         struct filter_entry *f;
2872         struct adapter *adap;
2873         int i;
2874         u8 *val;
2875
2876         adap = netdev2adap(dev);
2877
2878         /* Adjust stid to the correct filter index */
2879         stid -= adap->tids.sftid_base;
2880         stid += adap->tids.nftids;
2881
2882         /* Check to make sure the filter requested is writable ...
2883          */
2884         f = &adap->tids.ftid_tab[stid];
2885         ret = writable_filter(f);
2886         if (ret)
2887                 return ret;
2888
2889         /* Clear out any old resources being used by the filter before
2890          * we start constructing the new filter.
2891          */
2892         if (f->valid)
2893                 clear_filter(adap, f);
2894
2895         /* Clear out filter specifications */
2896         memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2897         f->fs.val.lport = cpu_to_be16(sport);
2898         f->fs.mask.lport  = ~0;
2899         val = (u8 *)&sip;
2900         if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2901                 for (i = 0; i < 4; i++) {
2902                         f->fs.val.lip[i] = val[i];
2903                         f->fs.mask.lip[i] = ~0;
2904                 }
2905                 if (adap->params.tp.vlan_pri_map & PORT_F) {
2906                         f->fs.val.iport = port;
2907                         f->fs.mask.iport = mask;
2908                 }
2909         }
2910
2911         if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2912                 f->fs.val.proto = IPPROTO_TCP;
2913                 f->fs.mask.proto = ~0;
2914         }
2915
2916         f->fs.dirsteer = 1;
2917         f->fs.iq = queue;
2918         /* Mark filter as locked */
2919         f->locked = 1;
2920         f->fs.rpttid = 1;
2921
2922         ret = set_filter_wr(adap, stid);
2923         if (ret) {
2924                 clear_filter(adap, f);
2925                 return ret;
2926         }
2927
2928         return 0;
2929 }
2930 EXPORT_SYMBOL(cxgb4_create_server_filter);
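
/* Illustrative sketch (not part of the driver): a hypothetical ULD that
 * already owns a server TID could steer TCP SYNs for an IPv4 listener to
 * a chosen ingress queue like this.  The names "stid", "sip", "sport" and
 * "rxq_id" are assumed to come from the ULD's own bookkeeping.
 */
#if 0
static int example_steer_listener(struct net_device *dev, unsigned int stid,
				  __be32 sip, __be16 sport,
				  unsigned int rxq_id)
{
	/* No VLAN match; ingress port 0 with mask 0 matches any port. */
	return cxgb4_create_server_filter(dev, stid, sip, sport, 0,
					  rxq_id, 0, 0);
}
#endif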
2931
2932 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2933                 unsigned int queue, bool ipv6)
2934 {
2935         int ret;
2936         struct filter_entry *f;
2937         struct adapter *adap;
2938
2939         adap = netdev2adap(dev);
2940
2941         /* Adjust stid to the correct filter index */
2942         stid -= adap->tids.sftid_base;
2943         stid += adap->tids.nftids;
2944
2945         f = &adap->tids.ftid_tab[stid];
2946         /* Unlock the filter */
2947         f->locked = 0;
2948
2949         ret = delete_filter(adap, stid);
2950         if (ret)
2951                 return ret;
2952
2953         return 0;
2954 }
2955 EXPORT_SYMBOL(cxgb4_remove_server_filter);
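
/* Matching teardown sketch (again hypothetical): drop the steering filter
 * installed above.  As the code above shows, the "queue" and "ipv6"
 * arguments are currently unused on the removal path.
 */
#if 0
static int example_unsteer_listener(struct net_device *dev, unsigned int stid)
{
	return cxgb4_remove_server_filter(dev, stid, 0, false);
}
#endif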
2956
2957 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2958                                                 struct rtnl_link_stats64 *ns)
2959 {
2960         struct port_stats stats;
2961         struct port_info *p = netdev_priv(dev);
2962         struct adapter *adapter = p->adapter;
2963
2964         /* Block retrieving statistics during EEH error
2965          * recovery. Otherwise, the recovery might fail
2966          * and the PCI device will be removed permanently
2967          */
2968         spin_lock(&adapter->stats_lock);
2969         if (!netif_device_present(dev)) {
2970                 spin_unlock(&adapter->stats_lock);
2971                 return ns;
2972         }
2973         t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
2974                                  &p->stats_base);
2975         spin_unlock(&adapter->stats_lock);
2976
2977         ns->tx_bytes   = stats.tx_octets;
2978         ns->tx_packets = stats.tx_frames;
2979         ns->rx_bytes   = stats.rx_octets;
2980         ns->rx_packets = stats.rx_frames;
2981         ns->multicast  = stats.rx_mcast_frames;
2982
2983         /* detailed rx_errors */
2984         ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2985                                stats.rx_runt;
2986         ns->rx_over_errors   = 0;
2987         ns->rx_crc_errors    = stats.rx_fcs_err;
2988         ns->rx_frame_errors  = stats.rx_symbol_err;
2989         ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
2990                                stats.rx_ovflow2 + stats.rx_ovflow3 +
2991                                stats.rx_trunc0 + stats.rx_trunc1 +
2992                                stats.rx_trunc2 + stats.rx_trunc3;
2993         ns->rx_missed_errors = 0;
2994
2995         /* detailed tx_errors */
2996         ns->tx_aborted_errors   = 0;
2997         ns->tx_carrier_errors   = 0;
2998         ns->tx_fifo_errors      = 0;
2999         ns->tx_heartbeat_errors = 0;
3000         ns->tx_window_errors    = 0;
3001
3002         ns->tx_errors = stats.tx_error_frames;
3003         ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3004                 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3005         return ns;
3006 }
3007
3008 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3009 {
3010         unsigned int mbox;
3011         int ret = 0, prtad, devad;
3012         struct port_info *pi = netdev_priv(dev);
3013         struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3014
3015         switch (cmd) {
3016         case SIOCGMIIPHY:
3017                 if (pi->mdio_addr < 0)
3018                         return -EOPNOTSUPP;
3019                 data->phy_id = pi->mdio_addr;
3020                 break;
3021         case SIOCGMIIREG:
3022         case SIOCSMIIREG:
3023                 if (mdio_phy_id_is_c45(data->phy_id)) {
3024                         prtad = mdio_phy_id_prtad(data->phy_id);
3025                         devad = mdio_phy_id_devad(data->phy_id);
3026                 } else if (data->phy_id < 32) {
3027                         prtad = data->phy_id;
3028                         devad = 0;
3029                         data->reg_num &= 0x1f;
3030                 } else
3031                         return -EINVAL;
3032
3033                 mbox = pi->adapter->pf;
3034                 if (cmd == SIOCGMIIREG)
3035                         ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
3036                                          data->reg_num, &data->val_out);
3037                 else
3038                         ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
3039                                          data->reg_num, data->val_in);
3040                 break;
3041         case SIOCGHWTSTAMP:
3042                 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3043                                     sizeof(pi->tstamp_config)) ?
3044                         -EFAULT : 0;
3045         case SIOCSHWTSTAMP:
3046                 if (copy_from_user(&pi->tstamp_config, req->ifr_data,
3047                                    sizeof(pi->tstamp_config)))
3048                         return -EFAULT;
3049
3050                 switch (pi->tstamp_config.rx_filter) {
3051                 case HWTSTAMP_FILTER_NONE:
3052                         pi->rxtstamp = false;
3053                         break;
3054                 case HWTSTAMP_FILTER_ALL:
3055                         pi->rxtstamp = true;
3056                         break;
3057                 default:
3058                         pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
3059                         return -ERANGE;
3060                 }
3061
3062                 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3063                                     sizeof(pi->tstamp_config)) ?
3064                         -EFAULT : 0;
3065         default:
3066                 return -EOPNOTSUPP;
3067         }
3068         return ret;
3069 }
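
/* For reference, a Clause-45 phy_id as decoded above packs the port address
 * in bits 9:5 and the MMD device address in bits 4:0, with MDIO_PHY_ID_C45
 * (0x8000) set.  As a hypothetical example, prtad = 4 and devad = 1 encode
 * as 0x8000 | (4 << 5) | 1 = 0x8081, which is what userspace would place in
 * mii_ioctl_data.phy_id for SIOCGMIIREG/SIOCSMIIREG.
 */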
3070
3071 static void cxgb_set_rxmode(struct net_device *dev)
3072 {
3073         /* unfortunately we can't return errors to the stack */
3074         set_rxmode(dev, -1, false);
3075 }
3076
3077 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3078 {
3079         int ret;
3080         struct port_info *pi = netdev_priv(dev);
3081
3082         if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
3083                 return -EINVAL;
3084         ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
3085                             -1, -1, -1, true);
3086         if (!ret)
3087                 dev->mtu = new_mtu;
3088         return ret;
3089 }
3090
3091 #ifdef CONFIG_PCI_IOV
3092 static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3093 {
3094         struct port_info *pi = netdev_priv(dev);
3095         struct adapter *adap = pi->adapter;
3096
3097         /* verify MAC addr is valid */
3098         if (!is_valid_ether_addr(mac)) {
3099                 dev_err(pi->adapter->pdev_dev,
3100                         "Invalid Ethernet address %pM for VF %d\n",
3101                         mac, vf);
3102                 return -EINVAL;
3103         }
3104
3105         dev_info(pi->adapter->pdev_dev,
3106                  "Setting MAC %pM on VF %d\n", mac, vf);
3107         return t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
3108 }
3109 #endif
3110
3111 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3112 {
3113         int ret;
3114         struct sockaddr *addr = p;
3115         struct port_info *pi = netdev_priv(dev);
3116
3117         if (!is_valid_ether_addr(addr->sa_data))
3118                 return -EADDRNOTAVAIL;
3119
3120         ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
3121                             pi->xact_addr_filt, addr->sa_data, true, true);
3122         if (ret < 0)
3123                 return ret;
3124
3125         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3126         pi->xact_addr_filt = ret;
3127         return 0;
3128 }
3129
3130 #ifdef CONFIG_NET_POLL_CONTROLLER
3131 static void cxgb_netpoll(struct net_device *dev)
3132 {
3133         struct port_info *pi = netdev_priv(dev);
3134         struct adapter *adap = pi->adapter;
3135
3136         if (adap->flags & USING_MSIX) {
3137                 int i;
3138                 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3139
3140                 for (i = pi->nqsets; i; i--, rx++)
3141                         t4_sge_intr_msix(0, &rx->rspq);
3142         } else
3143                 t4_intr_handler(adap)(0, adap);
3144 }
3145 #endif
3146
3147 static const struct net_device_ops cxgb4_netdev_ops = {
3148         .ndo_open             = cxgb_open,
3149         .ndo_stop             = cxgb_close,
3150         .ndo_start_xmit       = t4_eth_xmit,
3151         .ndo_select_queue     = cxgb_select_queue,
3152         .ndo_get_stats64      = cxgb_get_stats,
3153         .ndo_set_rx_mode      = cxgb_set_rxmode,
3154         .ndo_set_mac_address  = cxgb_set_mac_addr,
3155         .ndo_set_features     = cxgb_set_features,
3156         .ndo_validate_addr    = eth_validate_addr,
3157         .ndo_do_ioctl         = cxgb_ioctl,
3158         .ndo_change_mtu       = cxgb_change_mtu,
3159 #ifdef CONFIG_NET_POLL_CONTROLLER
3160         .ndo_poll_controller  = cxgb_netpoll,
3161 #endif
3162 #ifdef CONFIG_CHELSIO_T4_FCOE
3163         .ndo_fcoe_enable      = cxgb_fcoe_enable,
3164         .ndo_fcoe_disable     = cxgb_fcoe_disable,
3165 #endif /* CONFIG_CHELSIO_T4_FCOE */
3166 #ifdef CONFIG_NET_RX_BUSY_POLL
3167         .ndo_busy_poll        = cxgb_busy_poll,
3168 #endif
3169 };
3170
3171 static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
3172 #ifdef CONFIG_PCI_IOV
3173         .ndo_set_vf_mac       = cxgb_set_vf_mac,
3174 #endif
3175 };
3176
3177 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3178 {
3179         struct adapter *adapter = netdev2adap(dev);
3180
3181         strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3182         strlcpy(info->version, cxgb4_driver_version,
3183                 sizeof(info->version));
3184         strlcpy(info->bus_info, pci_name(adapter->pdev),
3185                 sizeof(info->bus_info));
3186 }
3187
3188 static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
3189         .get_drvinfo       = get_drvinfo,
3190 };
3191
3192 void t4_fatal_err(struct adapter *adap)
3193 {
3194         t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
3195         t4_intr_disable(adap);
3196         dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3197 }
3198
3199 static void setup_memwin(struct adapter *adap)
3200 {
3201         u32 nic_win_base = t4_get_util_window(adap);
3202
3203         t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3204 }
3205
3206 static void setup_memwin_rdma(struct adapter *adap)
3207 {
3208         if (adap->vres.ocq.size) {
3209                 u32 start;
3210                 unsigned int sz_kb;
3211
3212                 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3213                 start &= PCI_BASE_ADDRESS_MEM_MASK;
3214                 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3215                 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3216                 t4_write_reg(adap,
3217                              PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3218                              start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3219                 t4_write_reg(adap,
3220                              PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3221                              adap->vres.ocq.start);
3222                 t4_read_reg(adap,
3223                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3224         }
3225 }
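
/* Worked example for the window sizing above (hypothetical size): an OCQ
 * region of 132KB rounds up to the next power of two, 256KB, so sz_kb = 256
 * and the register is programmed with WINDOW_V(ilog2(256)) = WINDOW_V(8)
 * alongside the BAR2-derived base address.
 */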
3226
3227 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3228 {
3229         u32 v;
3230         int ret;
3231
3232         /* get device capabilities */
3233         memset(c, 0, sizeof(*c));
3234         c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3235                                FW_CMD_REQUEST_F | FW_CMD_READ_F);
3236         c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
3237         ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
3238         if (ret < 0)
3239                 return ret;
3240
3241         c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3242                                FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3243         ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
3244         if (ret < 0)
3245                 return ret;
3246
3247         ret = t4_config_glbl_rss(adap, adap->pf,
3248                                  FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3249                                  FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
3250                                  FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
3251         if (ret < 0)
3252                 return ret;
3253
3254         ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
3255                           MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3256                           FW_CMD_CAP_PF);
3257         if (ret < 0)
3258                 return ret;
3259
3260         t4_sge_init(adap);
3261
3262         /* tweak some settings */
3263         t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
3264         t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
3265         t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
3266         v = t4_read_reg(adap, TP_PIO_DATA_A);
3267         t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
3268
3269         /* first 4 Tx modulation queues point to consecutive Tx channels */
3270         adap->params.tp.tx_modq_map = 0xE4;
3271         t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
3272                      TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
3273
3274         /* associate each Tx modulation queue with consecutive Tx channels */
3275         v = 0x84218421;
3276         t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3277                           &v, 1, TP_TX_SCHED_HDR_A);
3278         t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3279                           &v, 1, TP_TX_SCHED_FIFO_A);
3280         t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3281                           &v, 1, TP_TX_SCHED_PCMD_A);
3282
3283 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3284         if (is_offload(adap)) {
3285                 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
3286                              TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3287                              TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3288                              TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3289                              TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3290                 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
3291                              TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3292                              TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3293                              TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3294                              TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3295         }
3296
3297         /* get basic stuff going */
3298         return t4_early_init(adap, adap->pf);
3299 }
3300
3301 /*
3302  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
3303  */
3304 #define MAX_ATIDS 8192U
3305
3306 /*
3307  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3308  *
3309  * If the firmware we're dealing with has Configuration File support, then
3310  * we use that to perform all configuration.
3311  */
3312
3313 /*
3314  * Tweak configuration based on module parameters, etc.  Most of these have
3315  * defaults assigned to them by Firmware Configuration Files (if we're using
3316  * them) but need to be explicitly set if we're using hard-coded
3317  * initialization.  But even in the case of using Firmware Configuration
3318  * Files, we'd like to expose the ability to change these via module
3319  * parameters so these are essentially common tweaks/settings for
3320  * Configuration Files and hard-coded initialization ...
3321  */
3322 static int adap_init0_tweaks(struct adapter *adapter)
3323 {
3324         /*
3325          * Fix up various Host-Dependent Parameters like Page Size, Cache
3326          * Line Size, etc.  The firmware default is for a 4KB Page Size and
3327          * 64B Cache Line Size ...
3328          */
3329         t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3330
3331         /*
3332          * Process module parameters which affect early initialization.
3333          */
3334         if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3335                 dev_err(&adapter->pdev->dev,
3336                         "Ignoring illegal rx_dma_offset=%d, using 2\n",
3337                         rx_dma_offset);
3338                 rx_dma_offset = 2;
3339         }
3340         t4_set_reg_field(adapter, SGE_CONTROL_A,
3341                          PKTSHIFT_V(PKTSHIFT_M),
3342                          PKTSHIFT_V(rx_dma_offset));
3343
3344         /*
3345          * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3346          * adds the pseudo header itself.
3347          */
3348         t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
3349                                CSUM_HAS_PSEUDO_HDR_F, 0);
3350
3351         return 0;
3352 }
3353
3354 /* 10Gb/s-BT PHY Support.  Chip-external 10Gb/s-BT PHYs are complex chips
3355  * unto themselves and they contain their own firmware to perform their
3356  * tasks ...
3357  */
3358 static int phy_aq1202_version(const u8 *phy_fw_data,
3359                               size_t phy_fw_size)
3360 {
3361         int offset;
3362
3363         /* At offset 0x8 you're looking for the primary image's
3364          * starting offset which is 3 Bytes wide
3365          *
3366          * At offset 0xa of the primary image, you look for the offset
3367          * of the DRAM segment which is 3 Bytes wide.
3368          *
3369          * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
3370          * wide
3371          */
3372         #define be16(__p) (((__p)[0] << 8) | (__p)[1])
3373         #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
3374         #define le24(__p) (le16(__p) | ((__p)[2] << 16))
3375
3376         offset = le24(phy_fw_data + 0x8) << 12;
3377         offset = le24(phy_fw_data + offset + 0xa);
3378         return be16(phy_fw_data + offset + 0x27e);
3379
3380         #undef be16
3381         #undef le16
3382         #undef le24
3383 }
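
/* Worked example of the offset chase above (hypothetical image contents):
 * if the three little-endian bytes at 0x8 hold 0x000001, the primary image
 * starts at 0x1000 (the value is shifted left by 12, i.e. it counts 4KB
 * units); if the three bytes at 0x1000 + 0xa then hold 0x020000, the DRAM
 * segment starts at 0x20000 and the big-endian 16-bit firmware version is
 * read from 0x20000 + 0x27e.
 */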
3384
3385 static struct info_10gbt_phy_fw {
3386         unsigned int phy_fw_id;         /* PCI Device ID */
3387         char *phy_fw_file;              /* /lib/firmware/ PHY Firmware file */
3388         int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
3389         int phy_flash;                  /* Has FLASH for PHY Firmware */
3390 } phy_info_array[] = {
3391         {
3392                 PHY_AQ1202_DEVICEID,
3393                 PHY_AQ1202_FIRMWARE,
3394                 phy_aq1202_version,
3395                 1,
3396         },
3397         {
3398                 PHY_BCM84834_DEVICEID,
3399                 PHY_BCM84834_FIRMWARE,
3400                 NULL,
3401                 0,
3402         },
3403         { 0, NULL, NULL },
3404 };
3405
3406 static struct info_10gbt_phy_fw *find_phy_info(int devid)
3407 {
3408         int i;
3409
3410         for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
3411                 if (phy_info_array[i].phy_fw_id == devid)
3412                         return &phy_info_array[i];
3413         }
3414         return NULL;
3415 }
3416
3417 /* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
3418  * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
3419  * we return a negative error number.  If we transfer new firmware we return 1
3420  * (from t4_load_phy_fw()).  If we don't do anything we return 0.
3421  */
3422 static int adap_init0_phy(struct adapter *adap)
3423 {
3424         const struct firmware *phyf;
3425         int ret;
3426         struct info_10gbt_phy_fw *phy_info;
3427
3428         /* Use the device ID to determine which PHY file to flash.
3429          */
3430         phy_info = find_phy_info(adap->pdev->device);
3431         if (!phy_info) {
3432                 dev_warn(adap->pdev_dev,
3433                          "No PHY Firmware file found for this PHY\n");
3434                 return -EOPNOTSUPP;
3435         }
3436
3437         /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
3438          * use that. The adapter firmware provides us with a memory buffer
3439          * where we can load a PHY firmware file from the host if we want to
3440          * override the PHY firmware file in flash.
3441          */
3442         ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
3443                                       adap->pdev_dev);
3444         if (ret < 0) {
3445                 /* For adapters without FLASH attached to PHY for their
3446                  * firmware, it's obviously a fatal error if we can't get the
3447                  * firmware to the adapter.  For adapters with PHY firmware
3448                  * FLASH storage, it's worth a warning if we can't find the
3449                  * PHY Firmware but we'll neuter the error ...
3450                  */
3451                 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
3452                         "/lib/firmware/%s, error %d\n",
3453                         phy_info->phy_fw_file, -ret);
3454                 if (phy_info->phy_flash) {
3455                         int cur_phy_fw_ver = 0;
3456
3457                         t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3458                         dev_warn(adap->pdev_dev, "continuing with on-adapter "
3459                                  "FLASH copy, version %#x\n", cur_phy_fw_ver);
3460                         ret = 0;
3461                 }
3462
3463                 return ret;
3464         }
3465
3466         /* Load PHY Firmware onto adapter.
3467          */
3468         ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
3469                              phy_info->phy_fw_version,
3470                              (u8 *)phyf->data, phyf->size);
3471         if (ret < 0)
3472                 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
3473                         -ret);
3474         else if (ret > 0) {
3475                 int new_phy_fw_ver = 0;
3476
3477                 if (phy_info->phy_fw_version)
3478                         new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
3479                                                                   phyf->size);
3480                 dev_info(adap->pdev_dev, "Successfully transferred PHY "
3481                          "Firmware /lib/firmware/%s, version %#x\n",
3482                          phy_info->phy_fw_file, new_phy_fw_ver);
3483         }
3484
3485         release_firmware(phyf);
3486
3487         return ret;
3488 }
3489
3490 /*
3491  * Attempt to initialize the adapter via a Firmware Configuration File.
3492  */
3493 static int adap_init0_config(struct adapter *adapter, int reset)
3494 {
3495         struct fw_caps_config_cmd caps_cmd;
3496         const struct firmware *cf;
3497         unsigned long mtype = 0, maddr = 0;
3498         u32 finiver, finicsum, cfcsum;
3499         int ret;
3500         int config_issued = 0;
3501         char *fw_config_file, fw_config_file_path[256];
3502         char *config_name = NULL;
3503
3504         /*
3505          * Reset device if necessary.
3506          */
3507         if (reset) {
3508                 ret = t4_fw_reset(adapter, adapter->mbox,
3509                                   PIORSTMODE_F | PIORST_F);
3510                 if (ret < 0)
3511                         goto bye;
3512         }
3513
3514         /* If this is a 10Gb/s-BT adapter make sure the chip-external
3515          * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
3516          * to be performed after any global adapter RESET above since some
3517          * PHYs only have local RAM copies of the PHY firmware.
3518          */
3519         if (is_10gbt_device(adapter->pdev->device)) {
3520                 ret = adap_init0_phy(adapter);
3521                 if (ret < 0)
3522                         goto bye;
3523         }
3524         /*
3525          * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3526          * then use that.  Otherwise, use the configuration file stored
3527          * in the adapter flash ...
3528          */
3529         switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
3530         case CHELSIO_T4:
3531                 fw_config_file = FW4_CFNAME;
3532                 break;
3533         case CHELSIO_T5:
3534                 fw_config_file = FW5_CFNAME;
3535                 break;
3536         case CHELSIO_T6:
3537                 fw_config_file = FW6_CFNAME;
3538                 break;
3539         default:
3540                 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3541                        adapter->pdev->device);
3542                 ret = -EINVAL;
3543                 goto bye;
3544         }
3545
3546         ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
3547         if (ret < 0) {
3548                 config_name = "On FLASH";
3549                 mtype = FW_MEMTYPE_CF_FLASH;
3550                 maddr = t4_flash_cfg_addr(adapter);
3551         } else {
3552                 u32 params[7], val[7];
3553
3554                 sprintf(fw_config_file_path,
3555                         "/lib/firmware/%s", fw_config_file);
3556                 config_name = fw_config_file_path;
3557
3558                 if (cf->size >= FLASH_CFG_MAX_SIZE) {
3559                         ret = -ENOMEM;
3560                 } else {
3561                         params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3562                              FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3563                         ret = t4_query_params(adapter, adapter->mbox,
3564                                               adapter->pf, 0, 1, params, val);
3565                         if (ret == 0) {
3566                                 /*
3567                                  * For t4_memory_rw() below addresses and
3568                                  * sizes have to be in terms of multiples of 4
3569                                  * bytes.  So, if the Configuration File isn't
3570                                  * a multiple of 4 bytes in length we'll have
3571                                  * to write that out separately since we can't
3572                                  * guarantee that the bytes following the
3573                                  * residual byte in the buffer returned by
3574                                  * request_firmware() are zeroed out ...
3575                                  */
3576                                 size_t resid = cf->size & 0x3;
3577                                 size_t size = cf->size & ~0x3;
3578                                 __be32 *data = (__be32 *)cf->data;
3579
3580                                 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3581                                 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
3582
3583                                 spin_lock(&adapter->win0_lock);
3584                                 ret = t4_memory_rw(adapter, 0, mtype, maddr,
3585                                                    size, data, T4_MEMORY_WRITE);
3586                                 if (ret == 0 && resid != 0) {
3587                                         union {
3588                                                 __be32 word;
3589                                                 char buf[4];
3590                                         } last;
3591                                         int i;
3592
3593                                         last.word = data[size >> 2];
3594                                         for (i = resid; i < 4; i++)
3595                                                 last.buf[i] = 0;
3596                                         ret = t4_memory_rw(adapter, 0, mtype,
3597                                                            maddr + size,
3598                                                            4, &last.word,
3599                                                            T4_MEMORY_WRITE);
3600                                 }
3601                                 spin_unlock(&adapter->win0_lock);
3602                         }
3603                 }
3604
3605                 release_firmware(cf);
3606                 if (ret)
3607                         goto bye;
3608         }
3609
3610         /*
3611          * Issue a Capability Configuration command to the firmware to get it
3612          * to parse the Configuration File.  We don't use t4_fw_config_file()
3613          * because we want the ability to modify various features after we've
3614          * processed the configuration file ...
3615          */
3616         memset(&caps_cmd, 0, sizeof(caps_cmd));
3617         caps_cmd.op_to_write =
3618                 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3619                       FW_CMD_REQUEST_F |
3620                       FW_CMD_READ_F);
3621         caps_cmd.cfvalid_to_len16 =
3622                 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3623                       FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3624                       FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
3625                       FW_LEN16(caps_cmd));
3626         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3627                          &caps_cmd);
3628
3629         /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
3630          * Configuration File in FLASH), our last gasp effort is to use the
3631          * Firmware Configuration File which is embedded in the firmware.  A
3632          * very few early versions of the firmware didn't have one embedded
3633          * but we can ignore those.
3634          */
3635         if (ret == -ENOENT) {
3636                 memset(&caps_cmd, 0, sizeof(caps_cmd));
3637                 caps_cmd.op_to_write =
3638                         htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3639                                         FW_CMD_REQUEST_F |
3640                                         FW_CMD_READ_F);
3641                 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3642                 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3643                                 sizeof(caps_cmd), &caps_cmd);
3644                 config_name = "Firmware Default";
3645         }
3646
3647         config_issued = 1;
3648         if (ret < 0)
3649                 goto bye;
3650
3651         finiver = ntohl(caps_cmd.finiver);
3652         finicsum = ntohl(caps_cmd.finicsum);
3653         cfcsum = ntohl(caps_cmd.cfcsum);
3654         if (finicsum != cfcsum)
3655                 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3656                          "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3657                          finicsum, cfcsum);
3658
3659         /*
3660          * And now tell the firmware to use the configuration we just loaded.
3661          */
3662         caps_cmd.op_to_write =
3663                 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3664                       FW_CMD_REQUEST_F |
3665                       FW_CMD_WRITE_F);
3666         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3667         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3668                          NULL);
3669         if (ret < 0)
3670                 goto bye;
3671
3672         /*
3673          * Tweak configuration based on system architecture, module
3674          * parameters, etc.
3675          */
3676         ret = adap_init0_tweaks(adapter);
3677         if (ret < 0)
3678                 goto bye;
3679
3680         /*
3681          * And finally tell the firmware to initialize itself using the
3682          * parameters from the Configuration File.
3683          */
3684         ret = t4_fw_initialize(adapter, adapter->mbox);
3685         if (ret < 0)
3686                 goto bye;
3687
3688         /* Emit Firmware Configuration File information and return
3689          * successfully.
3690          */
3691         dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
3692                  "Configuration File \"%s\", version %#x, computed checksum %#x\n",
3693                  config_name, finiver, cfcsum);
3694         return 0;
3695
3696         /*
3697          * Something bad happened.  Return the error ...  (If the "error"
3698          * is that there's no Configuration File on the adapter we don't
3699          * want to issue a warning since this is fairly common.)
3700          */
3701 bye:
3702         if (config_issued && ret != -ENOENT)
3703                 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
3704                          config_name, -ret);
3705         return ret;
3706 }
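
/* Standalone sketch of the tail-padding idiom used above (illustrative
 * only, not called by the driver): copy the residual bytes into a zeroed
 * 32-bit word so that a word-granular writer such as t4_memory_rw() never
 * depends on bytes beyond the caller's length being zero.
 */
#if 0
static __be32 pad_tail_word(const u8 *buf, size_t len)
{
	union {
		__be32 word;
		u8 buf[4];
	} last = { .word = 0 };

	memcpy(last.buf, buf + (len & ~0x3UL), len & 0x3);
	return last.word;
}
#endif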
3707
3708 static struct fw_info fw_info_array[] = {
3709         {
3710                 .chip = CHELSIO_T4,
3711                 .fs_name = FW4_CFNAME,
3712                 .fw_mod_name = FW4_FNAME,
3713                 .fw_hdr = {
3714                         .chip = FW_HDR_CHIP_T4,
3715                         .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
3716                         .intfver_nic = FW_INTFVER(T4, NIC),
3717                         .intfver_vnic = FW_INTFVER(T4, VNIC),
3718                         .intfver_ri = FW_INTFVER(T4, RI),
3719                         .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3720                         .intfver_fcoe = FW_INTFVER(T4, FCOE),
3721                 },
3722         }, {
3723                 .chip = CHELSIO_T5,
3724                 .fs_name = FW5_CFNAME,
3725                 .fw_mod_name = FW5_FNAME,
3726                 .fw_hdr = {
3727                         .chip = FW_HDR_CHIP_T5,
3728                         .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
3729                         .intfver_nic = FW_INTFVER(T5, NIC),
3730                         .intfver_vnic = FW_INTFVER(T5, VNIC),
3731                         .intfver_ri = FW_INTFVER(T5, RI),
3732                         .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3733                         .intfver_fcoe = FW_INTFVER(T5, FCOE),
3734                 },
3735         }, {
3736                 .chip = CHELSIO_T6,
3737                 .fs_name = FW6_CFNAME,
3738                 .fw_mod_name = FW6_FNAME,
3739                 .fw_hdr = {
3740                         .chip = FW_HDR_CHIP_T6,
3741                         .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
3742                         .intfver_nic = FW_INTFVER(T6, NIC),
3743                         .intfver_vnic = FW_INTFVER(T6, VNIC),
3744                         .intfver_ofld = FW_INTFVER(T6, OFLD),
3745                         .intfver_ri = FW_INTFVER(T6, RI),
3746                         .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3747                         .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3748                         .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3749                         .intfver_fcoe = FW_INTFVER(T6, FCOE),
3750                 },
3751         }
3752
3754
3755 static struct fw_info *find_fw_info(int chip)
3756 {
3757         int i;
3758
3759         for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
3760                 if (fw_info_array[i].chip == chip)
3761                         return &fw_info_array[i];
3762         }
3763         return NULL;
3764 }
3765
3766 /*
3767  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3768  */
3769 static int adap_init0(struct adapter *adap)
3770 {
3771         int ret;
3772         u32 v, port_vec;
3773         enum dev_state state;
3774         u32 params[7], val[7];
3775         struct fw_caps_config_cmd caps_cmd;
3776         int reset = 1;
3777
3778         /* Grab Firmware Device Log parameters as early as possible so we have
3779          * access to it for debugging, etc.
3780          */
3781         ret = t4_init_devlog_params(adap);
3782         if (ret < 0)
3783                 return ret;
3784
3785         /* Contact FW, advertising Master capability */
3786         ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
3787                           is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
3788         if (ret < 0) {
3789                 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3790                         ret);
3791                 return ret;
3792         }
3793         if (ret == adap->mbox)
3794                 adap->flags |= MASTER_PF;
3795
3796         /*
3797          * If we're the Master PF Driver and the device is uninitialized,
3798          * then let's consider upgrading the firmware ...  (We always want
3799          * to check the firmware version number in order to A. get it for
3800          * later reporting and B. to warn if the currently loaded firmware
3801          * is excessively mismatched relative to the driver.)
3802          */
3803         t4_get_fw_version(adap, &adap->params.fw_vers);
3804         t4_get_bs_version(adap, &adap->params.bs_vers);
3805         t4_get_tp_version(adap, &adap->params.tp_vers);
3806         t4_get_exprom_version(adap, &adap->params.er_vers);
3807
3808         ret = t4_check_fw_version(adap);
3809         /* If firmware is too old (not supported by driver) force an update. */
3810         if (ret)
3811                 state = DEV_STATE_UNINIT;
3812         if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
3813                 struct fw_info *fw_info;
3814                 struct fw_hdr *card_fw;
3815                 const struct firmware *fw;
3816                 const u8 *fw_data = NULL;
3817                 unsigned int fw_size = 0;
3818
3819                 /* This is the firmware whose headers the driver was compiled
3820                  * against
3821                  */
3822                 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
3823                 if (fw_info == NULL) {
3824                         dev_err(adap->pdev_dev,
3825                                 "unable to get firmware info for chip %d.\n",
3826                                 CHELSIO_CHIP_VERSION(adap->params.chip));
3827                         return -EINVAL;
3828                 }
3829
3830                 /* allocate memory to read the header of the firmware on the
3831                  * card
3832                  */
3833                 card_fw = t4_alloc_mem(sizeof(*card_fw));
                if (!card_fw) {
                        ret = -ENOMEM;
                        goto bye;
                }
3834
3835                 /* Get FW from /lib/firmware/ */
3836                 ret = request_firmware(&fw, fw_info->fw_mod_name,
3837                                        adap->pdev_dev);
3838                 if (ret < 0) {
3839                         dev_err(adap->pdev_dev,
3840                                 "unable to load firmware image %s, error %d\n",
3841                                 fw_info->fw_mod_name, ret);
3842                 } else {
3843                         fw_data = fw->data;
3844                         fw_size = fw->size;
3845                 }
3846
3847                 /* upgrade FW logic */
3848                 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
3849                                  state, &reset);
3850
3851                 /* Cleaning up */
3852                 release_firmware(fw);
3853                 t4_free_mem(card_fw);
3854
3855                 if (ret < 0)
3856                         goto bye;
3857         }
3858
3859         /*
3860          * Grab VPD parameters.  This should be done after we establish a
3861          * connection to the firmware since some of the VPD parameters
3862          * (notably the Core Clock frequency) are retrieved via requests to
3863          * the firmware.  On the other hand, we need these fairly early on
3864          * so we do this right after getting ahold of the firmware.
3865          */
3866         ret = t4_get_vpd_params(adap, &adap->params.vpd);
3867         if (ret < 0)
3868                 goto bye;
3869
3870         /*
3871          * Find out what ports are available to us.  Note that we need to do
3872          * this before calling adap_init0_config() since it needs nports
3873          * and portvec ...
3874          */
3875         v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3876             FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
3878         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
3879         if (ret < 0)
3880                 goto bye;
3881
3882         adap->params.nports = hweight32(port_vec);
3883         adap->params.portvec = port_vec;
3884
3885         /* If the firmware is initialized already, emit a simple note to that
3886          * effect. Otherwise, it's time to try initializing the adapter.
3887          */
3888         if (state == DEV_STATE_INIT) {
3889                 dev_info(adap->pdev_dev, "Coming up as %s: "\
3890                          "Adapter already initialized\n",
3891                          adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
3892         } else {
3893                 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
3894                          "Initializing adapter\n");
3895
3896                 /* Find out whether we're dealing with a version of the
3897                  * firmware which has configuration file support.
3898                  */
3899                 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3900                              FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3901                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3902                                       params, val);
3903
3904                 /* If the firmware doesn't support Configuration Files,
3905                  * return an error.
3906                  */
3907                 if (ret < 0) {
3908                         dev_err(adap->pdev_dev, "firmware doesn't support "
3909                                 "Firmware Configuration Files\n");
3910                         goto bye;
3911                 }
3912
3913                 /* The firmware provides us with a memory buffer where we can
3914                  * load a Configuration File from the host if we want to
3915                  * override the Configuration File in flash.
3916                  */
3917                 ret = adap_init0_config(adap, reset);
3918                 if (ret == -ENOENT) {
3919                         dev_err(adap->pdev_dev, "no Configuration File "
3920                                 "present on adapter.\n");
3921                         goto bye;
3922                 }
3923                 if (ret < 0) {
3924                         dev_err(adap->pdev_dev, "could not initialize "
3925                                 "adapter, error %d\n", -ret);
3926                         goto bye;
3927                 }
3928         }
3929
3930         /* Give the SGE code a chance to pull in anything that it needs ...
3931          * Note that this must be called after we retrieve our VPD parameters
3932          * in order to know how to convert core ticks to seconds, etc.
3933          */
3934         ret = t4_sge_init(adap);
3935         if (ret < 0)
3936                 goto bye;
3937
3938         if (is_bypass_device(adap->pdev->device))
3939                 adap->params.bypass = 1;
3940
3941         /*
3942          * Grab some of our basic fundamental operating parameters.
3943          */
3944 #define FW_PARAM_DEV(param) \
3945         (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
3946         FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
3947
3948 #define FW_PARAM_PFVF(param) \
3949         FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
3950         FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)|  \
3951         FW_PARAMS_PARAM_Y_V(0) | \
3952         FW_PARAMS_PARAM_Z_V(0)
3953
3954         params[0] = FW_PARAM_PFVF(EQ_START);
3955         params[1] = FW_PARAM_PFVF(L2T_START);
3956         params[2] = FW_PARAM_PFVF(L2T_END);
3957         params[3] = FW_PARAM_PFVF(FILTER_START);
3958         params[4] = FW_PARAM_PFVF(FILTER_END);
3959         params[5] = FW_PARAM_PFVF(IQFLINT_START);
3960         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
3961         if (ret < 0)
3962                 goto bye;
3963         adap->sge.egr_start = val[0];
3964         adap->l2t_start = val[1];
3965         adap->l2t_end = val[2];
3966         adap->tids.ftid_base = val[3];
3967         adap->tids.nftids = val[4] - val[3] + 1;
3968         adap->sge.ingr_start = val[5];
3969
3970         /* qids (ingress/egress) returned from firmware can be anywhere
3971          * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
3972          * Hence the driver needs to allocate memory for this range to
3973          * store the queue info. Get the highest IQFLINT/EQ index returned
3974          * in FW_EQ_*_CMD.alloc command.
3975          */
3976         params[0] = FW_PARAM_PFVF(EQ_END);
3977         params[1] = FW_PARAM_PFVF(IQFLINT_END);
3978         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3979         if (ret < 0)
3980                 goto bye;
3981         adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
3982         adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
3983
3984         adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
3985                                     sizeof(*adap->sge.egr_map), GFP_KERNEL);
3986         if (!adap->sge.egr_map) {
3987                 ret = -ENOMEM;
3988                 goto bye;
3989         }
3990
3991         adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
3992                                      sizeof(*adap->sge.ingr_map), GFP_KERNEL);
3993         if (!adap->sge.ingr_map) {
3994                 ret = -ENOMEM;
3995                 goto bye;
3996         }
3997
3998         /* Allocate the memory for the various egress queue bitmaps,
3999          * i.e. starving_fl, txq_maperr and blocked_fl.
4000          */
4001         adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4002                                         sizeof(long), GFP_KERNEL);
4003         if (!adap->sge.starving_fl) {
4004                 ret = -ENOMEM;
4005                 goto bye;
4006         }
4007
4008         adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4009                                        sizeof(long), GFP_KERNEL);
4010         if (!adap->sge.txq_maperr) {
4011                 ret = -ENOMEM;
4012                 goto bye;
4013         }
4014
4015 #ifdef CONFIG_DEBUG_FS
4016         adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4017                                        sizeof(long), GFP_KERNEL);
4018         if (!adap->sge.blocked_fl) {
4019                 ret = -ENOMEM;
4020                 goto bye;
4021         }
4022 #endif
4023
4024         params[0] = FW_PARAM_PFVF(CLIP_START);
4025         params[1] = FW_PARAM_PFVF(CLIP_END);
4026         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4027         if (ret < 0)
4028                 goto bye;
4029         adap->clipt_start = val[0];
4030         adap->clipt_end = val[1];
4031
4032         /* query params related to active filter region */
4033         params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4034         params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4035         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4036         /* If the active filter region is non-empty, we can establish
4037          * offload connections through firmware work requests.
4038          */
4039         if (ret >= 0 && val[0] != val[1]) {
4040                 adap->flags |= FW_OFLD_CONN;
4041                 adap->tids.aftid_base = val[0];
4042                 adap->tids.aftid_end = val[1];
4043         }
4044
4045         /* If we're running on newer firmware, let it know that we're
4046          * prepared to deal with encapsulated CPL messages.  Older
4047          * firmware won't understand this and we'll just get
4048          * unencapsulated messages ...
4049          */
4050         params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4051         val[0] = 1;
4052         (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
4053
4054         /*
4055          * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
4056          * capability.  Earlier versions of the firmware didn't have the
4057          * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
4058          * permission to use ULPTX MEMWRITE DSGL.
4059          */
4060         if (is_t4(adap->params.chip)) {
4061                 adap->params.ulptx_memwrite_dsgl = false;
4062         } else {
4063                 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
4064                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4065                                       1, params, val);
4066                 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
4067         }
4068
4069         /*
4070          * Get device capabilities so we can determine what resources we need
4071          * to manage.
4072          */
4073         memset(&caps_cmd, 0, sizeof(caps_cmd));
4074         caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4075                                      FW_CMD_REQUEST_F | FW_CMD_READ_F);
4076         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4077         ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4078                          &caps_cmd);
4079         if (ret < 0)
4080                 goto bye;
4081
4082         if (caps_cmd.ofldcaps) {
4083                 /* query offload-related parameters */
4084                 params[0] = FW_PARAM_DEV(NTID);
4085                 params[1] = FW_PARAM_PFVF(SERVER_START);
4086                 params[2] = FW_PARAM_PFVF(SERVER_END);
4087                 params[3] = FW_PARAM_PFVF(TDDP_START);
4088                 params[4] = FW_PARAM_PFVF(TDDP_END);
4089                 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4090                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4091                                       params, val);
4092                 if (ret < 0)
4093                         goto bye;
4094                 adap->tids.ntids = val[0];
4095                 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4096                 adap->tids.stid_base = val[1];
4097                 adap->tids.nstids = val[2] - val[1] + 1;
4098                 /*
4099                  * Set up the server filter region. Divide the available filter
4100                  * region into two parts: regular filters get 1/3rd and server
4101                  * filters get the remaining 2/3rd. This is only enabled if the
4102                  * workaround (FW_OFLD_CONN) path is enabled.
4103                  * 1. Regular filters.
4104                  * 2. Server filters: these are special filters used to
4105                  * redirect SYN packets to the offload queue.
4106                  */
4107                 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4108                         adap->tids.sftid_base = adap->tids.ftid_base +
4109                                         DIV_ROUND_UP(adap->tids.nftids, 3);
4110                         adap->tids.nsftids = adap->tids.nftids -
4111                                          DIV_ROUND_UP(adap->tids.nftids, 3);
4112                         adap->tids.nftids = adap->tids.sftid_base -
4113                                                 adap->tids.ftid_base;
4114                 }
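                /* Worked example of the split above (hypothetical sizes):
                 * with nftids = 96, DIV_ROUND_UP(96, 3) = 32, so regular
                 * filters keep indices [ftid_base, ftid_base + 32) and the
                 * remaining 64 entries become server filters starting at
                 * sftid_base = ftid_base + 32.
                 */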
4115                 adap->vres.ddp.start = val[3];
4116                 adap->vres.ddp.size = val[4] - val[3] + 1;
4117                 adap->params.ofldq_wr_cred = val[5];
4118
4119                 adap->params.offload = 1;
4120         }
4121         if (caps_cmd.rdmacaps) {
4122                 params[0] = FW_PARAM_PFVF(STAG_START);
4123                 params[1] = FW_PARAM_PFVF(STAG_END);
4124                 params[2] = FW_PARAM_PFVF(RQ_START);
4125                 params[3] = FW_PARAM_PFVF(RQ_END);
4126                 params[4] = FW_PARAM_PFVF(PBL_START);
4127                 params[5] = FW_PARAM_PFVF(PBL_END);
4128                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4129                                       params, val);
4130                 if (ret < 0)
4131                         goto bye;
4132                 adap->vres.stag.start = val[0];
4133                 adap->vres.stag.size = val[1] - val[0] + 1;
4134                 adap->vres.rq.start = val[2];
4135                 adap->vres.rq.size = val[3] - val[2] + 1;
4136                 adap->vres.pbl.start = val[4];
4137                 adap->vres.pbl.size = val[5] - val[4] + 1;
4138
4139                 params[0] = FW_PARAM_PFVF(SQRQ_START);
4140                 params[1] = FW_PARAM_PFVF(SQRQ_END);
4141                 params[2] = FW_PARAM_PFVF(CQ_START);
4142                 params[3] = FW_PARAM_PFVF(CQ_END);
4143                 params[4] = FW_PARAM_PFVF(OCQ_START);
4144                 params[5] = FW_PARAM_PFVF(OCQ_END);
4145                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
4146                                       val);
4147                 if (ret < 0)
4148                         goto bye;
4149                 adap->vres.qp.start = val[0];
4150                 adap->vres.qp.size = val[1] - val[0] + 1;
4151                 adap->vres.cq.start = val[2];
4152                 adap->vres.cq.size = val[3] - val[2] + 1;
4153                 adap->vres.ocq.start = val[4];
4154                 adap->vres.ocq.size = val[5] - val[4] + 1;
4155
4156                 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
4157                 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4158                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
4159                                       val);
4160                 if (ret < 0) {
4161                         adap->params.max_ordird_qp = 8;
4162                         adap->params.max_ird_adapter = 32 * adap->tids.ntids;
4163                         ret = 0;
4164                 } else {
4165                         adap->params.max_ordird_qp = val[0];
4166                         adap->params.max_ird_adapter = val[1];
4167                 }
4168                 dev_info(adap->pdev_dev,
4169                          "max_ordird_qp %d max_ird_adapter %d\n",
4170                          adap->params.max_ordird_qp,
4171                          adap->params.max_ird_adapter);
4172         }
4173         if (caps_cmd.iscsicaps) {
4174                 params[0] = FW_PARAM_PFVF(ISCSI_START);
4175                 params[1] = FW_PARAM_PFVF(ISCSI_END);
4176                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4177                                       params, val);
4178                 if (ret < 0)
4179                         goto bye;
4180                 adap->vres.iscsi.start = val[0];
4181                 adap->vres.iscsi.size = val[1] - val[0] + 1;
4182         }
4183         if (caps_cmd.cryptocaps) {
4184                 /* Should query params here...TODO */
4185                 adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
4186                 adap->num_uld += 1;
4187         }
4188 #undef FW_PARAM_PFVF
4189 #undef FW_PARAM_DEV
4190
4191         /* The MTU/MSS Table is initialized by now, so load their values.  If
4192          * we're initializing the adapter, then we'll make any modifications
4193          * we want to the MTU/MSS Table and also initialize the congestion
4194          * parameters.
4195          */
4196         t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
4197         if (state != DEV_STATE_INIT) {
4198                 int i;
4199
4200                 /* The default MTU Table contains values 1492 and 1500.
4201                  * However, for TCP, it's better to have two values which are
4202                  * a multiple of 8 +/- 4 bytes apart near this popular MTU.
4203                  * This allows us to have a TCP Data Payload which is a
4204                  * multiple of 8 regardless of what combination of TCP Options
4205                  * are in use (always a multiple of 4 bytes) which is
4206                  * important for performance reasons.  For instance, if no
4207                  * options are in use, then we have a 20-byte IP header and a
4208                  * 20-byte TCP header.  In this case, a 1500-byte MSS would
4209                  * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
4210                  * which is not a multiple of 8.  So using an MSS of 1488 in
4211                  * this case results in a TCP Data Payload of 1448 bytes which
4212                  * is a multiple of 8.  On the other hand, if 12-byte TCP Time
4213                  * Stamps have been negotiated, then an MTU of 1500 bytes
4214                  * results in a TCP Data Payload of 1448 bytes which, as
4215                  * above, is a multiple of 8 bytes ...
4216                  */
4217                 for (i = 0; i < NMTUS; i++)
4218                         if (adap->params.mtus[i] == 1492) {
4219                                 adap->params.mtus[i] = 1488;
4220                                 break;
4221                         }
4222
4223                 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4224                              adap->params.b_wnd);
4225         }
4226         t4_init_sge_params(adap);
4227         adap->flags |= FW_OK;
4228         t4_init_tp_params(adap);
4229         return 0;
4230
4231         /*
4232          * Something bad happened.  If a command timed out or failed with
4233          * EIO, the FW is not operating within its spec or something
4234          * catastrophic happened to the HW/FW; stop issuing commands.
4235          */
4236 bye:
4237         kfree(adap->sge.egr_map);
4238         kfree(adap->sge.ingr_map);
4239         kfree(adap->sge.starving_fl);
4240         kfree(adap->sge.txq_maperr);
4241 #ifdef CONFIG_DEBUG_FS
4242         kfree(adap->sge.blocked_fl);
4243 #endif
4244         if (ret != -ETIMEDOUT && ret != -EIO)
4245                 t4_fw_bye(adap, adap->mbox);
4246         return ret;
4247 }
4248
4249 /* EEH callbacks */
4250
4251 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4252                                          pci_channel_state_t state)
4253 {
4254         int i;
4255         struct adapter *adap = pci_get_drvdata(pdev);
4256
4257         if (!adap)
4258                 goto out;
4259
4260         rtnl_lock();
4261         adap->flags &= ~FW_OK;
4262         notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
4263         spin_lock(&adap->stats_lock);
4264         for_each_port(adap, i) {
4265                 struct net_device *dev = adap->port[i];
4266
4267                 netif_device_detach(dev);
4268                 netif_carrier_off(dev);
4269         }
4270         spin_unlock(&adap->stats_lock);
4271         disable_interrupts(adap);
4272         if (adap->flags & FULL_INIT_DONE)
4273                 cxgb_down(adap);
4274         rtnl_unlock();
4275         if ((adap->flags & DEV_ENABLED)) {
4276                 pci_disable_device(pdev);
4277                 adap->flags &= ~DEV_ENABLED;
4278         }
4279 out:    return state == pci_channel_io_perm_failure ?
4280                 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4281 }
4282
4283 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4284 {
4285         int i, ret;
4286         struct fw_caps_config_cmd c;
4287         struct adapter *adap = pci_get_drvdata(pdev);
4288
4289         if (!adap) {
4290                 pci_restore_state(pdev);
4291                 pci_save_state(pdev);
4292                 return PCI_ERS_RESULT_RECOVERED;
4293         }
4294
4295         if (!(adap->flags & DEV_ENABLED)) {
4296                 if (pci_enable_device(pdev)) {
4297                         dev_err(&pdev->dev,
4298                                 "Cannot reenable PCI device after reset\n");
4299                         return PCI_ERS_RESULT_DISCONNECT;
4300                 }
4301                 adap->flags |= DEV_ENABLED;
4302         }
4303
4304         pci_set_master(pdev);
4305         pci_restore_state(pdev);
4306         pci_save_state(pdev);
4307         pci_cleanup_aer_uncorrect_error_status(pdev);
4308
4309         if (t4_wait_dev_ready(adap->regs) < 0)
4310                 return PCI_ERS_RESULT_DISCONNECT;
4311         if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
4312                 return PCI_ERS_RESULT_DISCONNECT;
4313         adap->flags |= FW_OK;
4314         if (adap_init1(adap, &c))
4315                 return PCI_ERS_RESULT_DISCONNECT;
4316
4317         for_each_port(adap, i) {
4318                 struct port_info *p = adap2pinfo(adap, i);
4319
4320                 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
4321                                   NULL, NULL);
4322                 if (ret < 0)
4323                         return PCI_ERS_RESULT_DISCONNECT;
4324                 p->viid = ret;
4325                 p->xact_addr_filt = -1;
4326         }
4327
4328         t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4329                      adap->params.b_wnd);
4330         setup_memwin(adap);
4331         if (cxgb_up(adap))
4332                 return PCI_ERS_RESULT_DISCONNECT;
4333         return PCI_ERS_RESULT_RECOVERED;
4334 }
4335
4336 static void eeh_resume(struct pci_dev *pdev)
4337 {
4338         int i;
4339         struct adapter *adap = pci_get_drvdata(pdev);
4340
4341         if (!adap)
4342                 return;
4343
4344         rtnl_lock();
4345         for_each_port(adap, i) {
4346                 struct net_device *dev = adap->port[i];
4347
4348                 if (netif_running(dev)) {
4349                         link_start(dev);
4350                         cxgb_set_rxmode(dev);
4351                 }
4352                 netif_device_attach(dev);
4353         }
4354         rtnl_unlock();
4355 }
4356
4357 static const struct pci_error_handlers cxgb4_eeh = {
4358         .error_detected = eeh_err_detected,
4359         .slot_reset     = eeh_slot_reset,
4360         .resume         = eeh_resume,
4361 };
4362
4363 static inline bool is_x_10g_port(const struct link_config *lc)
4364 {
4365         return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
4366                (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
4367 }
4368
4369 /*
4370  * Perform default configuration of DMA queues depending on the number and type
4371  * of ports we found and the number of available CPUs.  Most settings can be
4372  * modified by the admin prior to actual use.
4373  */
4374 static void cfg_queues(struct adapter *adap)
4375 {
4376         struct sge *s = &adap->sge;
4377         int i, n10g = 0, qidx = 0;
4378 #ifndef CONFIG_CHELSIO_T4_DCB
4379         int q10g = 0;
4380 #endif
4381         int ciq_size;
4382
4383         /* Reduce memory usage in kdump environment, disable all offload.
4384          */
4385         if (is_kdump_kernel()) {
4386                 adap->params.offload = 0;
4387                 adap->params.crypto = 0;
4388         } else if (adap->num_uld && uld_mem_alloc(adap)) {
4389                 adap->params.crypto = 0;
4390         }
4391
4392         for_each_port(adap, i)
4393                 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4394 #ifdef CONFIG_CHELSIO_T4_DCB
4395         /* For Data Center Bridging support we need to be able to support up
4396          * to 8 Traffic Priorities; each of which will be assigned to its
4397          * own TX Queue in order to prevent Head-Of-Line Blocking.
4398          */
4399         if (adap->params.nports * 8 > MAX_ETH_QSETS) {
4400                 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
4401                         MAX_ETH_QSETS, adap->params.nports * 8);
4402                 BUG_ON(1);
4403         }
4404
4405         for_each_port(adap, i) {
4406                 struct port_info *pi = adap2pinfo(adap, i);
4407
4408                 pi->first_qset = qidx;
4409                 pi->nqsets = 8;
4410                 qidx += pi->nqsets;
4411         }
4412 #else /* !CONFIG_CHELSIO_T4_DCB */
4413         /*
4414          * We default to 1 queue per non-10G port and up to the number of
4415          * online CPUs' worth of queues per 10G port.
4416          */
4417         if (n10g)
4418                 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
4419         if (q10g > netif_get_num_default_rss_queues())
4420                 q10g = netif_get_num_default_rss_queues();
4421
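        /* Worked example (hypothetical configuration, assuming MAX_ETH_QSETS
         * is 32): a 4-port adapter with two 10G ports gives
         * q10g = (32 - 2) / 2 = 15, which the cap above then reduces to
         * netif_get_num_default_rss_queues() (at most 8 by default), so each
         * 10G port ends up with up to 8 queue sets and each 1G port with 1.
         */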
4422         for_each_port(adap, i) {
4423                 struct port_info *pi = adap2pinfo(adap, i);
4424
4425                 pi->first_qset = qidx;
4426                 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
4427                 qidx += pi->nqsets;
4428         }
4429 #endif /* !CONFIG_CHELSIO_T4_DCB */
4430
4431         s->ethqsets = qidx;
4432         s->max_ethqsets = qidx;   /* MSI-X may lower it later */
4433
4434         if (is_offload(adap)) {
4435                 /*
4436                  * For offload we use 1 queue/channel if all ports are up to 1G,
4437                  * otherwise we divide all available queues amongst the channels
4438                  * capped by the number of available cores.
4439                  */
4440                 if (n10g) {
4441                         i = min_t(int, ARRAY_SIZE(s->iscsirxq),
4442                                   num_online_cpus());
4443                         s->iscsiqsets = roundup(i, adap->params.nports);
4444                 } else {
4445                         s->iscsiqsets = adap->params.nports;
4446                 }
4446                 /* For RDMA one Rx queue per channel suffices */
4447                 s->rdmaqs = adap->params.nports;
4448                 /* Try and allow at least 1 CIQ per cpu rounding down
4449                  * to the number of ports, with a minimum of 1 per port.
4450                  * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
4451                  * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
4452                  * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
4453                  */
4454                 s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
4455                 s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
4456                                 adap->params.nports;
4457                 s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
4458
4459                 if (!is_t4(adap->params.chip))
4460                         s->niscsitq = s->iscsiqsets;
4461         }
4462
4463         for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4464                 struct sge_eth_rxq *r = &s->ethrxq[i];
4465
4466                 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
4467                 r->fl.size = 72;
4468         }
4469
4470         for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4471                 s->ethtxq[i].q.size = 1024;
4472
4473         for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4474                 s->ctrlq[i].q.size = 512;
4475
4476         for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
4477                 s->ofldtxq[i].q.size = 1024;
4478
4479         for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
4480                 struct sge_ofld_rxq *r = &s->iscsirxq[i];
4481
4482                 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
4483                 r->rspq.uld = CXGB4_ULD_ISCSI;
4484                 r->fl.size = 72;
4485         }
4486
4487         if (!is_t4(adap->params.chip)) {
4488                 for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
4489                         struct sge_ofld_rxq *r = &s->iscsitrxq[i];
4490
4491                         init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
4492                         r->rspq.uld = CXGB4_ULD_ISCSIT;
4493                         r->fl.size = 72;
4494                 }
4495         }
4496
4497         for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
4498                 struct sge_ofld_rxq *r = &s->rdmarxq[i];
4499
4500                 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
4501                 r->rspq.uld = CXGB4_ULD_RDMA;
4502                 r->fl.size = 72;
4503         }
4504
4505         ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
4506         if (ciq_size > SGE_MAX_IQ_SIZE) {
4507                 CH_WARN(adap, "CIQ size too small for available IQs\n");
4508                 ciq_size = SGE_MAX_IQ_SIZE;
4509         }
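        /* Sizing sketch (hypothetical numbers): with vres.cq.size = 49152 and
         * nftids = 496, ciq_size = 64 + 49152 + 496 = 49712; the clamp above
         * only kicks in when that sum exceeds SGE_MAX_IQ_SIZE.
         */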
4510
4511         for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
4512                 struct sge_ofld_rxq *r = &s->rdmaciq[i];
4513
4514                 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
4515                 r->rspq.uld = CXGB4_ULD_RDMA;
4516         }
4517
4518         init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
4519         init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
4520 }
4521
4522 /*
4523  * Reduce the number of Ethernet queues across all ports to at most n.
4524  * n provides at least one queue per port.
4525  */
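/* Worked example (hypothetical): four ports with 4 queue sets each (16 total)
 * and n = 10: the round-robin loop below trims one queue set per port per
 * pass, stopping at 2/2/3/3 = 10 queue sets, after which the first_qset
 * offsets are recomputed.
 */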
4526 static void reduce_ethqs(struct adapter *adap, int n)
4527 {
4528         int i;
4529         struct port_info *pi;
4530
4531         while (n < adap->sge.ethqsets)
4532                 for_each_port(adap, i) {
4533                         pi = adap2pinfo(adap, i);
4534                         if (pi->nqsets > 1) {
4535                                 pi->nqsets--;
4536                                 adap->sge.ethqsets--;
4537                                 if (adap->sge.ethqsets <= n)
4538                                         break;
4539                         }
4540                 }
4541
4542         n = 0;
4543         for_each_port(adap, i) {
4544                 pi = adap2pinfo(adap, i);
4545                 pi->first_qset = n;
4546                 n += pi->nqsets;
4547         }
4548 }
4549
4550 static int get_msix_info(struct adapter *adap)
4551 {
4552         struct uld_msix_info *msix_info;
4553         int max_ingq = (MAX_OFLD_QSETS * adap->num_uld);
4554
4555         msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
4556         if (!msix_info)
4557                 return -ENOMEM;
4558
4559         adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
4560                                                  sizeof(long), GFP_KERNEL);
4561         if (!adap->msix_bmap_ulds.msix_bmap) {
4562                 kfree(msix_info);
4563                 return -ENOMEM;
4564         }
4565         spin_lock_init(&adap->msix_bmap_ulds.lock);
4566         adap->msix_info_ulds = msix_info;
4567         return 0;
4568 }
4569
4570 static void free_msix_info(struct adapter *adap)
4571 {
4572         if (!adap->num_uld)
4573                 return;
4574
4575         kfree(adap->msix_info_ulds);
4576         kfree(adap->msix_bmap_ulds.msix_bmap);
4577 }
4578
4579 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
4580 #define EXTRA_VECS 2
4581
4582 static int enable_msix(struct adapter *adap)
4583 {
4584         int ofld_need = 0, uld_need = 0;
4585         int i, j, want, need, allocated;
4586         struct sge *s = &adap->sge;
4587         unsigned int nchan = adap->params.nports;
4588         struct msix_entry *entries;
4589         int max_ingq = MAX_INGQ;
4590
4591         max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
4592         entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
4593                                 GFP_KERNEL);
4594         if (!entries)
4595                 return -ENOMEM;
4596
4597         /* Allocate the MSI-X vector map for the ULDs */
4598         if (is_pci_uld(adap) && get_msix_info(adap))
4599                 adap->params.crypto = 0;
4600
4601         for (i = 0; i < max_ingq + 1; ++i)
4602                 entries[i].entry = i;
4603
4604         want = s->max_ethqsets + EXTRA_VECS;
4605         if (is_offload(adap)) {
4606                 want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
4607                         s->niscsitq;
4608                 /* need nchan for each possible ULD */
4609                 if (is_t4(adap->params.chip))
4610                         ofld_need = 3 * nchan;
4611                 else
4612                         ofld_need = 4 * nchan;
4613         }
4614         if (is_pci_uld(adap)) {
4615                 want += netif_get_num_default_rss_queues() * nchan;
4616                 uld_need = nchan;
4617         }
4618 #ifdef CONFIG_CHELSIO_T4_DCB
4619         /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
4620          * each port.
4621          */
4622         need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
4623 #else
4624         need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
4625 #endif
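        /* Worked sizing example (hypothetical, non-DCB): a 2-port T5 adapter
         * with offload enabled but no PCI ULDs has ofld_need = 4 * 2 = 8 and
         * uld_need = 0, so need = 2 + EXTRA_VECS + 8 = 12, while want also
         * counts every Ethernet, RDMA and iSCSI queue set; any shortfall in
         * the vectors actually granted is absorbed by trimming those queue
         * groups below.
         */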
4626         allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
4627         if (allocated < 0) {
4628                 dev_info(adap->pdev_dev,
4629                          "not enough MSI-X vectors left, not using MSI-X\n");
4630                 kfree(entries);
4631                 return allocated;
4632         }
4633
4634         /* Distribute available vectors to the various queue groups.
4635          * Every group gets its minimum requirement and NIC gets top
4636          * priority for leftovers.
4637          */
4638         i = allocated - EXTRA_VECS - ofld_need - uld_need;
4639         if (i < s->max_ethqsets) {
4640                 s->max_ethqsets = i;
4641                 if (i < s->ethqsets)
4642                         reduce_ethqs(adap, i);
4643         }
4644         if (is_pci_uld(adap)) {
4645                 if (allocated < want)
4646                         s->nqs_per_uld = nchan;
4647                 else
4648                         s->nqs_per_uld = netif_get_num_default_rss_queues() *
4649                                         nchan;
4650         }
4651
4652         if (is_offload(adap)) {
4653                 if (allocated < want) {
4654                         s->rdmaqs = nchan;
4655                         s->rdmaciqs = nchan;
4656
4657                         if (!is_t4(adap->params.chip))
4658                                 s->niscsitq = nchan;
4659                 }
4660
4661                 /* leftovers go to OFLD */
4662                 i = allocated - EXTRA_VECS - s->max_ethqsets -
4663                         s->rdmaqs - s->rdmaciqs - s->niscsitq;
4664                 if (is_pci_uld(adap))
4665                         i -= s->nqs_per_uld * adap->num_uld;
4666                 s->iscsiqsets = (i / nchan) * nchan;  /* round down */
4667
4668         }
4669
4670         for (i = 0; i < (allocated - (s->nqs_per_uld * adap->num_uld)); ++i)
4671                 adap->msix_info[i].vec = entries[i].vector;
4672         if (is_pci_uld(adap)) {
4673                 for (j = 0 ; i < allocated; ++i, j++)
4674                         adap->msix_info_ulds[j].vec = entries[i].vector;
4675                 adap->msix_bmap_ulds.mapsize = j;
4676         }
4677         dev_info(adap->pdev_dev,
4678                  "%d MSI-X vectors allocated, nic %d iscsi %d rdma cpl %d rdma ciq %d uld %d\n",
4679                  allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
4680                  s->rdmaciqs, s->nqs_per_uld);
4681
4682         kfree(entries);
4683         return 0;
4684 }
4685
4686 #undef EXTRA_VECS
4687
4688 static int init_rss(struct adapter *adap)
4689 {
4690         unsigned int i;
4691         int err;
4692
4693         err = t4_init_rss_mode(adap, adap->mbox);
4694         if (err)
4695                 return err;
4696
4697         for_each_port(adap, i) {
4698                 struct port_info *pi = adap2pinfo(adap, i);
4699
4700                 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
4701                 if (!pi->rss)
4702                         return -ENOMEM;
4703         }
4704         return 0;
4705 }
4706
4707 static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
4708                                         enum pci_bus_speed *speed,
4709                                         enum pcie_link_width *width)
4710 {
4711         u32 lnkcap1, lnkcap2;
4712         int err1, err2;
4713
4714 #define  PCIE_MLW_CAP_SHIFT 4   /* start of MLW mask in link capabilities */
4715
4716         *speed = PCI_SPEED_UNKNOWN;
4717         *width = PCIE_LNK_WIDTH_UNKNOWN;
4718
4719         err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
4720                                           &lnkcap1);
4721         err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
4722                                           &lnkcap2);
4723         if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
4724                 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
4725                         *speed = PCIE_SPEED_8_0GT;
4726                 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
4727                         *speed = PCIE_SPEED_5_0GT;
4728                 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
4729                         *speed = PCIE_SPEED_2_5GT;
4730         }
4731         if (!err1) {
4732                 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
4733                 if (!lnkcap2) { /* pre-r3.0 */
4734                         if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
4735                                 *speed = PCIE_SPEED_5_0GT;
4736                         else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
4737                                 *speed = PCIE_SPEED_2_5GT;
4738                 }
4739         }
4740
4741         if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
4742                 return err1 ? err1 : err2 ? err2 : -EINVAL;
4743         return 0;
4744 }
4745
4746 static void cxgb4_check_pcie_caps(struct adapter *adap)
4747 {
4748         enum pcie_link_width width, width_cap;
4749         enum pci_bus_speed speed, speed_cap;
4750
4751 #define PCIE_SPEED_STR(speed) \
4752         (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
4753          speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
4754          speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
4755          "Unknown")
4756
4757         if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
4758                 dev_warn(adap->pdev_dev,
4759                          "Unable to determine PCIe device BW capabilities\n");
4760                 return;
4761         }
4762
4763         if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
4764             speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
4765                 dev_warn(adap->pdev_dev,
4766                          "Unable to determine PCI Express bandwidth.\n");
4767                 return;
4768         }
4769
4770         dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
4771                  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
4772         dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
4773                  width, width_cap);
4774         if (speed < speed_cap || width < width_cap)
4775                 dev_info(adap->pdev_dev,
4776                  "A slot with more lanes and/or higher speed is suggested for optimal performance.\n");
4778 }
4779
4780 /* Dump basic information about the adapter */
4781 static void print_adapter_info(struct adapter *adapter)
4782 {
4783         /* Device information */
4784         dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
4785                  adapter->params.vpd.id,
4786                  CHELSIO_CHIP_RELEASE(adapter->params.chip));
4787         dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
4788                  adapter->params.vpd.sn, adapter->params.vpd.pn);
4789
4790         /* Firmware Version */
4791         if (!adapter->params.fw_vers)
4792                 dev_warn(adapter->pdev_dev, "No firmware loaded\n");
4793         else
4794                 dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
4795                          FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
4796                          FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
4797                          FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
4798                          FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
4799
4800         /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
4801          * Firmware, so dev_info() is more appropriate here.)
4802          */
4803         if (!adapter->params.bs_vers)
4804                 dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
4805         else
4806                 dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
4807                          FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
4808                          FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
4809                          FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
4810                          FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
4811
4812         /* TP Microcode Version */
4813         if (!adapter->params.tp_vers)
4814                 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
4815         else
4816                 dev_info(adapter->pdev_dev,
4817                          "TP Microcode version: %u.%u.%u.%u\n",
4818                          FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
4819                          FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
4820                          FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
4821                          FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
4822
4823         /* Expansion ROM version */
4824         if (!adapter->params.er_vers)
4825                 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
4826         else
4827                 dev_info(adapter->pdev_dev,
4828                          "Expansion ROM version: %u.%u.%u.%u\n",
4829                          FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
4830                          FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
4831                          FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
4832                          FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
4833
4834         /* Software/Hardware configuration */
4835         dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
4836                  is_offload(adapter) ? "R" : "",
4837                  ((adapter->flags & USING_MSIX) ? "MSI-X" :
4838                   (adapter->flags & USING_MSI) ? "MSI" : ""),
4839                  is_offload(adapter) ? "Offload" : "non-Offload");
4840 }
4841
4842 static void print_port_info(const struct net_device *dev)
4843 {
4844         char buf[80];
4845         char *bufp = buf;
4846         const char *spd = "";
4847         const struct port_info *pi = netdev_priv(dev);
4848         const struct adapter *adap = pi->adapter;
4849
4850         if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
4851                 spd = " 2.5 GT/s";
4852         else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
4853                 spd = " 5 GT/s";
4854         else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
4855                 spd = " 8 GT/s";
4856
4857         if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
4858                 bufp += sprintf(bufp, "100/");
4859         if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
4860                 bufp += sprintf(bufp, "1000/");
4861         if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
4862                 bufp += sprintf(bufp, "10G/");
4863         if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
4864                 bufp += sprintf(bufp, "40G/");
4865         if (bufp != buf)
4866                 --bufp;
4867         sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
4868
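        /* Example (hypothetical port): a T580-CR port supporting
         * 100M/1G/10G/40G yields buf = "100/1000/10G/40GBASE-QSFP" here.
         */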
4869         netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
4870                     dev->name, adap->params.vpd.id, adap->name, buf);
4871 }
4872
4873 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
4874 {
4875         pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
4876 }
4877
4878 /*
4879  * Free the following resources:
4880  * - memory used for tables
4881  * - MSI/MSI-X
4882  * - net devices
4883  * - resources FW is holding for us
4884  */
4885 static void free_some_resources(struct adapter *adapter)
4886 {
4887         unsigned int i;
4888
4889         t4_free_mem(adapter->l2t);
4890         t4_free_mem(adapter->tids.tid_tab);
4891         kfree(adapter->sge.egr_map);
4892         kfree(adapter->sge.ingr_map);
4893         kfree(adapter->sge.starving_fl);
4894         kfree(adapter->sge.txq_maperr);
4895 #ifdef CONFIG_DEBUG_FS
4896         kfree(adapter->sge.blocked_fl);
4897 #endif
4898         disable_msi(adapter);
4899
4900         for_each_port(adapter, i)
4901                 if (adapter->port[i]) {
4902                         struct port_info *pi = adap2pinfo(adapter, i);
4903
4904                         if (pi->viid != 0)
4905                                 t4_free_vi(adapter, adapter->mbox, adapter->pf,
4906                                            0, pi->viid);
4907                         kfree(pi->rss);
4908                         free_netdev(adapter->port[i]);
4909                 }
4910         if (adapter->flags & FW_OK)
4911                 t4_fw_bye(adapter, adapter->pf);
4912 }
4913
4914 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
4915 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
4916                    NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
4917 #define SEGMENT_SIZE 128
4918
4919 static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
4920 {
4921         u16 device_id;
4922
4923         /* Retrieve adapter's device ID */
4924         pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
4925
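        /* The top nibble of the device ID encodes the chip generation, e.g. a
         * (hypothetical) device ID of 0x5401 gives 0x5401 >> 12 == 0x5, i.e.
         * CHELSIO_T5.
         */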
4926         switch (device_id >> 12) {
4927         case CHELSIO_T4:
4928                 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
4929         case CHELSIO_T5:
4930                 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4931         case CHELSIO_T6:
4932                 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
4933         default:
4934                 dev_err(&pdev->dev, "Device %d is not supported\n",
4935                         device_id);
4936         }
4937         return -EINVAL;
4938 }
4939
4940 #ifdef CONFIG_PCI_IOV
4941 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
4942 {
4943         struct adapter *adap = pci_get_drvdata(pdev);
4944         int err = 0;
4945         int current_vfs = pci_num_vf(pdev);
4946         u32 pcie_fw;
4947
4948         pcie_fw = readl(adap->regs + PCIE_FW_A);
4949         /* Check if cxgb4 is the MASTER and fw is initialized */
4950         if (!(pcie_fw & PCIE_FW_INIT_F) ||
4951             !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
4952             PCIE_FW_MASTER_G(pcie_fw) != 4) {
4953                 dev_warn(&pdev->dev,
4954                          "cxgb4 driver needs to be MASTER to support SRIOV\n");
4955                 return -EOPNOTSUPP;
4956         }
4957
4958         /* If any of the VFs is already assigned to a Guest OS, then
4959          * SR-IOV for this device cannot be modified.
4960          */
4961         if (current_vfs && pci_vfs_assigned(pdev)) {
4962                 dev_err(&pdev->dev,
4963                         "Cannot modify SR-IOV while VFs are assigned\n");
4964                 num_vfs = current_vfs;
4965                 return num_vfs;
4966         }
4967
4968         /* Disable SR-IOV when zero is passed.
4969          * SR-IOV must be disabled before it can be modified, else the
4970          * stack throws the below warning:
4971          * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
4972          */
4973         if (!num_vfs) {
4974                 pci_disable_sriov(pdev);
4975                 if (adap->port[0]->reg_state == NETREG_REGISTERED)
4976                         unregister_netdev(adap->port[0]);
4977                 return num_vfs;
4978         }
4979
4980         if (num_vfs != current_vfs) {
4981                 err = pci_enable_sriov(pdev, num_vfs);
4982                 if (err)
4983                         return err;
4984
4985                 if (adap->port[0]->reg_state == NETREG_UNINITIALIZED) {
4986                         err = register_netdev(adap->port[0]);
4987                         if (err < 0)
4988                                 pr_info("Unable to register VF mgmt netdev\n");
4989                 }
4990         }
4991         return num_vfs;
4992 }
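
/* Sketch of how this hook is exercised from userspace (assuming a PF at PCI
 * address 0000:04:00.4):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:04:00.4/sriov_numvfs   # enable 4 VFs
 *   echo 0 > /sys/bus/pci/devices/0000:04:00.4/sriov_numvfs   # disable them
 */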
4993 #endif
4994
4995 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4996 {
4997         int func, i, err, s_qpp, qpp, num_seg;
4998         struct port_info *pi;
4999         bool highdma = false;
5000         struct adapter *adapter = NULL;
5001         struct net_device *netdev;
5002 #ifdef CONFIG_PCI_IOV
5003         char name[IFNAMSIZ];
5004 #endif
5005         void __iomem *regs;
5006         u32 whoami, pl_rev;
5007         enum chip_type chip;
5008         static int adap_idx = 1;
5009
5010         printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
5011
5012         err = pci_request_regions(pdev, KBUILD_MODNAME);
5013         if (err) {
5014                 /* Just info, some other driver may have claimed the device. */
5015                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
5016                 return err;
5017         }
5018
5019         err = pci_enable_device(pdev);
5020         if (err) {
5021                 dev_err(&pdev->dev, "cannot enable PCI device\n");
5022                 goto out_release_regions;
5023         }
5024
5025         regs = pci_ioremap_bar(pdev, 0);
5026         if (!regs) {
5027                 dev_err(&pdev->dev, "cannot map device registers\n");
5028                 err = -ENOMEM;
5029                 goto out_disable_device;
5030         }
5031
5032         err = t4_wait_dev_ready(regs);
5033         if (err < 0)
5034                 goto out_unmap_bar0;
5035
5036         /* We control everything through one PF */
5037         whoami = readl(regs + PL_WHOAMI_A);
5038         pl_rev = REV_G(readl(regs + PL_REV_A));
5039         chip = get_chip_type(pdev, pl_rev);
5040         func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
5041                 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
5042         if (func != ent->driver_data) {
5043 #ifndef CONFIG_PCI_IOV
5044                 iounmap(regs);
5045 #endif
5046                 pci_disable_device(pdev);
5047                 pci_save_state(pdev);        /* to restore SR-IOV later */
5048                 goto sriov;
5049         }
5050
5051         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
5052                 highdma = true;
5053                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5054                 if (err) {
5055                         dev_err(&pdev->dev,
5056                                 "unable to obtain 64-bit DMA for coherent allocations\n");
5057                         goto out_unmap_bar0;
5058                 }
5059         } else {
5060                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5061                 if (err) {
5062                         dev_err(&pdev->dev, "no usable DMA configuration\n");
5063                         goto out_unmap_bar0;
5064                 }
5065         }
5066
5067         pci_enable_pcie_error_reporting(pdev);
5068         enable_pcie_relaxed_ordering(pdev);
5069         pci_set_master(pdev);
5070         pci_save_state(pdev);
5071
5072         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5073         if (!adapter) {
5074                 err = -ENOMEM;
5075                 goto out_unmap_bar0;
5076         }
5077         adap_idx++;
5078
5079         adapter->workq = create_singlethread_workqueue("cxgb4");
5080         if (!adapter->workq) {
5081                 err = -ENOMEM;
5082                 goto out_free_adapter;
5083         }
5084
5085         adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
5086                                     (sizeof(struct mbox_cmd) *
5087                                      T4_OS_LOG_MBOX_CMDS),
5088                                     GFP_KERNEL);
5089         if (!adapter->mbox_log) {
5090                 err = -ENOMEM;
5091                 goto out_free_adapter;
5092         }
5093         adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
5094
5095         /* PCI device has been enabled */
5096         adapter->flags |= DEV_ENABLED;
5097
5098         adapter->regs = regs;
5099         adapter->pdev = pdev;
5100         adapter->pdev_dev = &pdev->dev;
5101         adapter->name = pci_name(pdev);
5102         adapter->mbox = func;
5103         adapter->pf = func;
5104         adapter->msg_enable = dflt_msg_enable;
5105         memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
5106
5107         spin_lock_init(&adapter->stats_lock);
5108         spin_lock_init(&adapter->tid_release_lock);
5109         spin_lock_init(&adapter->win0_lock);
5110
5111         INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
5112         INIT_WORK(&adapter->db_full_task, process_db_full);
5113         INIT_WORK(&adapter->db_drop_task, process_db_drop);
5114
5115         err = t4_prep_adapter(adapter);
5116         if (err)
5117                 goto out_free_adapter;
5118
5120         if (!is_t4(adapter->params.chip)) {
5121                 s_qpp = (QUEUESPERPAGEPF0_S +
5122                         (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
5123                         adapter->pf);
5124                 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
5125                       SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
5126                 num_seg = PAGE_SIZE / SEGMENT_SIZE;
5127
5128                 /* Each segment is 128B in size.  Write coalescing is
5129                  * enabled only when the SGE_EGRESS_QUEUES_PER_PAGE_PF
5130                  * register value for the queue is less than the number
5131                  * of segments that fit in a page.
5132                  */
5133                 if (qpp > num_seg) {
5134                         dev_err(&pdev->dev,
5135                                 "Incorrect number of egress queues per page\n");
5136                         err = -EINVAL;
5137                         goto out_free_adapter;
5138                 }
5139                 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
5140                                            pci_resource_len(pdev, 2));
5141                 if (!adapter->bar2) {
5142                         dev_err(&pdev->dev, "cannot map device bar2 region\n");
5143                         err = -ENOMEM;
5144                         goto out_free_adapter;
5145                 }
5146         }
5147
5148         setup_memwin(adapter);
5149         err = adap_init0(adapter);
5150 #ifdef CONFIG_DEBUG_FS
5151         bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
5152 #endif
5153         setup_memwin_rdma(adapter);
5154         if (err)
5155                 goto out_unmap_bar;
5156
5157         /* configure SGE_STAT_CFG_A to read WC stats */
5158         if (!is_t4(adapter->params.chip))
5159                 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
5160                              (is_t5(adapter->params.chip) ? STATMODE_V(0) :
5161                               T6_STATMODE_V(0)));
5162
5163         for_each_port(adapter, i) {
5164                 netdev = alloc_etherdev_mq(sizeof(struct port_info),
5165                                            MAX_ETH_QSETS);
5166                 if (!netdev) {
5167                         err = -ENOMEM;
5168                         goto out_free_dev;
5169                 }
5170
5171                 SET_NETDEV_DEV(netdev, &pdev->dev);
5172
5173                 adapter->port[i] = netdev;
5174                 pi = netdev_priv(netdev);
5175                 pi->adapter = adapter;
5176                 pi->xact_addr_filt = -1;
5177                 pi->port_id = i;
5178                 netdev->irq = pdev->irq;
5179
5180                 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
5181                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5182                         NETIF_F_RXCSUM | NETIF_F_RXHASH |
5183                         NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
5184                 if (highdma)
5185                         netdev->hw_features |= NETIF_F_HIGHDMA;
5186                 netdev->features |= netdev->hw_features;
5187                 netdev->vlan_features = netdev->features & VLAN_FEAT;
5188
5189                 netdev->priv_flags |= IFF_UNICAST_FLT;
5190
5191                 netdev->netdev_ops = &cxgb4_netdev_ops;
5192 #ifdef CONFIG_CHELSIO_T4_DCB
5193                 netdev->dcbnl_ops = &cxgb4_dcb_ops;
5194                 cxgb4_dcb_state_init(netdev);
5195 #endif
5196                 cxgb4_set_ethtool_ops(netdev);
5197         }
5198
5199         pci_set_drvdata(pdev, adapter);
5200
5201         if (adapter->flags & FW_OK) {
5202                 err = t4_port_init(adapter, func, func, 0);
5203                 if (err)
5204                         goto out_free_dev;
5205         } else if (adapter->params.nports == 1) {
5206                 /* If we don't have a connection to the firmware -- possibly
5207                  * because of an error -- grab the raw VPD parameters so we
5208                  * can set the proper MAC Address on the debug network
5209                  * interface that we've created.
5210                  */
5211                 u8 hw_addr[ETH_ALEN];
5212                 u8 *na = adapter->params.vpd.na;
5213
5214                 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
5215                 if (!err) {
5216                         for (i = 0; i < ETH_ALEN; i++)
5217                                 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
5218                                               hex2val(na[2 * i + 1]));
5219                         t4_set_hw_addr(adapter, 0, hw_addr);
5220                 }
5221         }
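        /* For example (hypothetical VPD contents): na = "000743000001"
         * decodes pairwise via hex2val() to the MAC address
         * 00:07:43:00:00:01.
         */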
5222
5223         /* Configure queues and allocate tables now, they can be needed as
5224          * soon as the first register_netdev completes.
5225          */
5226         cfg_queues(adapter);
5227
5228         adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
5229         if (!adapter->l2t) {
5230                 /* We tolerate a lack of L2T, giving up some functionality */
5231                 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
5232                 adapter->params.offload = 0;
5233         }
5234
5235 #if IS_ENABLED(CONFIG_IPV6)
5236         if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
5237             (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
5238                 /* CLIP functionality is not present in hardware,
5239                  * hence disable all offload features
5240                  */
5241                 dev_warn(&pdev->dev,
5242                          "CLIP not enabled in hardware, continuing\n");
5243                 adapter->params.offload = 0;
5244         } else {
5245                 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
5246                                                   adapter->clipt_end);
5247                 if (!adapter->clipt) {
5248                         /* We tolerate a lack of clip_table, giving up
5249                          * some functionality
5250                          */
5251                         dev_warn(&pdev->dev,
5252                                  "could not allocate Clip table, continuing\n");
5253                         adapter->params.offload = 0;
5254                 }
5255         }
5256 #endif
5257         if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
5258                 dev_warn(&pdev->dev,
5259                          "could not allocate TID table, continuing\n");
5260                 adapter->params.offload = 0;
5261         }
5262
5263         if (is_offload(adapter)) {
5264                 if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
5265                         u32 hash_base, hash_reg;
5266
5267                         if (chip <= CHELSIO_T5) {
5268                                 hash_reg = LE_DB_TID_HASHBASE_A;
5269                                 hash_base = t4_read_reg(adapter, hash_reg);
5270                                 adapter->tids.hash_base = hash_base / 4;
5271                         } else {
5272                                 hash_reg = T6_LE_DB_HASH_TID_BASE_A;
5273                                 hash_base = t4_read_reg(adapter, hash_reg);
5274                                 adapter->tids.hash_base = hash_base;
5275                         }
5276                 }
5277         }
5278
5279         /* See what interrupts we'll be using */
5280         if (msi > 1 && enable_msix(adapter) == 0)
5281                 adapter->flags |= USING_MSIX;
5282         else if (msi > 0 && pci_enable_msi(pdev) == 0) {
5283                 adapter->flags |= USING_MSI;
5284                 if (msi > 1)
5285                         free_msix_info(adapter);
5286         }
5287
5288         /* check for PCI Express bandwidth capabilities */
5289         cxgb4_check_pcie_caps(adapter);
5290
5291         err = init_rss(adapter);
5292         if (err)
5293                 goto out_free_dev;
5294
5295         /*
5296          * The card is now ready to go.  If any errors occur during device
5297          * registration we do not fail the whole card but rather proceed only
5298          * with the ports we manage to register successfully.  However we must
5299          * register at least one net device.
5300          */
5301         for_each_port(adapter, i) {
5302                 pi = adap2pinfo(adapter, i);
5303                 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
5304                 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
5305
5306                 err = register_netdev(adapter->port[i]);
5307                 if (err)
5308                         break;
5309                 adapter->chan_map[pi->tx_chan] = i;
5310                 print_port_info(adapter->port[i]);
5311         }
5312         if (i == 0) {
5313                 dev_err(&pdev->dev, "could not register any net devices\n");
5314                 goto out_free_dev;
5315         }
5316         if (err) {
5317                 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
5318                 err = 0;
5319         }
5320
5321         if (cxgb4_debugfs_root) {
5322                 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
5323                                                            cxgb4_debugfs_root);
5324                 setup_debugfs(adapter);
5325         }
5326
5327         /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5328         pdev->needs_freset = 1;
5329
5330         if (is_offload(adapter))
5331                 attach_ulds(adapter);
5332
5333         print_adapter_info(adapter);
5334         return 0;
5335
5336 sriov:
5337 #ifdef CONFIG_PCI_IOV
5338         if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) {
5339                 dev_warn(&pdev->dev,
5340                  "Enabling SR-IOV VFs using the num_vf module parameter is deprecated - please use the pci sysfs interface instead.\n");
5343                 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
5344                         dev_info(&pdev->dev,
5345                                  "instantiated %u virtual functions\n",
5346                                  num_vf[func]);
5347         }
5348
5349         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5350         if (!adapter) {
5351                 err = -ENOMEM;
5352                 goto free_pci_region;
5353         }
5354
5355         snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap_idx, func);
5356         netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, ether_setup);
5357         if (!netdev) {
5358                 err = -ENOMEM;
5359                 goto free_adapter;
5360         }
5361
5362         adapter->pdev = pdev;
5363         adapter->pdev_dev = &pdev->dev;
5364         adapter->name = pci_name(pdev);
5365         adapter->mbox = func;
5366         adapter->pf = func;
5367         adapter->regs = regs;
5368         adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
5369                                     (sizeof(struct mbox_cmd) *
5370                                      T4_OS_LOG_MBOX_CMDS),
5371                                     GFP_KERNEL);
5372         if (!adapter->mbox_log) {
5373                 err = -ENOMEM;
5374                 goto free_netdevice;
5375         }
5376         pi = netdev_priv(netdev);
5377         pi->adapter = adapter;
5378         SET_NETDEV_DEV(netdev, &pdev->dev);
5379         pci_set_drvdata(pdev, adapter);
5380
5381         adapter->port[0] = netdev;
5382         netdev->netdev_ops = &cxgb4_mgmt_netdev_ops;
5383         netdev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
5384
5385         return 0;
5386
5387  free_netdevice:
5388         free_netdev(adapter->port[0]);
5389  free_adapter:
5390         kfree(adapter);
5391  free_pci_region:
5392         iounmap(regs);
5393         pci_disable_sriov(pdev);
5394         pci_release_regions(pdev);
5395         return err;
5396 #else
5397         return 0;
5398 #endif
5399
5400  out_free_dev:
5401         free_some_resources(adapter);
5402         if (adapter->flags & USING_MSIX)
5403                 free_msix_info(adapter);
5404         if (adapter->num_uld)
5405                 uld_mem_free(adapter);
5406  out_unmap_bar:
5407         if (!is_t4(adapter->params.chip))
5408                 iounmap(adapter->bar2);
5409  out_free_adapter:
5410         if (adapter->workq)
5411                 destroy_workqueue(adapter->workq);
5412
5413         kfree(adapter->mbox_log);
5414         kfree(adapter);
5415  out_unmap_bar0:
5416         iounmap(regs);
5417  out_disable_device:
5418         pci_disable_pcie_error_reporting(pdev);
5419         pci_disable_device(pdev);
5420  out_release_regions:
5421         pci_release_regions(pdev);
5422         return err;
5423 }
5424
5425 static void remove_one(struct pci_dev *pdev)
5426 {
5427         struct adapter *adapter = pci_get_drvdata(pdev);
5428
5429         if (!adapter) {
5430                 pci_release_regions(pdev);
5431                 return;
5432         }
5433
5434         if (adapter->pf == 4) {
5435                 int i;
5436
5437                 /* Tear down per-adapter Work Queue first since it can contain
5438                  * references to our adapter data structure.
5439                  */
5440                 destroy_workqueue(adapter->workq);
5441
5442                 if (is_offload(adapter))
5443                         detach_ulds(adapter);
5444
5445                 disable_interrupts(adapter);
5446
5447                 for_each_port(adapter, i)
5448                         if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5449                                 unregister_netdev(adapter->port[i]);
5450
5451                 debugfs_remove_recursive(adapter->debugfs_root);
5452
5453                 /* If we allocated filters, free up state associated with any
5454                  * valid filters ...
5455                  */
5456                 if (adapter->tids.ftid_tab) {
5457                         struct filter_entry *f = &adapter->tids.ftid_tab[0];
5458                         for (i = 0; i < (adapter->tids.nftids +
5459                                         adapter->tids.nsftids); i++, f++)
5460                                 if (f->valid)
5461                                         clear_filter(adapter, f);
5462                 }
5463
5464                 if (adapter->flags & FULL_INIT_DONE)
5465                         cxgb_down(adapter);
5466
5467                 if (adapter->flags & USING_MSIX)
5468                         free_msix_info(adapter);
5469                 if (adapter->num_uld)
5470                         uld_mem_free(adapter);
5471                 free_some_resources(adapter);
5472 #if IS_ENABLED(CONFIG_IPV6)
5473                 t4_cleanup_clip_tbl(adapter);
5474 #endif
5475                 iounmap(adapter->regs);
5476                 if (!is_t4(adapter->params.chip))
5477                         iounmap(adapter->bar2);
5478                 pci_disable_pcie_error_reporting(pdev);
5479                 if ((adapter->flags & DEV_ENABLED)) {
5480                         pci_disable_device(pdev);
5481                         adapter->flags &= ~DEV_ENABLED;
5482                 }
5483                 pci_release_regions(pdev);
5484                 kfree(adapter->mbox_log);
5485                 synchronize_rcu();
5486                 kfree(adapter);
5487         }
5488 #ifdef CONFIG_PCI_IOV
5489         else {
5490                 if (adapter->port[0]->reg_state == NETREG_REGISTERED)
5491                         unregister_netdev(adapter->port[0]);
5492                 free_netdev(adapter->port[0]);
5493                 iounmap(adapter->regs);
5494                 kfree(adapter);
5495                 pci_disable_sriov(pdev);
5496                 pci_release_regions(pdev);
5497         }
5498 #endif
5499 }
5500
5501 static struct pci_driver cxgb4_driver = {
5502         .name     = KBUILD_MODNAME,
5503         .id_table = cxgb4_pci_tbl,
5504         .probe    = init_one,
5505         .remove   = remove_one,
5506         .shutdown = remove_one,
5507 #ifdef CONFIG_PCI_IOV
5508         .sriov_configure = cxgb4_iov_configure,
5509 #endif
5510         .err_handler = &cxgb4_eeh,
5511 };
5512
5513 static int __init cxgb4_init_module(void)
5514 {
5515         int ret;
5516
5517         /* Debugfs support is optional, just warn if this fails */
5518         cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
5519         if (!cxgb4_debugfs_root)
5520                 pr_warn("could not create debugfs entry, continuing\n");
5521
5522         ret = pci_register_driver(&cxgb4_driver);
5523         if (ret < 0)
5524                 debugfs_remove(cxgb4_debugfs_root);
5525
5526 #if IS_ENABLED(CONFIG_IPV6)
5527         if (!inet6addr_registered) {
5528                 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
5529                 inet6addr_registered = true;
5530         }
5531 #endif
5532
5533         return ret;
5534 }
5535
5536 static void __exit cxgb4_cleanup_module(void)
5537 {
5538 #if IS_ENABLED(CONFIG_IPV6)
5539         if (inet6addr_registered) {
5540                 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
5541                 inet6addr_registered = false;
5542         }
5543 #endif
5544         pci_unregister_driver(&cxgb4_driver);
5545         debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
5546 }
5547
5548 module_init(cxgb4_init_module);
5549 module_exit(cxgb4_cleanup_module);