/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>
#include <linux/crash_dump.h>
71 #include "t4_values.h"
74 #include "t4fw_version.h"
75 #include "cxgb4_dcb.h"
76 #include "cxgb4_debugfs.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter, "
		 "deprecated parameter");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
		 "deprecated parameter");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
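/* For example: with the default 2-byte offset, a received frame's 14-byte
 * Ethernet header starts at buffer offset 2, so the IP header that follows
 * lands at offset 16, which is 4-byte aligned.  With an offset of 0 the IP
 * header would start at offset 14 and be misaligned.
 */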
#ifdef CONFIG_PCI_IOV
/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
#endif
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or the
 * driver's cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);
		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */
int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_state_init(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}
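/* A worked example of the hashing above (an illustration, not driver code):
 * hash_mac_addr() folds a 6-byte MAC address down to a 6-bit bucket index,
 * so an address hashing to bucket 9 contributes bit 9 of the 64-bit @vec
 * (vec |= 1ULL << 9).  The firmware then accepts any address whose hash
 * bucket bit is set in the programmed vector.
 */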
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}
static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!(dev->flags & IFF_PROMISC)) {
		__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
		if (!(dev->flags & IFF_ALLMULTI))
			__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	}

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = TCB_COOKIE_G(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev =
				q->adap->port[q->adap->chan_map[port]];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	if (ulds[q->uld].lro_flush)
		ulds[q->uld].lro_flush(&q->lro_mgr);
}
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
						  rsp, gl, &q->lro_mgr,
						  &q->napi);
	else
		ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
					      rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_iscsirxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
			 adap->port[0]->name, i);

	for_each_iscsitrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}
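/* For example, on an adapter whose first port is "eth2", the vector names
 * come out as "eth2" (non-data), "eth2-FWeventq", "eth2-Rx0".."eth2-RxN"
 * per port, then "eth2-iscsi0", "eth2-iSCSIT0", "eth2-rdma0" and
 * "eth2-rdma-ciq0", which is what shows up in /proc/interrupts.
 */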
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int iscsitqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsirxq(s, iscsiqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsirxq[iscsiqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsitrxq(s, iscsitqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsitrxq[iscsitqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--iscsitqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsitrxq[iscsitqidx].rspq);
	while (--iscsiqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsirxq[iscsiqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_iscsirxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsirxq[i].rspq);
	for_each_iscsitrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsitrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;
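		/* e.g. with nqsets = 4 the default table reads
		 * 0,1,2,3,0,1,2,3,... so hashed flows are spread evenly
		 * across the port's four Rx queue sets.
		 */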
		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler) {
			napi_disable(&q->napi);
			local_bh_disable();
			while (!cxgb_poll_lock_napi(q))
				mdelay(1);
			local_bh_enable();
		}
	}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler) {
			cxgb_busy_poll_init_lock(q);
			napi_enable(&q->napi);
		}
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}
static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
			   unsigned int nq, unsigned int per_chan, int msi_idx,
			   u16 *ids, bool lro)
{
	int i, err;

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			return err;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	/* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
	 * don't forget to update the following which need to be
	 * synchronized to any changes here.
	 *
	 * 1. The calculations of MAX_INGQ in cxgb4.h.
	 *
	 * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
	 *    to accommodate any new/deleted Ingress Queues
	 *    which need MSI-X Vectors.
	 *
	 * 3. Update sge_qinfo_show() to include information on the
	 *    new/deleted queues.
	 */
	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler, NULL, -1);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_mps_bg_map(adap,
								 pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
	for_each_iscsirxq(s, i) {
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
					    adap->port[i / j],
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \
	if (err) \
		goto freeout; \
	if (msi_idx > 0) \
		msi_idx += nq; \
} while (0)

	ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
	ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
	j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);

#undef ALLOC_OFLD_RXQS

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
	kvfree(addr);
}
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (f->l2t == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(ftid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}
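/* With DCB negotiated, the mapping above is direct: a packet whose VLAN tag
 * carries Priority Code Point 5 is steered to TX queue 5, matching the
 * queue-index-to-priority mapping programmed by dcb_tx_queue_prio_enable().
 */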
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
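/* Typical usage (illustrative values only): a latency-sensitive queue might
 * use cxgb4_set_rspq_intr_params(q, 5, 1) for a ~5us holdoff with per-packet
 * interrupts, while a throughput-oriented queue might use (q, 100, 64) to
 * coalesce up to 64 packets or 100us, whichever comes first.  The values are
 * snapped to the nearest entries in the adapter's timer/counter tables.
 */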
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

/*
 * upper-layer driver support
 */
/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 2;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET)
			t->stids_in_use--;
		else
			t->stids_in_use -= 2;
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
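	/* This works because tid_tab entries are pointer-sized and
	 * pointer-aligned, so the bottom two bits of &t->tid_tab[tid] are
	 * always zero and are free to carry the channel number;
	 * process_tid_release_list() recovers the channel with
	 * "(uintptr_t)p & 3" and masks it back off the pointer.
	 */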
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		if (t->hash_base && (tid >= t->hash_base))
			atomic_dec(&t->hash_tids_in_use);
		else
			atomic_dec(&t->tids_in_use);
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
		__set_bit(0, t->stid_bmap);

	return 0;
}
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
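/* A minimal usage sketch (illustrative only; "uld_ctx" and "rxq" are
 * hypothetical ULD-side names): a ULD that has allocated a server TID can
 * start an IPv4 listener on port 80 with SYNs steered to one of its ingress
 * queues:
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET, uld_ctx);
 *	if (stid >= 0)
 *		cxgb4_create_server(netdev, stid, htonl(INADDR_ANY),
 *				    htons(80), 0, rxq->rspq.abs_id);
 */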
/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
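/* Worked example (with an illustrative table, not the adapter's actual one):
 * given mtus[] = { 576, 1500, 9000 }, a target of 1600 stops the scan at
 * 1500 because the next entry (9000) would exceed it; a target of 500,
 * smaller than every entry, selects the first entry, 576.
 */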
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
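/* For example (illustrative): viid 0x05 maps to SMT index 0x0a on T4/T5,
 * where row 5 holds the two entries 10 and 11, but to SMT index 0x05 on T6,
 * where each row holds a single entry.
 */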
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
2078 int cxgb4_flush_eq_cache(struct net_device *dev)
2080 struct adapter *adap = netdev2adap(dev);
2082 return t4_sge_ctxt_flush(adap, adap->mbox);
2084 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2086 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2088 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
2092 spin_lock(&adap->win0_lock);
2093 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
2094 sizeof(indices), (__be32 *)&indices,
2096 spin_unlock(&adap->win0_lock);
2098 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2099 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
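/* Worked example of the doorbell delta computed above (illustrative only,
 * not part of the driver): with a 1024-entry queue, a host pidx of 3 and a
 * hardware pidx of 1021, the producer index has wrapped, so delta =
 * size - hw_pidx + pidx = 1024 - 1021 + 3 = 6 descriptors still to be
 * advertised to hardware.
 */
static inline u16 txq_pidx_delta_sketch(u16 pidx, u16 hw_pidx, u16 size)
{
	return pidx >= hw_pidx ? pidx - hw_pidx : size - hw_pidx + pidx;
}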
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
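/* Worked example of the flat EDC0/EDC1/MC0 map used above (illustrative
 * only): with 256 MB of EDC0 and 256 MB of EDC1, a TPTE offset of
 * 0x21000000 (528 MB) is past edc1_end (512 MB), so it resolves to MEM_MC0
 * at relative address 0x01000000 (16 MB).
 */
static inline u32 tpte_offset_to_mc0_sketch(void)
{
	u32 edc0_end = 256 << 20;			/* 256 MB of EDC0 */
	u32 edc1_end = edc0_end + (256 << 20);		/* plus 256 MB of EDC1 */

	return 0x21000000 - edc1_end;			/* 0x01000000 into MC0 */
}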
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 lo, hi;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
				cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
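/* Worked example of the T5 dropped-doorbell decode above (illustrative
 * only; the register value is made up): for dropped_db == 0x00823004,
 * qid = (dropped_db >> 15) & 0x1ffff == 0x104 and pidx_inc =
 * dropped_db & 0x1fff == 0x1004, so the recovery path re-rings egress
 * queue 0x104's BAR2 doorbell with that pidx increment.
 */
static inline void dropped_db_decode_sketch(void)
{
	BUILD_BUG_ON(((0x00823004 >> 15) & 0x1ffff) != 0x104);
	BUILD_BUG_ON((0x00823004 & 0x1fff) != 0x1004);
}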
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.pf = adap->pf;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.ciq_ids = adap->sge.rdma_ciq;
		lli.nrxq = adap->sge.rdmaqs;
		lli.nciq = adap->sge.rdmaciqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.iscsi_rxq;
		lli.nrxq = adap->sge.iscsiqsets;
	} else if (uld == CXGB4_ULD_ISCSIT) {
		lli.rxq_ids = adap->sge.iscsit_rxq;
		lli.nrxq = adap->sge.niscsitq;
	}
	lli.ntxq = adap->sge.iscsiqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.chip;
	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lli.iscsi_ppm = &adap->iscsi_ppm;
	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lli.udb_density = 1 << adap->params.sge.eq_qpp;
	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
	lli.filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_ingpadboundary = adap->sge.fl_align;
	lli.sge_egrstatuspagesize = adap->sge.stat_len;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lli.max_ordird_qp = adap->params.max_ordird_qp;
	lli.max_ird_adapter = adap->params.max_ird_adapter;
	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lli.nodeid = dev_to_node(adap->pdev_dev);

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
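/* Worked example for two of the lld_info derivations above (illustrative
 * only): vpd.cclk is in kHz, so a 250 MHz core clock (cclk == 250000)
 * gives cclk_ps = 10^9 / 250000 == 4000 picoseconds per core tick, and an
 * eq_qpp of 3 gives a user doorbell density of 1 << 3 == 8 egress queues
 * per page.
 */
static inline void lld_derivation_sketch(void)
{
	BUILD_BUG_ON(1000000000 / 250000 != 4000);	/* cclk_ps */
	BUILD_BUG_ON((1 << 3) != 8);			/* udb_density */
}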
static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	spin_lock(&adap_rcu_lock);
	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
	spin_unlock(&adap_rcu_lock);

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);

	spin_lock(&adap_rcu_lock);
	list_del_rcu(&adap->rcu_node);
	spin_unlock(&adap_rcu_lock);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}
/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
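/* A minimal registration sketch for an upper-layer driver (illustrative
 * only; my_uld_add() and my_uld_state_change() are hypothetical callbacks,
 * but .add and .state_change are the hooks invoked by uld_attach() and
 * notify_ulds() above).
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.name = "my_uld",
 *		.add = my_uld_add,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 */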
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}
static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;
		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	/* Initialize hash mac addr list*/
	INIT_LIST_HEAD(&adap->mac_hlist);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}
static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid).  The checks for all
 * the common problems with doing this like the filter being locked, currently
 * pending in another operation, etc.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port, unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
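/* Usage sketch (illustrative only): redirect TCP SYNs addressed to
 * 10.0.0.1:80 on any ingress port to offload RX queue "queue".  The stid
 * would normally come from the server-filter region configured in
 * adap_init0() below; a port/mask of 0/0 leaves the ingress-port match
 * disabled.
 *
 *	ret = cxgb4_create_server_filter(dev, stid, htonl(0x0a000001),
 *					 htons(80), 0, queue, 0, 0);
 */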
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	ret = delete_filter(adap, stid);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return ns;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		switch (pi->tstamp_config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			pi->rxtstamp = false;
			break;
		case HWTSTAMP_FILTER_ALL:
			pi->rxtstamp = true;
			break;
		default:
			pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
			return -ERANGE;
		}
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll        = cxgb_busy_poll,
#endif
};
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}

static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}
static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	/* select the capabilities we'll be using */
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}
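/* Why 0xE4 maps the first four Tx modulation queues onto consecutive
 * channels (illustrative only): the map register holds four 2-bit fields,
 * queue 0 in bits 1:0 up through queue 3 in bits 7:6, and 0xE4 ==
 * 0b11100100, i.e. queue 0 -> chan 0 ... queue 3 -> chan 3.
 */
static inline void tx_modq_map_sketch(void)
{
	BUILD_BUG_ON(((0xE4 >> (2 * 0)) & 3) != 0);	/* queue 0 -> chan 0 */
	BUILD_BUG_ON(((0xE4 >> (2 * 1)) & 3) != 1);	/* queue 1 -> chan 1 */
	BUILD_BUG_ON(((0xE4 >> (2 * 3)) & 3) != 3);	/* queue 3 -> chan 3 */
}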
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}
/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
#define be16(__p) (((__p)[0] << 8) | (__p)[1])
#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

#undef be16
#undef le16
#undef le24
}
static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}
/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with, on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}
	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		ret = -ENOMEM;
		if (cf->size >= FLASH_CFG_MAX_SIZE)
			goto bye;

		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adapter, adapter->mbox,
				      adapter->pf, 0, 1, params, val);
		if (ret == 0) {
			/*
			 * For t4_memory_rw() below addresses and
			 * sizes have to be in terms of multiples of 4
			 * bytes.  So, if the Configuration File isn't
			 * a multiple of 4 bytes in length we'll have
			 * to write that out separately since we can't
			 * guarantee that the bytes following the
			 * residual byte in the buffer returned by
			 * request_firmware() are zeroed out ...
			 */
			size_t resid = cf->size & 0x3;
			size_t size = cf->size & ~0x3;
			__be32 *data = (__be32 *)cf->data;

			mtype = FW_PARAMS_PARAM_Y_G(val[0]);
			maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

			spin_lock(&adapter->win0_lock);
			ret = t4_memory_rw(adapter, 0, mtype, maddr,
					   size, data, T4_MEMORY_WRITE);
			if (ret == 0 && resid != 0) {
				union {
					__be32 word;
					char buf[4];
				} last;
				int i;

				last.word = data[size >> 2];
				for (i = resid; i < 4; i++)
					last.buf[i] = 0;
				ret = t4_memory_rw(adapter, 0, mtype,
						   maddr + size,
						   4, &last.word,
						   T4_MEMORY_WRITE);
			}
			spin_unlock(&adapter->win0_lock);
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}
	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;
	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/* Grab Firmware Device Log parameters as early as possible so we have
	 * access to it for debugging, etc.
	 */
	ret = t4_init_devlog_params(adap);
	if (ret < 0)
		return ret;

	/* Contact FW, advertising Master capability */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
			  is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;
	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_bs_version(adap, &adap->params.bs_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	t4_get_exprom_version(adap, &adap->params.er_vers);

	ret = t4_check_fw_version(adap);
	/* If firmware is too old (not supported by driver) force an update. */
	if (ret)
		state = DEV_STATE_UNINIT;
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));

		/* Get FW from from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		t4_free_mem(card_fw);

		if (ret < 0)
			goto bye;
	}
	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = t4_get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/* If the firmware is initialized already, emit a simply note to that
	 * effect. Otherwise, it's time to try initializing the adapter.
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				      params, val);

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		if (ret < 0) {
			dev_err(adap->pdev_dev, "firmware doesn't support "
				"Firmware Configuration Files\n");
			goto bye;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap->pdev_dev, "no Configuration File "
				"present on adapter.\n");
			goto bye;
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev, "could not initialize "
				"adapter, error %d\n", -ret);
			goto bye;
		}
	}
	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0)
		goto bye;

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;
	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
	 FW_PARAMS_PARAM_Y_V(0) | \
	 FW_PARAMS_PARAM_Z_V(0))
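	/* Illustrative note (not driver code): FW_PARAM_PFVF(EQ_START)
	 * composes a single 32-bit parameter id from the PFVF mnemonic and
	 * the EQ_START param-X field, with Y and Z zeroed; t4_query_params()
	 * then returns the matching value for each of up to 7 such ids per
	 * mailbox command, which is why params[] and val[] are 7 entries.
	 */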
	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];
	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence driver needs to allocate memory for this range to
	 * store the queue info. Get the highest IQFLINT/EQ index returned
	 * in FW_EQ_*_CMD.alloc command.
	 */
	params[0] = FW_PARAM_PFVF(EQ_END);
	params[1] = FW_PARAM_PFVF(IQFLINT_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;

	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
	if (!adap->sge.egr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
	if (!adap->sge.ingr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	/* Allocate the memory for the various egress queue bitmaps
	 * ie starving_fl, txq_maperr and blocked_fl.
	 */
	adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
					sizeof(long), GFP_KERNEL);
	if (!adap->sge.starving_fl) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.txq_maperr) {
		ret = -ENOMEM;
		goto bye;
	}

#ifdef CONFIG_DEBUG_FS
	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.blocked_fl) {
		ret = -ENOMEM;
		goto bye;
	}
	bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
#endif
	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;
4036 /* query offload-related parameters */
4037 params[0] = FW_PARAM_DEV(NTID);
4038 params[1] = FW_PARAM_PFVF(SERVER_START);
4039 params[2] = FW_PARAM_PFVF(SERVER_END);
4040 params[3] = FW_PARAM_PFVF(TDDP_START);
4041 params[4] = FW_PARAM_PFVF(TDDP_END);
4042 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4043 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4047 adap->tids.ntids = val[0];
4048 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4049 adap->tids.stid_base = val[1];
4050 adap->tids.nstids = val[2] - val[1] + 1;
4052 * Setup server filter region. Divide the available filter
4053 * region into two parts. Regular filters get 1/3rd and server
4054 * filters get 2/3rd part. This is only enabled if workarond
4056 * 1. For regular filters.
4057 * 2. Server filter: This are special filters which are used
4058 * to redirect SYN packets to offload queue.
4060 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4061 adap->tids.sftid_base = adap->tids.ftid_base +
4062 DIV_ROUND_UP(adap->tids.nftids, 3);
4063 adap->tids.nsftids = adap->tids.nftids -
4064 DIV_ROUND_UP(adap->tids.nftids, 3);
4065 adap->tids.nftids = adap->tids.sftid_base -
4066 adap->tids.ftid_base;
4068 adap->vres.ddp.start = val[3];
4069 adap->vres.ddp.size = val[4] - val[3] + 1;
4070 adap->params.ofldq_wr_cred = val[5];
4072 adap->params.offload = 1;
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	adap->flags |= FW_OK;
	t4_init_tp_params(adap);
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
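/* Worked example of the 1488-byte MTU tweak above (illustrative only):
 * TCP payload = MTU - 20-byte IP header - 20-byte TCP header - options.
 */
static inline void mtu_payload_alignment_sketch(void)
{
	BUILD_BUG_ON((1500 - 20 - 20) % 8 == 0);	/* 1460: unaligned */
	BUILD_BUG_ON((1488 - 20 - 20) % 8 != 0);	/* 1448: multiple of 8 */
	BUILD_BUG_ON((1500 - 20 - 20 - 12) % 8 != 0);	/* 1448 w/ timestamps */
}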
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	disable_interrupts(adap);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}

static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	cxgb4_set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs. Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();
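
	/* Hypothetical example: a two-port adapter with both ports at 10G
	 * (n10g = 2) and MAX_ETH_QSETS assumed to be 32 yields q10g = 16
	 * above, which the cap then trims to the default RSS queue count
	 * (typically 8 on an 8-core host), i.e. 8 queue sets per port.
	 */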

	/* Reduce memory usage in a kdump environment by disabling all
	 * offload.
	 */
	if (is_kdump_kernel())
		adap->params.offload = 0;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->iscsirxq),
				  num_online_cpus());
			s->iscsiqsets = roundup(i, adap->params.nports);
		} else {
			s->iscsiqsets = adap->params.nports;
		}
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		/* Try and allow at least 1 CIQ per cpu rounding down
		 * to the number of ports, with a minimum of 1 per port.
		 * A 2 port card in a 6 cpu system: 6 CIQs, 3 CIQs / port.
		 * A 4 port card in a 6 cpu system: 4 CIQs, 1 CIQ / port.
		 * A 4 port card in a 2 cpu system: 4 CIQs, 1 CIQ / port.
		 */
		s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
		s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
			      adap->params.nports;
		s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);

		if (!is_t4(adap->params.chip))
			s->niscsitq = s->iscsiqsets;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
		struct sge_ofld_rxq *r = &s->iscsirxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	if (!is_t4(adap->params.chip)) {
		for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
			struct sge_ofld_rxq *r = &s->iscsitrxq[i];

			init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
			r->rspq.uld = CXGB4_ULD_ISCSIT;
			r->fl.size = 72;
		}
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need, allocated;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry *entries;

	entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
			  GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < MAX_INGQ + 1; ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
			s->niscsitq;
		/* need nchan for each possible ULD */
		if (is_t4(adap->params.chip))
			ofld_need = 3 * nchan;
		else
			ofld_need = 4 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
			 " not using MSI-X\n");
		kfree(entries);
		return allocated;
	}

	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
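	/* Worked example (hypothetical numbers): a 2-port T4 card wanting
	 * 16 Ethernet queue sets with offload enabled has ofld_need =
	 * 3 * 2 = 6.  If only 20 vectors are granted, the NIC group below
	 * is trimmed to 20 - EXTRA_VECS - 6 = 12 queue sets, and since
	 * fewer vectors than "want" arrived, the ULD groups fall back to
	 * their per-channel minimums.
	 */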
	i = allocated - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		if (allocated < want) {
			s->rdmaqs = nchan;
			s->rdmaciqs = nchan;

			if (!is_t4(adap->params.chip))
				s->niscsitq = nchan;
		}

		/* leftovers go to OFLD */
		i = allocated - EXTRA_VECS - s->max_ethqsets -
		    s->rdmaqs - s->rdmaciqs - s->niscsitq;
		s->iscsiqsets = (i / nchan) * nchan;  /* round down */
	}
	for (i = 0; i < allocated; ++i)
		adap->msix_info[i].vec = entries[i].vector;
	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
		 "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
		 allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
		 s->rdmaciqs);

	kfree(entries);
	return 0;
}

static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}

static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
					enum pci_bus_speed *speed,
					enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}

	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return err1 ? err1 : err2 ? err2 : -EINVAL;
	return 0;
}

static void cxgb4_check_pcie_caps(struct adapter *adap)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCI Express bandwidth.\n");
		return;
	}

	dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
		 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
		 width, width_cap);
	if (speed < speed_cap || width < width_cap)
		dev_info(adap->pdev_dev,
			 "A slot with more lanes and/or higher speed is "
			 "suggested for optimal performance.\n");
}

/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{
	/* Device information */
	dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
		 adapter->params.vpd.id,
		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
	dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
		 adapter->params.vpd.sn, adapter->params.vpd.pn);

	/* Firmware Version */
	if (!adapter->params.fw_vers)
		dev_warn(adapter->pdev_dev, "No firmware loaded\n");
	else
		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));

	/* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
	else
		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));

	/* TP Microcode Version */
	if (!adapter->params.tp_vers)
		dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "TP Microcode version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));

	/* Expansion ROM version */
	if (!adapter->params.er_vers)
		dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "Expansion ROM version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));

	/* Software/Hardware configuration */
	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
		 is_offload(adapter) ? "R" : "",
		 ((adapter->flags & USING_MSIX) ? "MSI-X" :
		  (adapter->flags & USING_MSI) ? "MSI" : ""),
		 is_offload(adapter) ? "Offload" : "non-Offload");
}

static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
		    dev->name, adap->params.vpd.id, adap->name, buf);
}
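
/* Allow the device to issue PCIe transactions with Relaxed Ordering,
 * which can improve DMA write throughput on some platforms.
 */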
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
	u16 device_id;

	/* Retrieve adapter's device ID */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
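
	/* The high nibble of the PCI device ID encodes the chip generation
	 * (0x4xxx = T4, 0x5xxx = T5, 0x6xxx = T6), so shifting it down
	 * yields a value comparable with the CHELSIO_Tx constants.
	 */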
	switch (device_id >> 12) {
	case CHELSIO_T4:
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
	case CHELSIO_T5:
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
	case CHELSIO_T6:
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
	default:
		dev_err(&pdev->dev, "Device %d is not supported\n",
			device_id);
	}
	return -EINVAL;
}

#ifdef CONFIG_PCI_IOV
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int err = 0;
	int current_vfs = pci_num_vf(pdev);
	u32 pcie_fw;
	void __iomem *regs;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		return -ENOMEM;
	}

	pcie_fw = readl(regs + PCIE_FW_A);
	iounmap(regs);
	/* Check if cxgb4 is the MASTER and fw is initialized */
	if (!(pcie_fw & PCIE_FW_INIT_F) ||
	    !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
	    PCIE_FW_MASTER_G(pcie_fw) != 4) {
		dev_warn(&pdev->dev,
			 "cxgb4 driver needs to be MASTER to support SRIOV\n");
		return -EOPNOTSUPP;
	}

	/* If any of the VFs is already assigned to a Guest OS, then
	 * SR-IOV for the same cannot be modified.
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
		return num_vfs;
	}

	/* Disable SRIOV when zero is passed.
	 * One needs to disable SRIOV before modifying it, else
	 * the stack throws the below warning:
	 * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
	 */
	if (!num_vfs) {
		pci_disable_sriov(pdev);
		return num_vfs;
	}

	if (num_vfs != current_vfs) {
		err = pci_enable_sriov(pdev, num_vfs);
		if (err)
			return err;
	}
	return num_vfs;
}
#endif

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	whoami = readl(regs + PL_WHOAMI_A);
	pl_rev = REV_G(readl(regs + PL_REV_A));
	chip = get_chip_type(pdev, pl_rev);
	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}
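
	/* Prefer a 64-bit DMA mask; fall back to 32-bit if the platform
	 * cannot satisfy it.
	 */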
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
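
	/* Keep a ring of the most recent firmware mailbox commands for
	 * post-mortem debugging.
	 */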
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			 adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B long. Write coalescing is enabled
		 * only when SGE_EGRESS_QUEUES_PER_PAGE_PF shows fewer
		 * queues per page than the number of segments that can be
		 * accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;
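
		/* The VPD network address is an ASCII hex string; convert
		 * it two nibbles at a time into the binary MAC address.
		 */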
		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif
	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}
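
	/* The "msi" module parameter selects the interrupt mode: values
	 * above 1 try MSI-X first, values above 0 fall back to MSI, and
	 * legacy INTx is the last resort if neither can be enabled.
	 */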
	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	/* check for PCI Express bandwidth capabilities */
	cxgb4_check_pcie_caps(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

	print_adapter_info(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) {
		dev_warn(&pdev->dev,
			 "Enabling SR-IOV VFs using the num_vf module "
			 "parameter is deprecated - please use the pci sysfs "
			 "interface instead.\n");
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
	}
#endif
	return 0;

out_free_dev:
	free_some_resources(adapter);
out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
out_unmap_bar0:
	iounmap(regs);
out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];

			for (i = 0; i < (adapter->tids.nftids +
					 adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter->mbox_log);
		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);