2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
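/*
 * With this pr_fmt, a bare pr_info("some message\n") anywhere in this
 * file is emitted to the kernel log as "cxgb4vf: some message".
 */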
38 #include <linux/module.h>
39 #include <linux/moduleparam.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/debugfs.h>
46 #include <linux/ethtool.h>
47 #include <linux/mdio.h>
49 #include "t4vf_common.h"
50 #include "t4vf_defs.h"
52 #include "../cxgb4/t4_regs.h"
53 #include "../cxgb4/t4_msg.h"
56 * Generic information about the driver.
58 #define DRV_VERSION "2.0.0-ko"
59 #define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
67 * Default ethtool "message level" for adapters.
69 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
70 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
71 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
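/*
 * Illustrative note (standard netif_msg_* usage, not code specific to
 * this driver): verbose log statements are typically gated on this
 * bitmap, e.g.
 *
 *	if (netif_msg_link(adapter))
 *		netdev_info(dev, "link state changed\n");
 *
 * where netif_msg_link() tests NETIF_MSG_LINK in adapter->msg_enable.
 */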
73 static int dflt_msg_enable = DFLT_MSG_ENABLE;
75 module_param(dflt_msg_enable, int, 0644);
76 MODULE_PARM_DESC(dflt_msg_enable,
77 "default adapter ethtool message level bitmap");
80 * The driver uses the best interrupt scheme available on a platform in the
81 * order MSI-X then MSI. This parameter determines which of these schemes the
82 * driver may consider as follows:
84 * msi = 2: choose from among MSI-X and MSI
85 * msi = 1: only consider MSI interrupts
87 * Note that unlike the Physical Function driver, this Virtual Function driver
88 * does _not_ support legacy INTx interrupts (this limitation is mandated by
89 * the PCI-E SR-IOV standard).
93 #define MSI_DEFAULT MSI_MSIX
95 static int msi = MSI_DEFAULT;
97 module_param(msi, int, 0644);
98 MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
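#if 0
/*
 * Sketch of the fallback ladder which the "msi" parameter controls
 * (illustrative only -- the real probe logic appears later in this
 * file, and the msix_entries field and MSIX_ENTRIES count used here
 * are assumptions, not driver API):
 */
static int example_enable_interrupts(struct adapter *adapter)
{
	/* Try MSI-X first when the parameter allows it ... */
	if (msi == MSI_MSIX &&
	    pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				  1, MSIX_ENTRIES) > 0) {
		adapter->flags |= USING_MSIX;
		return 0;
	}
	/* ... then fall back to MSI. */
	if (pci_enable_msi(adapter->pdev) == 0) {
		adapter->flags |= USING_MSI;
		return 0;
	}
	return -EIO;	/* no INTx fallback on an SR-IOV VF */
}
#endif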
101 * Fundamental constants.
102 * ======================
106 MAX_TXQ_ENTRIES = 16384,
107 MAX_RSPQ_ENTRIES = 16384,
108 MAX_RX_BUFFERS = 16384,
110 MIN_TXQ_ENTRIES = 32,
111 MIN_RSPQ_ENTRIES = 128,
115 * For purposes of manipulating the Free List size we need to
116 * recognize that Free Lists are actually Egress Queues (the host
117 * produces free buffers which the hardware consumes), Egress Queue
118 * indices are all in units of Egress Context Unit bytes, and Free
119 * List entries are 64-bit PCI DMA addresses. And since the state of
120 * the Producer Index == the Consumer Index implies an EMPTY list, we
121 * always have at least one Egress Unit's worth of Free List entries
122 * unused. See sge.c for more details ...
124 EQ_UNIT = SGE_EQ_IDXSIZE,
125 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
126 MIN_FL_RESID = FL_PER_EQ_UNIT,
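/*
 * Worked example (assuming SGE_EQ_IDXSIZE is 64): each Free List
 * entry is an 8-byte __be64 DMA address, so FL_PER_EQ_UNIT is
 * 64/8 == 8 and MIN_FL_RESID reserves eight entries -- exactly one
 * Egress Queue Unit -- so that Producer Index == Consumer Index can
 * always mean "empty" rather than "full".
 */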
130 * Global driver state.
131 * ====================
134 static struct dentry *cxgb4vf_debugfs_root;
137 * OS "Callback" functions.
138 * ========================
142 * The link status has changed on the indicated "port" (Virtual Interface).
144 void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
146 struct net_device *dev = adapter->port[pidx];
149 * If the port is disabled or the current recorded "link up"
150 * status matches the new status, just return.
152 if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
156 * Tell the OS that the link status has changed and print a short
157 * informative message on the console about the event.
162 const struct port_info *pi = netdev_priv(dev);
164 netif_carrier_on(dev);
166 switch (pi->link_cfg.speed) {
188 switch (pi->link_cfg.fc) {
197 case PAUSE_RX|PAUSE_TX:
206 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
208 netif_carrier_off(dev);
209 netdev_info(dev, "link down\n");
214 * The port module type has changed on the indicated "port" (Virtual
217 void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
219 static const char * const mod_str[] = {
220 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
222 const struct net_device *dev = adapter->port[pidx];
223 const struct port_info *pi = netdev_priv(dev);
225 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
226 dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
228 else if (pi->mod_type < ARRAY_SIZE(mod_str))
229 dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
230 dev->name, mod_str[pi->mod_type]);
231 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
232 dev_info(adapter->pdev_dev, "%s: unsupported optical port "
233 "module inserted\n", dev->name);
234 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
235 dev_info(adapter->pdev_dev, "%s: unknown port module inserted, "
236 "forcing TWINAX\n", dev->name);
237 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
238 dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
241 dev_info(adapter->pdev_dev, "%s: unknown module type %d "
242 "inserted\n", dev->name, pi->mod_type);
246 * Net device operations.
247 * ======================
254 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
257 static int link_start(struct net_device *dev)
260 struct port_info *pi = netdev_priv(dev);
263 * We do not set address filters and promiscuity here; the stack
264 * does that step explicitly. We do enable VLAN acceleration.
266 ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
269 ret = t4vf_change_mac(pi->adapter, pi->viid,
270 pi->xact_addr_filt, dev->dev_addr, true);
272 pi->xact_addr_filt = ret;
278 * We don't need to actually "start the link" itself since the
279 * firmware will do that for us when the first Virtual Interface
280 * is enabled on a port.
283 ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
288 * Name the MSI-X interrupts.
290 static void name_msix_vecs(struct adapter *adapter)
292 int namelen = sizeof(adapter->msix_info[0].desc) - 1;
298 snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
299 "%s-FWeventq", adapter->name);
300 adapter->msix_info[MSIX_FW].desc[namelen] = 0;
305 for_each_port(adapter, pidx) {
306 struct net_device *dev = adapter->port[pidx];
307 const struct port_info *pi = netdev_priv(dev);
310 for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
311 snprintf(adapter->msix_info[msi].desc, namelen,
312 "%s-%d", dev->name, qs);
313 adapter->msix_info[msi].desc[namelen] = 0;
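/*
 * Illustrative result: the firmware event queue vector is named
 * "<adapter>-FWeventq" and each Queue Set vector "<netdev>-<qset>",
 * so /proc/interrupts might show entries like "eth0-0", "eth0-1",
 * "eth1-0" for two interfaces with two Queue Sets each.
 */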
319 * Request all of our MSI-X resources.
321 static int request_msix_queue_irqs(struct adapter *adapter)
323 struct sge *s = &adapter->sge;
329 err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
330 0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
338 for_each_ethrxq(s, rxq) {
339 err = request_irq(adapter->msix_info[msi].vec,
340 t4vf_sge_intr_msix, 0,
341 adapter->msix_info[msi].desc,
342 &s->ethrxq[rxq].rspq);
351 free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
352 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
357 * Free our MSI-X resources.
359 static void free_msix_queue_irqs(struct adapter *adapter)
361 struct sge *s = &adapter->sge;
364 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
366 for_each_ethrxq(s, rxq)
367 free_irq(adapter->msix_info[msi++].vec,
368 &s->ethrxq[rxq].rspq);
372 * Turn on NAPI and start up interrupts on a response queue.
374 static void qenable(struct sge_rspq *rspq)
376 napi_enable(&rspq->napi);
379 * 0-increment the Going To Sleep register to start the timer and enable interrupts.
382 t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
384 SEINTARM_V(rspq->intr_params) |
385 INGRESSQID_V(rspq->cntxt_id));
389 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
391 static void enable_rx(struct adapter *adapter)
394 struct sge *s = &adapter->sge;
396 for_each_ethrxq(s, rxq)
397 qenable(&s->ethrxq[rxq].rspq);
398 qenable(&s->fw_evtq);
401 * The interrupt queue doesn't use NAPI so we do the 0-increment of
402 * its Going To Sleep register here to get it started.
404 if (adapter->flags & USING_MSI)
405 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
407 SEINTARM_V(s->intrq.intr_params) |
408 INGRESSQID_V(s->intrq.cntxt_id));
413 * Wait until all NAPI handlers are descheduled.
415 static void quiesce_rx(struct adapter *adapter)
417 struct sge *s = &adapter->sge;
420 for_each_ethrxq(s, rxq)
421 napi_disable(&s->ethrxq[rxq].rspq.napi);
422 napi_disable(&s->fw_evtq.napi);
426 * Response queue handler for the firmware event queue.
428 static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
429 const struct pkt_gl *gl)
432 * Extract response opcode and get pointer to CPL message body.
434 struct adapter *adapter = rspq->adapter;
435 u8 opcode = ((const struct rss_header *)rsp)->opcode;
436 void *cpl = (void *)(rsp + 1);
441 * We've received an asynchronous message from the firmware.
443 const struct cpl_fw6_msg *fw_msg = cpl;
444 if (fw_msg->type == FW6_TYPE_CMD_RPL)
445 t4vf_handle_fw_rpl(adapter, fw_msg->data);
450 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. */
452 const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
453 opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
454 if (opcode != CPL_SGE_EGR_UPDATE) {
455 dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
463 case CPL_SGE_EGR_UPDATE: {
465 * We've received an Egress Queue Status Update message. We
466 * get these, if the SGE is configured to send these when the
467 * firmware passes certain points in processing our TX
468 * Ethernet Queue or if we make an explicit request for one.
469 * We use these updates to determine when we may need to
470 * restart a TX Ethernet Queue which was stopped for lack of
471 * free TX Queue Descriptors ...
473 const struct cpl_sge_egr_update *p = cpl;
474 unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
475 struct sge *s = &adapter->sge;
477 struct sge_eth_txq *txq;
481 * Perform sanity checking on the Queue ID to make sure it
482 * really refers to one of our TX Ethernet Egress Queues which
483 * is active and matches the queue's ID. None of these error
484 * conditions should ever happen, so we may want to make them
485 * fatal and/or conditional under DEBUG.
487 eq_idx = EQ_IDX(s, qid);
488 if (unlikely(eq_idx >= MAX_EGRQ)) {
489 dev_err(adapter->pdev_dev,
490 "Egress Update QID %d out of range\n", qid);
493 tq = s->egr_map[eq_idx];
494 if (unlikely(tq == NULL)) {
495 dev_err(adapter->pdev_dev,
496 "Egress Update QID %d TXQ=NULL\n", qid);
499 txq = container_of(tq, struct sge_eth_txq, q);
500 if (unlikely(tq->abs_id != qid)) {
501 dev_err(adapter->pdev_dev,
502 "Egress Update QID %d refers to TXQ %d\n",
508 * Restart a stopped TX Queue which has less than half of its
512 netif_tx_wake_queue(txq->txq);
517 dev_err(adapter->pdev_dev,
518 "unexpected CPL %#x on FW event queue\n", opcode);
525 * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues
526 * to use and initialize them. We support multiple "Queue Sets" per port if
527 * we have MSI-X, otherwise just one queue set per port.
529 static int setup_sge_queues(struct adapter *adapter)
531 struct sge *s = &adapter->sge;
535 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
538 bitmap_zero(s->starving_fl, MAX_EGRQ);
541 * If we're using MSI interrupt mode we need to set up a "forwarded
542 * interrupt" queue which we'll set up with our MSI vector. The rest
543 * of the ingress queues will be set up to forward their interrupts to
544 * this queue ... This must be first since t4vf_sge_alloc_rxq() uses
545 * the intrq's queue ID as the interrupt forwarding queue for the
546 * subsequent calls ...
548 if (adapter->flags & USING_MSI) {
549 err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
550 adapter->port[0], 0, NULL, NULL);
552 goto err_free_queues;
556 * Allocate our ingress queue for asynchronous firmware messages.
558 err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
559 MSIX_FW, NULL, fwevtq_handler);
561 goto err_free_queues;
564 * Allocate each "port"'s initial Queue Sets. These can be changed
565 * later on ... up to the point where any interface on the adapter is
566 * brought up at which point lots of things get nailed down
570 for_each_port(adapter, pidx) {
571 struct net_device *dev = adapter->port[pidx];
572 struct port_info *pi = netdev_priv(dev);
573 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
574 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
577 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
578 err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
580 &rxq->fl, t4vf_ethrx_handler);
582 goto err_free_queues;
584 err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
585 netdev_get_tx_queue(dev, qs),
586 s->fw_evtq.cntxt_id);
588 goto err_free_queues;
591 memset(&rxq->stats, 0, sizeof(rxq->stats));
596 * Create the reverse mappings for the queues.
598 s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
599 s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
600 IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
601 for_each_port(adapter, pidx) {
602 struct net_device *dev = adapter->port[pidx];
603 struct port_info *pi = netdev_priv(dev);
604 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
605 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
608 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
609 IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
610 EQ_MAP(s, txq->q.abs_id) = &txq->q;
613 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
614 * for Free Lists but since all of the Egress Queues
615 * (including Free Lists) have Relative Queue IDs
616 * which are computed as Absolute - Base Queue ID, we
617 * can synthesize the Absolute Queue IDs for the Free
618 * Lists. This is useful for debugging purposes when
619 * we want to dump Queue Contexts via the PF Driver.
621 rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
622 EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
628 t4vf_free_sge_resources(adapter);
633 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
634 * queues. We configure the RSS CPU lookup table to distribute to the number
635 * of HW receive queues, and the response queue lookup table to narrow that
636 * down to the response queues actually configured for each "port" (Virtual
637 * Interface). We always configure the RSS mapping for all ports since the
638 * mapping table has plenty of entries.
640 static int setup_rss(struct adapter *adapter)
644 for_each_port(adapter, pidx) {
645 struct port_info *pi = adap2pinfo(adapter, pidx);
646 struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
647 u16 rss[MAX_PORT_QSETS];
650 for (qs = 0; qs < pi->nqsets; qs++)
651 rss[qs] = rxq[qs].rspq.abs_id;
653 err = t4vf_config_rss_range(adapter, pi->viid,
654 0, pi->rss_size, rss, pi->nqsets);
659 * Perform Global RSS Mode-specific initialization.
661 switch (adapter->params.rss.mode) {
662 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
664 * If Tunnel All Lookup isn't specified in the global
665 * RSS Configuration, then we need to specify a
666 * default Ingress Queue for any ingress packets which
667 * aren't hashed. We'll use our first ingress queue
670 if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
671 union rss_vi_config config;
672 err = t4vf_read_rss_vi_config(adapter,
677 config.basicvirtual.defaultq =
679 err = t4vf_write_rss_vi_config(adapter,
693 * Bring the adapter up. Called whenever we go from no "ports" open to having
694 * one open. This function performs the actions necessary to make an adapter
695 * operational, such as completing the initialization of HW modules, and
696 * enabling interrupts. Must be called with the rtnl lock held. (Note that
697 * this is called "cxgb_up" in the PF Driver.)
699 static int adapter_up(struct adapter *adapter)
704 * If this is the first time we've been called, perform basic
705 * adapter setup. Once we've done this, many of our adapter
706 * parameters can no longer be changed ...
708 if ((adapter->flags & FULL_INIT_DONE) == 0) {
709 err = setup_sge_queues(adapter);
712 err = setup_rss(adapter);
714 t4vf_free_sge_resources(adapter);
718 if (adapter->flags & USING_MSIX)
719 name_msix_vecs(adapter);
720 adapter->flags |= FULL_INIT_DONE;
724 * Acquire our interrupt resources. We only support MSI-X and MSI.
726 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
727 if (adapter->flags & USING_MSIX)
728 err = request_msix_queue_irqs(adapter);
730 err = request_irq(adapter->pdev->irq,
731 t4vf_intr_handler(adapter), 0,
732 adapter->name, adapter);
734 dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
740 * Enable NAPI ingress processing and return success.
743 t4vf_sge_start(adapter);
745 /* Initialize the hash MAC address list */
746 INIT_LIST_HEAD(&adapter->mac_hlist);
751 * Bring the adapter down. Called whenever the last "port" (Virtual
752 * Interface) is closed. (Note that this routine is called "cxgb_down" in the PF
755 static void adapter_down(struct adapter *adapter)
758 * Free interrupt resources.
760 if (adapter->flags & USING_MSIX)
761 free_msix_queue_irqs(adapter);
763 free_irq(adapter->pdev->irq, adapter);
766 * Wait for NAPI handlers to finish.
772 * Start up a net device.
774 static int cxgb4vf_open(struct net_device *dev)
777 struct port_info *pi = netdev_priv(dev);
778 struct adapter *adapter = pi->adapter;
781 * If this is the first interface that we're opening on the "adapter",
782 * bring the "adapter" up now.
784 if (adapter->open_device_map == 0) {
785 err = adapter_up(adapter);
791 * Note that this interface is up and start everything up ...
793 netif_set_real_num_tx_queues(dev, pi->nqsets);
794 err = netif_set_real_num_rx_queues(dev, pi->nqsets);
797 err = link_start(dev);
801 netif_tx_start_all_queues(dev);
802 set_bit(pi->port_id, &adapter->open_device_map);
806 if (adapter->open_device_map == 0)
807 adapter_down(adapter);
812 * Shut down a net device. This routine is called "cxgb_close" in the PF
815 static int cxgb4vf_stop(struct net_device *dev)
817 struct port_info *pi = netdev_priv(dev);
818 struct adapter *adapter = pi->adapter;
820 netif_tx_stop_all_queues(dev);
821 netif_carrier_off(dev);
822 t4vf_enable_vi(adapter, pi->viid, false, false);
823 pi->link_cfg.link_ok = 0;
825 clear_bit(pi->port_id, &adapter->open_device_map);
826 if (adapter->open_device_map == 0)
827 adapter_down(adapter);
832 * Translate our basic statistics into the standard "ifconfig" statistics.
834 static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
836 struct t4vf_port_stats stats;
837 struct port_info *pi = netdev2pinfo(dev);
838 struct adapter *adapter = pi->adapter;
839 struct net_device_stats *ns = &dev->stats;
842 spin_lock(&adapter->stats_lock);
843 err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
844 spin_unlock(&adapter->stats_lock);
846 memset(ns, 0, sizeof(*ns));
850 ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
851 stats.tx_ucast_bytes + stats.tx_offload_bytes);
852 ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
853 stats.tx_ucast_frames + stats.tx_offload_frames);
854 ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
855 stats.rx_ucast_bytes);
856 ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
857 stats.rx_ucast_frames);
858 ns->multicast = stats.rx_mcast_frames;
859 ns->tx_errors = stats.tx_drop_frames;
860 ns->rx_errors = stats.rx_err_frames;
866 * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
867 * at a specified offset within the list, into an array of address pointers and
868 * return the number collected.
870 static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
873 unsigned int maxaddrs)
875 unsigned int index = 0;
876 unsigned int naddr = 0;
877 const struct netdev_hw_addr *ha;
879 for_each_dev_addr(dev, ha)
880 if (index++ >= offset) {
881 addr[naddr++] = ha->addr;
882 if (naddr >= maxaddrs)
889 * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
890 * at a specified offset within the list, into an array of address pointers and
891 * return the number collected.
893 static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
896 unsigned int maxaddrs)
898 unsigned int index = 0;
899 unsigned int naddr = 0;
900 const struct netdev_hw_addr *ha;
902 netdev_for_each_mc_addr(ha, dev)
903 if (index++ >= offset) {
904 addr[naddr++] = ha->addr;
905 if (naddr >= maxaddrs)
911 static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
913 struct adapter *adapter = pi->adapter;
916 struct hash_mac_addr *entry;
918 /* Calculate the hash vector for the updated list and program it */
919 list_for_each_entry(entry, &adapter->mac_hlist, list) {
920 ucast |= is_unicast_ether_addr(entry->addr);
921 vec |= (1ULL << hash_mac_addr(entry->addr));
923 return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
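/*
 * Illustrative note: "vec" is a 64-bit bitmap of hash buckets -- bit i
 * is set iff some address on adapter->mac_hlist hashes to bucket i --
 * so hashed (inexact) filtering accepts any frame whose destination
 * MAC lands in a set bucket.
 */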
926 static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
928 struct port_info *pi = netdev_priv(netdev);
929 struct adapter *adapter = pi->adapter;
934 bool ucast = is_unicast_ether_addr(mac_addr);
935 const u8 *maclist[1] = {mac_addr};
936 struct hash_mac_addr *new_entry;
938 ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
939 NULL, ucast ? &uhash : &mhash, false);
942 /* If hash != 0, add the address to the hash address list
943 * so that at the end we can calculate the hash vector for
944 * the whole list and program it.
946 if (uhash || mhash) {
947 new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
950 ether_addr_copy(new_entry->addr, mac_addr);
951 list_add_tail(&new_entry->list, &adapter->mac_hlist);
952 ret = cxgb4vf_set_addr_hash(pi);
955 return ret < 0 ? ret : 0;
958 static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
960 struct port_info *pi = netdev_priv(netdev);
961 struct adapter *adapter = pi->adapter;
963 const u8 *maclist[1] = {mac_addr};
964 struct hash_mac_addr *entry, *tmp;
966 /* If the MAC address to be removed is in the hash addr
967 * list, delete it from the list and update the hash vector
969 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
970 if (ether_addr_equal(entry->addr, mac_addr)) {
971 list_del(&entry->list);
973 return cxgb4vf_set_addr_hash(pi);
977 ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
978 return ret < 0 ? -EINVAL : 0;
982 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
983 * If @mtu is -1 it is left unchanged.
985 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
987 struct port_info *pi = netdev_priv(dev);
989 if (!(dev->flags & IFF_PROMISC)) {
990 __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
991 if (!(dev->flags & IFF_ALLMULTI))
992 __dev_mc_sync(dev, cxgb4vf_mac_sync,
995 return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
996 (dev->flags & IFF_PROMISC) != 0,
997 (dev->flags & IFF_ALLMULTI) != 0,
1002 * Set the current receive modes on the device.
1004 static void cxgb4vf_set_rxmode(struct net_device *dev)
1006 /* unfortunately we can't return errors to the stack */
1007 set_rxmode(dev, -1, false);
1011 * Find the entry in the interrupt holdoff timer value array which comes
1012 * closest to the specified interrupt holdoff value.
1014 static int closest_timer(const struct sge *s, int us)
1016 int i, timer_idx = 0, min_delta = INT_MAX;
1018 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1019 int delta = us - s->timer_val[i];
1022 if (delta < min_delta) {
1030 static int closest_thres(const struct sge *s, int thres)
1032 int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
1034 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1035 delta = thres - s->counter_val[i];
1038 if (delta < min_delta) {
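/*
 * Worked example (illustrative values): with timer_val[] = {1, 5, 10,
 * 50, 100, 200} microseconds, a request for an 8us holdoff has
 * absolute deltas {7, 3, 2, 42, 92, 192}, so closest_timer() returns
 * index 2 (10us). closest_thres() performs the same nearest-match
 * search over the interrupt packet count thresholds.
 */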
1047 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1049 static unsigned int qtimer_val(const struct adapter *adapter,
1050 const struct sge_rspq *rspq)
1052 unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
1054 return timer_idx < SGE_NTIMERS
1055 ? adapter->sge.timer_val[timer_idx]
1060 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1061 * @adapter: the adapter
1062 * @rspq: the RX response queue
1063 * @us: the hold-off time in us, or 0 to disable timer
1064 * @cnt: the hold-off packet count, or 0 to disable counter
1066 * Sets an RX response queue's interrupt hold-off time and packet count.
1067 * At least one of the two needs to be enabled for the queue to generate
1070 static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
1071 unsigned int us, unsigned int cnt)
1073 unsigned int timer_idx;
1076 * If both the interrupt holdoff timer and count are specified as
1077 * zero, default to a holdoff count of 1 ...
1079 if ((us | cnt) == 0)
1083 * If an interrupt holdoff count has been specified, then find the
1084 * closest configured holdoff count and use that. If the response
1085 * queue has already been created, then update its queue context
1092 pktcnt_idx = closest_thres(&adapter->sge, cnt);
1093 if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
1094 v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1095 FW_PARAMS_PARAM_X_V(
1096 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1097 FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
1098 err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
1102 rspq->pktcnt_idx = pktcnt_idx;
1106 * Compute the closest holdoff timer index from the supplied holdoff
1109 timer_idx = (us == 0
1110 ? SGE_TIMER_RSTRT_CNTR
1111 : closest_timer(&adapter->sge, us));
1114 * Update the response queue's interrupt coalescing parameters and
1117 rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
1118 QINTR_CNT_EN_V(cnt > 0));
1123 * Return a version number to identify the type of adapter. The scheme is:
1124 * - bits 0..9: chip version
1125 * - bits 10..15: chip revision
1127 static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1130 * Chip version 4, revision 0x3f (cxgb4vf).
1132 return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
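/*
 * Worked example: on a T4 part, where CHELSIO_CHIP_VERSION() yields 4,
 * this returns 4 | (0x3f << 10) == 0xfc04 -- chip version 4 in bits
 * 0..9 and the fixed VF revision 0x3f in bits 10..15.
 */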
1136 * Execute the specified ioctl command.
1138 static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1144 * The VF Driver doesn't have access to any of the other
1145 * common Ethernet device ioctl()'s (like reading/writing
1146 * PHY registers, etc.).
1157 * Change the device's MTU.
1159 static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
1162 struct port_info *pi = netdev_priv(dev);
1164 /* accommodate SACK */
1168 ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
1169 -1, -1, -1, -1, true);
1175 static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
1176 netdev_features_t features)
1179 * Since there is no support for separate RX/TX VLAN acceleration
1180 * enable/disable, make sure the TX flag is always in the same state as RX.
1182 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1183 features |= NETIF_F_HW_VLAN_CTAG_TX;
1185 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
1190 static int cxgb4vf_set_features(struct net_device *dev,
1191 netdev_features_t features)
1193 struct port_info *pi = netdev_priv(dev);
1194 netdev_features_t changed = dev->features ^ features;
1196 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1197 t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
1198 features & NETIF_F_HW_VLAN_CTAG_TX, 0);
1204 * Change the device's MAC address.
1206 static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1209 struct sockaddr *addr = _addr;
1210 struct port_info *pi = netdev_priv(dev);
1212 if (!is_valid_ether_addr(addr->sa_data))
1213 return -EADDRNOTAVAIL;
1215 ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
1216 addr->sa_data, true);
1220 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1221 pi->xact_addr_filt = ret;
1225 #ifdef CONFIG_NET_POLL_CONTROLLER
1227 * Poll all of our receive queues. This is called outside of normal interrupt context.
1230 static void cxgb4vf_poll_controller(struct net_device *dev)
1232 struct port_info *pi = netdev_priv(dev);
1233 struct adapter *adapter = pi->adapter;
1235 if (adapter->flags & USING_MSIX) {
1236 struct sge_eth_rxq *rxq;
1239 rxq = &adapter->sge.ethrxq[pi->first_qset];
1240 for (nqsets = pi->nqsets; nqsets; nqsets--) {
1241 t4vf_sge_intr_msix(0, &rxq->rspq);
1245 t4vf_intr_handler(adapter)(0, adapter);
1250 * Ethtool operations.
1251 * ===================
1253 * Note that we don't support any ethtool operations which change the physical
1254 * state of the port to which we're linked.
1257 static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
1262 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1263 type == FW_PORT_TYPE_BT_XAUI) {
1265 if (caps & FW_PORT_CAP_SPEED_100M)
1266 v |= SUPPORTED_100baseT_Full;
1267 if (caps & FW_PORT_CAP_SPEED_1G)
1268 v |= SUPPORTED_1000baseT_Full;
1269 if (caps & FW_PORT_CAP_SPEED_10G)
1270 v |= SUPPORTED_10000baseT_Full;
1271 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1272 v |= SUPPORTED_Backplane;
1273 if (caps & FW_PORT_CAP_SPEED_1G)
1274 v |= SUPPORTED_1000baseKX_Full;
1275 if (caps & FW_PORT_CAP_SPEED_10G)
1276 v |= SUPPORTED_10000baseKX4_Full;
1277 } else if (type == FW_PORT_TYPE_KR)
1278 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1279 else if (type == FW_PORT_TYPE_BP_AP)
1280 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1281 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1282 else if (type == FW_PORT_TYPE_BP4_AP)
1283 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1284 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1285 SUPPORTED_10000baseKX4_Full;
1286 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1287 type == FW_PORT_TYPE_FIBER_XAUI ||
1288 type == FW_PORT_TYPE_SFP ||
1289 type == FW_PORT_TYPE_QSFP_10G ||
1290 type == FW_PORT_TYPE_QSA) {
1291 v |= SUPPORTED_FIBRE;
1292 if (caps & FW_PORT_CAP_SPEED_1G)
1293 v |= SUPPORTED_1000baseT_Full;
1294 if (caps & FW_PORT_CAP_SPEED_10G)
1295 v |= SUPPORTED_10000baseT_Full;
1296 } else if (type == FW_PORT_TYPE_BP40_BA ||
1297 type == FW_PORT_TYPE_QSFP) {
1298 v |= SUPPORTED_40000baseSR4_Full;
1299 v |= SUPPORTED_FIBRE;
1302 if (caps & FW_PORT_CAP_ANEG)
1303 v |= SUPPORTED_Autoneg;
1307 static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1309 const struct port_info *p = netdev_priv(dev);
1311 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1312 p->port_type == FW_PORT_TYPE_BT_XFI ||
1313 p->port_type == FW_PORT_TYPE_BT_XAUI)
1314 cmd->port = PORT_TP;
1315 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1316 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
1317 cmd->port = PORT_FIBRE;
1318 else if (p->port_type == FW_PORT_TYPE_SFP ||
1319 p->port_type == FW_PORT_TYPE_QSFP_10G ||
1320 p->port_type == FW_PORT_TYPE_QSA ||
1321 p->port_type == FW_PORT_TYPE_QSFP) {
1322 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
1323 p->mod_type == FW_PORT_MOD_TYPE_SR ||
1324 p->mod_type == FW_PORT_MOD_TYPE_ER ||
1325 p->mod_type == FW_PORT_MOD_TYPE_LRM)
1326 cmd->port = PORT_FIBRE;
1327 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1328 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1329 cmd->port = PORT_DA;
1331 cmd->port = PORT_OTHER;
1333 cmd->port = PORT_OTHER;
1335 if (p->mdio_addr >= 0) {
1336 cmd->phy_address = p->mdio_addr;
1337 cmd->transceiver = XCVR_EXTERNAL;
1338 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1339 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1341 cmd->phy_address = 0; /* not really, but no better option */
1342 cmd->transceiver = XCVR_INTERNAL;
1343 cmd->mdio_support = 0;
1346 cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
1347 p->link_cfg.supported);
1348 cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
1349 p->link_cfg.advertising);
1350 ethtool_cmd_speed_set(cmd,
1351 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
1352 cmd->duplex = DUPLEX_FULL;
1353 cmd->autoneg = p->link_cfg.autoneg;
1360 * Return our driver information.
1362 static void cxgb4vf_get_drvinfo(struct net_device *dev,
1363 struct ethtool_drvinfo *drvinfo)
1365 struct adapter *adapter = netdev2adap(dev);
1367 strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
1368 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
1369 strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
1370 sizeof(drvinfo->bus_info));
1371 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1372 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1373 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
1374 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
1375 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
1376 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
1377 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
1378 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
1379 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
1380 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
1384 * Return current adapter message level.
1386 static u32 cxgb4vf_get_msglevel(struct net_device *dev)
1388 return netdev2adap(dev)->msg_enable;
1392 * Set current adapter message level.
1394 static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
1396 netdev2adap(dev)->msg_enable = msglevel;
1400 * Return the device's current Queue Set ring size parameters along with the
1401 * allowed maximum values. Since ethtool doesn't understand the concept of
1402 * multi-queue devices, we just return the current values associated with the
1405 static void cxgb4vf_get_ringparam(struct net_device *dev,
1406 struct ethtool_ringparam *rp)
1408 const struct port_info *pi = netdev_priv(dev);
1409 const struct sge *s = &pi->adapter->sge;
1411 rp->rx_max_pending = MAX_RX_BUFFERS;
1412 rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1413 rp->rx_jumbo_max_pending = 0;
1414 rp->tx_max_pending = MAX_TXQ_ENTRIES;
1416 rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
1417 rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1418 rp->rx_jumbo_pending = 0;
1419 rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
1423 * Set the Queue Set ring size parameters for the device. Again, since
1424 * ethtool doesn't allow for the concept of multiple queues per device, we'll
1425 * apply these new values across all of the Queue Sets associated with the
1426 * device -- after vetting them of course!
1428 static int cxgb4vf_set_ringparam(struct net_device *dev,
1429 struct ethtool_ringparam *rp)
1431 const struct port_info *pi = netdev_priv(dev);
1432 struct adapter *adapter = pi->adapter;
1433 struct sge *s = &adapter->sge;
1436 if (rp->rx_pending > MAX_RX_BUFFERS ||
1437 rp->rx_jumbo_pending ||
1438 rp->tx_pending > MAX_TXQ_ENTRIES ||
1439 rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1440 rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1441 rp->rx_pending < MIN_FL_ENTRIES ||
1442 rp->tx_pending < MIN_TXQ_ENTRIES)
1445 if (adapter->flags & FULL_INIT_DONE)
1448 for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
1449 s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
1450 s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
1451 s->ethtxq[qs].q.size = rp->tx_pending;
1457 * Return the interrupt holdoff timer and count for the first Queue Set on the
1458 * device. Our extension ioctl() (the cxgbtool interface) allows the
1459 * interrupt holdoff timer to be read on all of the device's Queue Sets.
1461 static int cxgb4vf_get_coalesce(struct net_device *dev,
1462 struct ethtool_coalesce *coalesce)
1464 const struct port_info *pi = netdev_priv(dev);
1465 const struct adapter *adapter = pi->adapter;
1466 const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
1468 coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1469 coalesce->rx_max_coalesced_frames =
1470 ((rspq->intr_params & QINTR_CNT_EN_F)
1471 ? adapter->sge.counter_val[rspq->pktcnt_idx]
1477 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
1478 * interface. Our extension ioctl() (the cxgbtool interface) allows us to set
1479 * the interrupt holdoff timer on any of the device's Queue Sets.
1481 static int cxgb4vf_set_coalesce(struct net_device *dev,
1482 struct ethtool_coalesce *coalesce)
1484 const struct port_info *pi = netdev_priv(dev);
1485 struct adapter *adapter = pi->adapter;
1487 return set_rxq_intr_params(adapter,
1488 &adapter->sge.ethrxq[pi->first_qset].rspq,
1489 coalesce->rx_coalesce_usecs,
1490 coalesce->rx_max_coalesced_frames);
1494 * Report current port link pause parameter settings.
1496 static void cxgb4vf_get_pauseparam(struct net_device *dev,
1497 struct ethtool_pauseparam *pauseparam)
1499 struct port_info *pi = netdev_priv(dev);
1501 pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1502 pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
1503 pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
1507 * Identify the port by blinking the port's LED.
1509 static int cxgb4vf_phys_id(struct net_device *dev,
1510 enum ethtool_phys_id_state state)
1513 struct port_info *pi = netdev_priv(dev);
1515 if (state == ETHTOOL_ID_ACTIVE)
1517 else if (state == ETHTOOL_ID_INACTIVE)
1522 return t4vf_identify_port(pi->adapter, pi->viid, val);
1526 * Port stats maintained per queue of the port.
1528 struct queue_port_stats {
1539 * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that
1540 * these need to match the order of statistics returned by
1541 * t4vf_get_port_stats().
1543 static const char stats_strings[][ETH_GSTRING_LEN] = {
1545 * These must match the layout of the t4vf_port_stats structure.
1547 "TxBroadcastBytes ",
1548 "TxBroadcastFrames ",
1549 "TxMulticastBytes ",
1550 "TxMulticastFrames ",
1556 "RxBroadcastBytes ",
1557 "RxBroadcastFrames ",
1558 "RxMulticastBytes ",
1559 "RxMulticastFrames ",
1565 * These are accumulated per-queue statistics and must match the
1566 * order of the fields in the queue_port_stats structure.
1578 * Return the number of statistics in the specified statistics set.
1580 static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
1584 return ARRAY_SIZE(stats_strings);
1592 * Return the strings for the specified statistics set.
1594 static void cxgb4vf_get_strings(struct net_device *dev,
1600 memcpy(data, stats_strings, sizeof(stats_strings));
1606 * Small utility routine to accumulate queue statistics across the queues of a "port".
1609 static void collect_sge_port_stats(const struct adapter *adapter,
1610 const struct port_info *pi,
1611 struct queue_port_stats *stats)
1613 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
1614 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
1617 memset(stats, 0, sizeof(*stats));
1618 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
1619 stats->tso += txq->tso;
1620 stats->tx_csum += txq->tx_cso;
1621 stats->rx_csum += rxq->stats.rx_cso;
1622 stats->vlan_ex += rxq->stats.vlan_ex;
1623 stats->vlan_ins += txq->vlan_ins;
1624 stats->lro_pkts += rxq->stats.lro_pkts;
1625 stats->lro_merged += rxq->stats.lro_merged;
1630 * Return the ETH_SS_STATS statistics set.
1632 static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
1633 struct ethtool_stats *stats,
1636 struct port_info *pi = netdev2pinfo(dev);
1637 struct adapter *adapter = pi->adapter;
1638 int err = t4vf_get_port_stats(adapter, pi->pidx,
1639 (struct t4vf_port_stats *)data);
1641 memset(data, 0, sizeof(struct t4vf_port_stats));
1643 data += sizeof(struct t4vf_port_stats) / sizeof(u64);
1644 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1648 * Return the size of our register map.
1650 static int cxgb4vf_get_regs_len(struct net_device *dev)
1652 return T4VF_REGMAP_SIZE;
1656 * Dump a block of registers, start to end inclusive, into a buffer.
1658 static void reg_block_dump(struct adapter *adapter, void *regbuf,
1659 unsigned int start, unsigned int end)
1661 u32 *bp = regbuf + start - T4VF_REGMAP_START;
1663 for ( ; start <= end; start += sizeof(u32)) {
1665 * Avoid reading the Mailbox Control register since that
1666 * can trigger a Mailbox Ownership Arbitration cycle and
1667 * interfere with communication with the firmware.
1669 if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
1672 *bp++ = t4_read_reg(adapter, start);
1677 * Copy our entire register map into the provided buffer.
1679 static void cxgb4vf_get_regs(struct net_device *dev,
1680 struct ethtool_regs *regs,
1683 struct adapter *adapter = netdev2adap(dev);
1685 regs->version = mk_adap_vers(adapter);
1688 * Fill in register buffer with our register map.
1690 memset(regbuf, 0, T4VF_REGMAP_SIZE);
1692 reg_block_dump(adapter, regbuf,
1693 T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
1694 T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
1695 reg_block_dump(adapter, regbuf,
1696 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
1697 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
1699 /* T5 adds new registers in the PL Register map.
1701 reg_block_dump(adapter, regbuf,
1702 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1703 T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
1704 ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
1705 reg_block_dump(adapter, regbuf,
1706 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1707 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
1709 reg_block_dump(adapter, regbuf,
1710 T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
1711 T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
1715 * Report current Wake On LAN settings.
1717 static void cxgb4vf_get_wol(struct net_device *dev,
1718 struct ethtool_wolinfo *wol)
1722 memset(&wol->sopass, 0, sizeof(wol->sopass));
1726 * TCP Segmentation Offload flags which we support.
1728 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1730 static const struct ethtool_ops cxgb4vf_ethtool_ops = {
1731 .get_settings = cxgb4vf_get_settings,
1732 .get_drvinfo = cxgb4vf_get_drvinfo,
1733 .get_msglevel = cxgb4vf_get_msglevel,
1734 .set_msglevel = cxgb4vf_set_msglevel,
1735 .get_ringparam = cxgb4vf_get_ringparam,
1736 .set_ringparam = cxgb4vf_set_ringparam,
1737 .get_coalesce = cxgb4vf_get_coalesce,
1738 .set_coalesce = cxgb4vf_set_coalesce,
1739 .get_pauseparam = cxgb4vf_get_pauseparam,
1740 .get_link = ethtool_op_get_link,
1741 .get_strings = cxgb4vf_get_strings,
1742 .set_phys_id = cxgb4vf_phys_id,
1743 .get_sset_count = cxgb4vf_get_sset_count,
1744 .get_ethtool_stats = cxgb4vf_get_ethtool_stats,
1745 .get_regs_len = cxgb4vf_get_regs_len,
1746 .get_regs = cxgb4vf_get_regs,
1747 .get_wol = cxgb4vf_get_wol,
1751 * /sys/kernel/debug/cxgb4vf support code and data.
1752 * ================================================
1756 * Show SGE Queue Set information. We display QPL Queue Sets per line.
1760 static int sge_qinfo_show(struct seq_file *seq, void *v)
1762 struct adapter *adapter = seq->private;
1763 int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1764 int qs, r = (uintptr_t)v - 1;
1767 seq_putc(seq, '\n');
1769 #define S3(fmt_spec, s, v) \
1771 seq_printf(seq, "%-12s", s); \
1772 for (qs = 0; qs < n; ++qs) \
1773 seq_printf(seq, " %16" fmt_spec, v); \
1774 seq_putc(seq, '\n'); \
1776 #define S(s, v) S3("s", s, v)
1777 #define T(s, v) S3("u", s, txq[qs].v)
1778 #define R(s, v) S3("u", s, rxq[qs].v)
1780 if (r < eth_entries) {
1781 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1782 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1783 int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1785 S("QType:", "Ethernet");
1787 (rxq[qs].rspq.netdev
1788 ? rxq[qs].rspq.netdev->name
1791 (rxq[qs].rspq.netdev
1792 ? ((struct port_info *)
1793 netdev_priv(rxq[qs].rspq.netdev))->port_id
1795 T("TxQ ID:", q.abs_id);
1796 T("TxQ size:", q.size);
1797 T("TxQ inuse:", q.in_use);
1798 T("TxQ PIdx:", q.pidx);
1799 T("TxQ CIdx:", q.cidx);
1800 R("RspQ ID:", rspq.abs_id);
1801 R("RspQ size:", rspq.size);
1802 R("RspQE size:", rspq.iqe_len);
1803 S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
1804 S3("u", "Intr pktcnt:",
1805 adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
1806 R("RspQ CIdx:", rspq.cidx);
1807 R("RspQ Gen:", rspq.gen);
1808 R("FL ID:", fl.abs_id);
1809 R("FL size:", fl.size - MIN_FL_RESID);
1810 R("FL avail:", fl.avail);
1811 R("FL PIdx:", fl.pidx);
1812 R("FL CIdx:", fl.cidx);
1818 const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
1820 seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
1821 seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
1822 seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1823 qtimer_val(adapter, evtq));
1824 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1825 adapter->sge.counter_val[evtq->pktcnt_idx]);
1826 seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
1827 seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
1828 } else if (r == 1) {
1829 const struct sge_rspq *intrq = &adapter->sge.intrq;
1831 seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
1832 seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
1833 seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1834 qtimer_val(adapter, intrq));
1835 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1836 adapter->sge.counter_val[intrq->pktcnt_idx]);
1837 seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
1838 seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
1850 * Return the number of "entries" in our "file". We group the multi-Queue
1851 * sections with QPL Queue Sets per "entry". The sections of the output are:
1853 * Ethernet RX/TX Queue Sets
1854 * Firmware Event Queue
1855 * Forwarded Interrupt Queue (if in MSI mode)
1857 static int sge_queue_entries(const struct adapter *adapter)
1859 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
1860 ((adapter->flags & USING_MSI) != 0);
1863 static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
1865 int entries = sge_queue_entries(seq->private);
1867 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1870 static void sge_queue_stop(struct seq_file *seq, void *v)
1874 static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
1876 int entries = sge_queue_entries(seq->private);
1879 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
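/*
 * Note the "+ 1" bias above: a seq_file iterator must not return NULL
 * for a valid position (NULL means end of sequence), so positions are
 * offset by one and sge_qinfo_show() undoes the bias with
 * "r = (uintptr_t)v - 1".
 */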
1882 static const struct seq_operations sge_qinfo_seq_ops = {
1883 .start = sge_queue_start,
1884 .next = sge_queue_next,
1885 .stop = sge_queue_stop,
1886 .show = sge_qinfo_show
1889 static int sge_qinfo_open(struct inode *inode, struct file *file)
1891 int res = seq_open(file, &sge_qinfo_seq_ops);
1894 struct seq_file *seq = file->private_data;
1895 seq->private = inode->i_private;
1900 static const struct file_operations sge_qinfo_debugfs_fops = {
1901 .owner = THIS_MODULE,
1902 .open = sge_qinfo_open,
1904 .llseek = seq_lseek,
1905 .release = seq_release,
1909 * Show SGE Queue Set statistics. We display QPL Queue Sets per line.
1913 static int sge_qstats_show(struct seq_file *seq, void *v)
1915 struct adapter *adapter = seq->private;
1916 int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1917 int qs, r = (uintptr_t)v - 1;
1920 seq_putc(seq, '\n');
1922 #define S3(fmt, s, v) \
1924 seq_printf(seq, "%-16s", s); \
1925 for (qs = 0; qs < n; ++qs) \
1926 seq_printf(seq, " %8" fmt, v); \
1927 seq_putc(seq, '\n'); \
1929 #define S(s, v) S3("s", s, v)
1931 #define T3(fmt, s, v) S3(fmt, s, txq[qs].v)
1932 #define T(s, v) T3("lu", s, v)
1934 #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
1935 #define R(s, v) R3("lu", s, v)
1937 if (r < eth_entries) {
1938 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1939 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1940 int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1942 S("QType:", "Ethernet");
1944 (rxq[qs].rspq.netdev
1945 ? rxq[qs].rspq.netdev->name
1947 R3("u", "RspQNullInts:", rspq.unhandled_irqs);
1948 R("RxPackets:", stats.pkts);
1949 R("RxCSO:", stats.rx_cso);
1950 R("VLANxtract:", stats.vlan_ex);
1951 R("LROmerged:", stats.lro_merged);
1952 R("LROpackets:", stats.lro_pkts);
1953 R("RxDrops:", stats.rx_drops);
1955 T("TxCSO:", tx_cso);
1956 T("VLANins:", vlan_ins);
1957 T("TxQFull:", q.stops);
1958 T("TxQRestarts:", q.restarts);
1959 T("TxMapErr:", mapping_err);
1960 R("FLAllocErr:", fl.alloc_failed);
1961 R("FLLrgAlcErr:", fl.large_alloc_failed);
1962 R("FLStarving:", fl.starving);
1968 const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
1970 seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
1971 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
1972 evtq->unhandled_irqs);
1973 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
1974 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
1975 } else if (r == 1) {
1976 const struct sge_rspq *intrq = &adapter->sge.intrq;
1978 seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
1979 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
1980 intrq->unhandled_irqs);
1981 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
1982 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
1996 * Return the number of "entries" in our "file". We group the multi-Queue
1997 * sections with QPL Queue Sets per "entry". The sections of the output are:
1999 * Ethernet RX/TX Queue Sets
2000 * Firmware Event Queue
2001 * Forwarded Interrupt Queue (if in MSI mode)
2003 static int sge_qstats_entries(const struct adapter *adapter)
2005 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2006 ((adapter->flags & USING_MSI) != 0);
2009 static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
2011 int entries = sge_qstats_entries(seq->private);
2013 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2016 static void sge_qstats_stop(struct seq_file *seq, void *v)
2020 static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
2022 int entries = sge_qstats_entries(seq->private);
2025 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2028 static const struct seq_operations sge_qstats_seq_ops = {
2029 .start = sge_qstats_start,
2030 .next = sge_qstats_next,
2031 .stop = sge_qstats_stop,
2032 .show = sge_qstats_show
2035 static int sge_qstats_open(struct inode *inode, struct file *file)
2037 int res = seq_open(file, &sge_qstats_seq_ops);
2040 struct seq_file *seq = file->private_data;
2041 seq->private = inode->i_private;
2046 static const struct file_operations sge_qstats_proc_fops = {
2047 .owner = THIS_MODULE,
2048 .open = sge_qstats_open,
2050 .llseek = seq_lseek,
2051 .release = seq_release,
2055 * Show PCI-E SR-IOV Virtual Function Resource Limits.
2057 static int resources_show(struct seq_file *seq, void *v)
2059 struct adapter *adapter = seq->private;
2060 struct vf_resources *vfres = &adapter->params.vfres;
2062 #define S(desc, fmt, var) \
2063 seq_printf(seq, "%-60s " fmt "\n", \
2064 desc " (" #var "):", vfres->var)
2066 S("Virtual Interfaces", "%d", nvi);
2067 S("Egress Queues", "%d", neq);
2068 S("Ethernet Control", "%d", nethctrl);
2069 S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
2070 S("Ingress Queues", "%d", niq);
2071 S("Traffic Class", "%d", tc);
2072 S("Port Access Rights Mask", "%#x", pmask);
2073 S("MAC Address Filters", "%d", nexactf);
2074 S("Firmware Command Read Capabilities", "%#x", r_caps);
2075 S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2082 static int resources_open(struct inode *inode, struct file *file)
2084 return single_open(file, resources_show, inode->i_private);
2087 static const struct file_operations resources_proc_fops = {
2088 .owner = THIS_MODULE,
2089 .open = resources_open,
2091 .llseek = seq_lseek,
2092 .release = single_release,
2096 * Show Virtual Interfaces.
2098 static int interfaces_show(struct seq_file *seq, void *v)
2100 if (v == SEQ_START_TOKEN) {
2101 seq_puts(seq, "Interface Port VIID\n");
2103 struct adapter *adapter = seq->private;
2104 int pidx = (uintptr_t)v - 2;
2105 struct net_device *dev = adapter->port[pidx];
2106 struct port_info *pi = netdev_priv(dev);
2108 seq_printf(seq, "%9s %4d %#5x\n",
2109 dev->name, pi->port_id, pi->viid);
2114 static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
2116 return pos <= adapter->params.nports
2117 ? (void *)(uintptr_t)(pos + 1)
2121 static void *interfaces_start(struct seq_file *seq, loff_t *pos)
2124 ? interfaces_get_idx(seq->private, *pos)
2128 static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
2131 return interfaces_get_idx(seq->private, *pos);
2134 static void interfaces_stop(struct seq_file *seq, void *v)
2138 static const struct seq_operations interfaces_seq_ops = {
2139 .start = interfaces_start,
2140 .next = interfaces_next,
2141 .stop = interfaces_stop,
2142 .show = interfaces_show
2145 static int interfaces_open(struct inode *inode, struct file *file)
2147 int res = seq_open(file, &interfaces_seq_ops);
2150 struct seq_file *seq = file->private_data;
2151 seq->private = inode->i_private;
2156 static const struct file_operations interfaces_proc_fops = {
2157 .owner = THIS_MODULE,
2158 .open = interfaces_open,
2160 .llseek = seq_lseek,
2161 .release = seq_release,
2165 * /sys/kernel/debug/cxgb4vf/ file list.
2167 struct cxgb4vf_debugfs_entry {
2168 const char *name; /* name of debugfs node */
2169 umode_t mode; /* file system mode */
2170 const struct file_operations *fops;
2173 static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2174 { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
2175 { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
2176 { "resources", S_IRUGO, &resources_proc_fops },
2177 { "interfaces", S_IRUGO, &interfaces_proc_fops },
2181 * Module and device initialization and cleanup code.
2182 * ==================================================
2186 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
2187 * directory (debugfs_root) has already been set up.
2189 static int setup_debugfs(struct adapter *adapter)
2193 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2196 * Debugfs support is best effort.
2198 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2199 (void)debugfs_create_file(debugfs_files[i].name,
2200 debugfs_files[i].mode,
2201 adapter->debugfs_root,
2203 debugfs_files[i].fops);
2209 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2210 * it to our caller to tear down the directory (debugfs_root).
2212 static void cleanup_debugfs(struct adapter *adapter)
2214 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2217 * Unlike our sister routine cleanup_proc(), we don't need to remove
2218 * individual entries because a call will be made to
2219 * debugfs_remove_recursive(). We just need to clean up any ancillary
2226 * Perform early "adapter" initialization. This is where we discover what
2227 * adapter parameters we're going to be using and initialize basic adapter
2230 static int adap_init0(struct adapter *adapter)
2232 struct vf_resources *vfres = &adapter->params.vfres;
2233 struct sge_params *sge_params = &adapter->params.sge;
2234 struct sge *s = &adapter->sge;
2235 unsigned int ethqsets;
2240 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2241 * 2.6.31 and later we can't call pci_reset_function() in order to
2242 * issue an FLR because of a self-deadlock on the device semaphore.
2243 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2244 * cases where they're needed -- for instance, some versions of KVM
2245 * fail to reset "Assigned Devices" when the VM reboots. Therefore we
2246 * use the firmware-based reset in order to reset any per-function
2249 err = t4vf_fw_reset(adapter);
2251 dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2256 * Grab basic operational parameters. These will predominantly have
2257 * been set up by the Physical Function Driver or will be hard coded
2258 * into the adapter. We just have to live with them ... Note that
2259 * we _must_ get our VPD parameters before our SGE parameters because
2260 * we need to know the adapter's core clock from the VPD in order to
2261 * properly decode the SGE Timer Values.
2263 err = t4vf_get_dev_params(adapter);
2265 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2266 " device parameters: err=%d\n", err);
2269 err = t4vf_get_vpd_params(adapter);
2271 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2272 " VPD parameters: err=%d\n", err);
2275 err = t4vf_get_sge_params(adapter);
2277 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2278 " SGE parameters: err=%d\n", err);
2281 err = t4vf_get_rss_glb_config(adapter);
2283 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2284 " RSS parameters: err=%d\n", err);
2287 if (adapter->params.rss.mode !=
2288 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2289 dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2290 " mode %d\n", adapter->params.rss.mode);
2293 err = t4vf_sge_init(adapter);
2295 dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2300 /* If we're running on newer firmware, let it know that we're
2301 * prepared to deal with encapsulated CPL messages. Older
2302 * firmware won't understand this and we'll just get
2303 * unencapsulated messages ...
2305 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2306 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2308 (void) t4vf_set_params(adapter, 1, ¶m, &val);
2311 * Retrieve our RX interrupt holdoff timer values and counter
2312 * threshold values from the SGE parameters.
2314 s->timer_val[0] = core_ticks_to_us(adapter,
2315 TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2316 s->timer_val[1] = core_ticks_to_us(adapter,
2317 TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2318 s->timer_val[2] = core_ticks_to_us(adapter,
2319 TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2320 s->timer_val[3] = core_ticks_to_us(adapter,
2321 TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2322 s->timer_val[4] = core_ticks_to_us(adapter,
2323 TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2324 s->timer_val[5] = core_ticks_to_us(adapter,
2325 TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2327 s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2328 s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2329 s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2330 s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
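	/*
	 * Decoding sketch (illustrative only, not driver logic): with the
	 * core clock expressed in kHz, core_ticks_to_us() rescales raw
	 * ticks roughly as
	 *
	 *	us = (ticks * 1000 + cclk / 2) / cclk
	 *
	 * so, assuming a hypothetical 62.5MHz core clock (cclk == 62500),
	 * a raw timer field of 625 ticks decodes to a 10us holdoff.
	 */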
	/*
	 * Grab our Virtual Interface resource allocation, extract the
	 * features that we're interested in and do a bit of sanity testing on
	 * what we discover.
	 */
	err = t4vf_get_vfres(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to get virtual interface"
			" resources: err=%d\n", err);
		return err;
	}

	/*
	 * The number of "ports" which we support is equal to the number of
	 * Virtual Interfaces with which we've been provisioned.
	 */
	adapter->params.nports = vfres->nvi;
	if (adapter->params.nports > MAX_NPORTS) {
		dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
			 " virtual interfaces\n", MAX_NPORTS,
			 adapter->params.nports);
		adapter->params.nports = MAX_NPORTS;
	}

	/*
	 * We need to reserve a number of the ingress queues with Free List
	 * and Interrupt capabilities for special interrupt purposes (like
	 * asynchronous firmware messages, or forwarded interrupts if we're
	 * using MSI).  The rest of the FL/Intr-capable ingress queues will be
	 * matched up one-for-one with Ethernet/Control egress queues in order
	 * to form "Queue Sets" which will be apportioned between the "ports".
	 * For each Queue Set, we'll need the ability to allocate two Egress
	 * Contexts -- one for the Ingress Queue Free List and one for the TX
	 * Ethernet Queue.
	 */
	ethqsets = vfres->niqflint - INGQ_EXTRAS;
	if (vfres->nethctrl != ethqsets) {
		dev_warn(adapter->pdev_dev, "unequal number of [available]"
			 " ingress/egress queues (%d/%d); using minimum for"
			 " number of Queue Sets\n", ethqsets, vfres->nethctrl);
		ethqsets = min(vfres->nethctrl, ethqsets);
	}
	if (vfres->neq < ethqsets*2) {
		dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
			 " to support Queue Sets (%d); reducing allowed Queue"
			 " Sets\n", vfres->neq, ethqsets);
		ethqsets = vfres->neq/2;
	}
	if (ethqsets > MAX_ETH_QSETS) {
		dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
			 " Sets\n", MAX_ETH_QSETS, ethqsets);
		ethqsets = MAX_ETH_QSETS;
	}
	if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
		dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
			 " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
	}
	adapter->sge.max_ethqsets = ethqsets;
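	/*
	 * Worked example (hypothetical provisioning): niqflint == 34 with
	 * INGQ_EXTRAS == 2 leaves 32 candidate Queue Sets; nethctrl == 32
	 * and neq == 64 keep all 32, whereas neq == 48 would cut us back
	 * to 48/2 == 24 Queue Sets.
	 */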
	/*
	 * Check for various parameter sanity issues.  Most checks simply
	 * result in us using fewer resources than our provisioning but we
	 * do need at least one "port" with which to work ...
	 */
	if (adapter->sge.max_ethqsets < adapter->params.nports) {
		dev_warn(adapter->pdev_dev, "only using %d of %d available"
			 " virtual interfaces (too few Queue Sets)\n",
			 adapter->sge.max_ethqsets, adapter->params.nports);
		adapter->params.nports = adapter->sge.max_ethqsets;
	}
	if (adapter->params.nports == 0) {
		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
			"usable!\n");
		return -EINVAL;
	}

	return 0;
}
static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
			     u8 pkt_cnt_idx, unsigned int size,
			     unsigned int iqe_size)
{
	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
			     (pkt_cnt_idx < SGE_NCOUNTERS ?
			      QINTR_CNT_EN_F : 0));
	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
			    ? pkt_cnt_idx
			    : 0);
	rspq->iqe_len = iqe_size;
	rspq->size = size;
}
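/*
 * Usage sketch (hypothetical values): init_rspq(&rspq, 5, SGE_NCOUNTERS,
 * 512, 64) programs a pure-timer holdoff -- timer index 5 with
 * QINTR_CNT_EN_F clear and pktcnt_idx forced to 0 -- while any
 * pkt_cnt_idx below SGE_NCOUNTERS would also arm the packet counter
 * threshold.
 */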
/*
 * Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs.  Most settings can
 * be modified by the admin via ethtool and cxgbtool prior to the adapter
 * being brought up for the first time.
 */
static void cfg_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int q10g, n10g, qidx, pidx, qs;
	size_t iqe_size;

	/*
	 * We should not be called till we know how many Queue Sets we can
	 * support.  In particular, this means that we need to know what kind
	 * of interrupts we'll be using ...
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);

	/*
	 * Count the number of 10GbE Virtual Interfaces that we have.
	 */
	n10g = 0;
	for_each_port(adapter, pidx)
		n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g == 0)
		q10g = 0;
	else {
		int n1g = (adapter->params.nports - n10g);
		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
		if (q10g > num_online_cpus())
			q10g = num_online_cpus();
	}
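	/*
	 * Example with hypothetical numbers: two 10G ports, two 1G ports
	 * and max_ethqsets == 16 give q10g = (16 - 2) / 2 = 7 Queue Sets
	 * per 10G port, further capped at num_online_cpus().
	 */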
	/*
	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
	 * The layout will be established in setup_sge_queues() when the
	 * adapter is brought up for the first time.
	 */
	qidx = 0;
	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
	s->ethqsets = qidx;

	/*
	 * The Ingress Queue Entry Size for our various Response Queues needs
	 * to be big enough to accommodate the largest message we can receive
	 * from the chip/firmware; which is 64 bytes ...
	 */
	iqe_size = 64;

	/*
	 * Set up default Queue Set parameters ... Start off with the
	 * shortest interrupt holdoff timer.
	 */
	for (qs = 0; qs < s->max_ethqsets; qs++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
		struct sge_eth_txq *txq = &s->ethtxq[qs];

		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
		rxq->fl.size = 72;
		txq->q.size = 1024;
	}

	/*
	 * The firmware event queue is used for link state changes and
	 * notifications of TX DMA completions.
	 */
	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
	/*
	 * The forwarded interrupt queue is used when we're in MSI interrupt
	 * mode.  In this mode all interrupts associated with RX queues will
	 * be forwarded to a single queue which we'll associate with our MSI
	 * interrupt vector.  The messages dropped in the forwarded interrupt
	 * queue will indicate which ingress queue needs servicing ...  This
	 * queue needs to be large enough to accommodate all of the ingress
	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
	 * from equalling the CIDX if every ingress queue has an outstanding
	 * interrupt).  The queue doesn't need to be any larger because no
	 * ingress queue will ever have more than one outstanding interrupt at
	 * any time ...
	 */
	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
		  iqe_size);
}
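/*
 * Sizing sketch for the forwarded interrupt queue above (illustrative):
 * at most MSIX_ENTRIES ingress queues can be forwarding interrupts, and
 * each has at most one message outstanding, so MSIX_ENTRIES + 1 entries
 * guarantee the producer index can never advance onto the consumer
 * index.  If MSIX_ENTRIES were, say, 33, the queue would hold 34 entries.
 */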
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adapter, int n)
{
	int i;
	struct port_info *pi;

	/*
	 * While we have too many active Ethernet Queue Sets, iterate across
	 * the "ports" and reduce their individual Queue Set allocations.
	 */
	BUG_ON(n < adapter->params.nports);
	while (n < adapter->sge.ethqsets)
		for_each_port(adapter, i) {
			pi = adap2pinfo(adapter, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adapter->sge.ethqsets--;
				if (adapter->sge.ethqsets <= n)
					break;
			}
		}

	/*
	 * Reassign the starting Queue Sets for each of the "ports" ...
	 */
	n = 0;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
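/*
 * Behavior sketch (hypothetical numbers): three ports provisioned 4/4/4
 * Queue Sets and reduced with n == 10 drop round-robin to 3/3/4, after
 * which first_qset is reassigned to 0/3/6.
 */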
/*
 * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
 * need.  Minimally we need one for every Virtual Interface plus those needed
 * for our "extras".  Note that this process may lower the maximum number of
 * allowed Queue Sets ...
 */
static int enable_msix(struct adapter *adapter)
{
	int i, want, need, nqsets;
	struct msix_entry entries[MSIX_ENTRIES];
	struct sge *s = &adapter->sge;

	for (i = 0; i < MSIX_ENTRIES; ++i)
		entries[i].entry = i;

	/*
	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
	 * plus those needed for our "extras" (for example, the firmware
	 * message queue).  We _need_ at least one "Queue Set" per Virtual
	 * Interface plus those needed for our "extras".  So now we get to see
	 * if the song is right ...
	 */
	want = s->max_ethqsets + MSIX_EXTRAS;
	need = adapter->params.nports + MSIX_EXTRAS;

	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
	if (want < 0)
		return want;

	nqsets = want - MSIX_EXTRAS;
	if (nqsets < s->max_ethqsets) {
		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
			 " for %d Queue Sets\n", nqsets);
		s->max_ethqsets = nqsets;
		if (nqsets < s->ethqsets)
			reduce_ethqs(adapter, nqsets);
	}
	for (i = 0; i < want; ++i)
		adapter->msix_info[i].vec = entries[i].vector;

	return 0;
}
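/*
 * Allocation sketch (hypothetical numbers): with max_ethqsets == 16, two
 * Virtual Interfaces and MSIX_EXTRAS == 1, we ask pci_enable_msix_range()
 * for want == 17 vectors but accept as few as need == 3.  If the platform
 * grants only 9, we run with 9 - 1 == 8 Queue Sets (and reduce_ethqs()
 * rebalances any per-port allocation already made).
 */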
static const struct net_device_ops cxgb4vf_netdev_ops	= {
	.ndo_open		= cxgb4vf_open,
	.ndo_stop		= cxgb4vf_stop,
	.ndo_start_xmit		= t4vf_eth_xmit,
	.ndo_get_stats		= cxgb4vf_get_stats,
	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
	.ndo_change_mtu		= cxgb4vf_change_mtu,
	.ndo_fix_features	= cxgb4vf_fix_features,
	.ndo_set_features	= cxgb4vf_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cxgb4vf_poll_controller,
#endif
};
2619 * "Probe" a device: initialize a device and construct all kernel and driver
2620 * state needed to manage the device. This routine is called "init_one" in
2623 static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2624 const struct pci_device_id *ent)
2629 struct adapter *adapter;
2630 struct port_info *pi;
2631 struct net_device *netdev;
2634 * Print our driver banner the first time we're called to initialize a
2637 pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
2640 * Initialize generic PCI device state.
2642 err = pci_enable_device(pdev);
2644 dev_err(&pdev->dev, "cannot enable PCI device\n");
2649 * Reserve PCI resources for the device. If we can't get them some
2650 * other driver may have already claimed the device ...
2652 err = pci_request_regions(pdev, KBUILD_MODNAME);
2654 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2655 goto err_disable_device;
2659 * Set up our DMA mask: try for 64-bit address masking first and
2660 * fall back to 32-bit if we can't get 64 bits ...
2662 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2664 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2666 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2667 " coherent allocations\n");
2668 goto err_release_regions;
2672 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2674 dev_err(&pdev->dev, "no usable DMA configuration\n");
2675 goto err_release_regions;
2681 * Enable bus mastering for the device ...
2683 pci_set_master(pdev);
2686 * Allocate our adapter data structure and attach it to the device.
2688 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2691 goto err_release_regions;
2693 pci_set_drvdata(pdev, adapter);
2694 adapter->pdev = pdev;
2695 adapter->pdev_dev = &pdev->dev;
2698 * Initialize SMP data synchronization resources.
2700 spin_lock_init(&adapter->stats_lock);
2703 * Map our I/O registers in BAR0.
2705 adapter->regs = pci_ioremap_bar(pdev, 0);
2706 if (!adapter->regs) {
2707 dev_err(&pdev->dev, "cannot map device registers\n");
2709 goto err_free_adapter;
2712 /* Wait for the device to become ready before proceeding ...
2714 err = t4vf_prep_adapter(adapter);
2716 dev_err(adapter->pdev_dev, "device didn't become ready:"
2718 goto err_unmap_bar0;
2721 /* For T5 and later we want to use the new BAR-based User Doorbells,
2722 * so we need to map BAR2 here ...
2724 if (!is_t4(adapter->params.chip)) {
2725 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
2726 pci_resource_len(pdev, 2));
2727 if (!adapter->bar2) {
2728 dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
2730 goto err_unmap_bar0;
2734 * Initialize adapter level features.
2736 adapter->name = pci_name(pdev);
2737 adapter->msg_enable = dflt_msg_enable;
2738 err = adap_init0(adapter);
2743 * Allocate our "adapter ports" and stitch everything together.
2745 pmask = adapter->params.vfres.pmask;
2746 for_each_port(adapter, pidx) {
2750 * We simplistically allocate our virtual interfaces
2751 * sequentially across the port numbers to which we have
2752 * access rights. This should be configurable in some manner
2757 port_id = ffs(pmask) - 1;
2758 pmask &= ~(1 << port_id);
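		/*
		 * Worked example (hypothetical pmask): pmask == 0x5 grants
		 * ports 0 and 2, so the first iteration picks
		 * ffs(0x5) - 1 == 0 and leaves pmask == 0x4; the next
		 * iteration picks port 2.
		 */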
		viid = t4vf_alloc_vi(adapter, port_id);
		if (viid < 0) {
			dev_err(&pdev->dev, "cannot allocate VI for port %d:"
				" err=%d\n", port_id, viid);
			err = viid;
			goto err_free_dev;
		}

		/*
		 * Allocate our network device and stitch things together.
		 */
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_PORT_QSETS);
		if (netdev == NULL) {
			t4vf_free_vi(adapter, viid);
			err = -ENOMEM;
			goto err_free_dev;
		}
		adapter->port[pidx] = netdev;
		SET_NETDEV_DEV(netdev, &pdev->dev);
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->pidx = pidx;
		pi->port_id = port_id;
		pi->viid = viid;

		/*
		 * Initialize the starting state of our "port" and register
		 * it.
		 */
		pi->xact_addr_filt = -1;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
		netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_HIGHDMA;
		netdev->features = netdev->hw_features |
			NETIF_F_HW_VLAN_CTAG_TX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4vf_netdev_ops;
		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;

		/*
		 * Initialize the hardware/software state for the port.
		 */
		err = t4vf_port_init(adapter, pidx);
		if (err) {
			dev_err(&pdev->dev, "cannot initialize port %d\n",
				pidx);
			goto err_free_dev;
		}
	}

	/*
	 * The "card" is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole "card" but rather proceed
	 * only with the ports we manage to register successfully.  However we
	 * must register at least one net device.
	 */
	for_each_port(adapter, pidx) {
		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;

		err = register_netdev(netdev);
		if (err) {
			dev_warn(&pdev->dev, "cannot register net device %s,"
				 " skipping\n", netdev->name);
			continue;
		}

		set_bit(pidx, &adapter->registered_device_map);
	}
	if (adapter->registered_device_map == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto err_free_dev;
	}

	/*
	 * Set up our debugfs entries.
	 */
	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
		adapter->debugfs_root =
			debugfs_create_dir(pci_name(pdev),
					   cxgb4vf_debugfs_root);
		if (IS_ERR_OR_NULL(adapter->debugfs_root))
			dev_warn(&pdev->dev, "could not create debugfs"
				 " directory");
		else
			setup_debugfs(adapter);
	}

	/*
	 * See what interrupts we'll be using.  If we've been configured to
	 * use MSI-X interrupts, try to enable them but fall back to using
	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
	 * get MSI interrupts we bail with the error.
	 */
	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else {
		err = pci_enable_msi(pdev);
		if (err) {
			dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
				" err=%d\n",
				msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
			goto err_free_debugfs;
		}
		adapter->flags |= USING_MSI;
	}

	/*
	 * Now that we know how many "ports" we have and what their types are,
	 * and how many Queue Sets we can support, we can configure our queue
	 * resources.
	 */
	cfg_queues(adapter);

	/*
	 * Print a short notice on the existence and configuration of the new
	 * VF network device ...
	 */
	for_each_port(adapter, pidx) {
		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
			 adapter->port[pidx]->name,
			 (adapter->flags & USING_MSIX) ? "MSI-X" :
			 (adapter->flags & USING_MSI)  ? "MSI" : "");
	}

	/*
	 * Return success!
	 */
	return 0;

	/*
	 * Error recovery and exit code.  Unwind state that's been created
	 * so far and return the error.
	 */
err_free_debugfs:
	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
		cleanup_debugfs(adapter);
		debugfs_remove_recursive(adapter->debugfs_root);
	}

err_free_dev:
	for_each_port(adapter, pidx) {
		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;
		pi = netdev_priv(netdev);
		t4vf_free_vi(adapter, pi->viid);
		if (test_bit(pidx, &adapter->registered_device_map))
			unregister_netdev(netdev);
		free_netdev(netdev);
	}

err_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);

err_unmap_bar0:
	iounmap(adapter->regs);

err_free_adapter:
	kfree(adapter);

err_release_regions:
	pci_release_regions(pdev);
	pci_clear_master(pdev);

err_disable_device:
	pci_disable_device(pdev);

	return err;
}
2945 * "Remove" a device: tear down all kernel and driver state created in the
2946 * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
2947 * that this is called "remove_one" in the PF Driver.)
2949 static void cxgb4vf_pci_remove(struct pci_dev *pdev)
2951 struct adapter *adapter = pci_get_drvdata(pdev);
2954 * Tear down driver state associated with device.
2960 * Stop all of our activity. Unregister network port,
2961 * disable interrupts, etc.
2963 for_each_port(adapter, pidx)
2964 if (test_bit(pidx, &adapter->registered_device_map))
2965 unregister_netdev(adapter->port[pidx]);
2966 t4vf_sge_stop(adapter);
2967 if (adapter->flags & USING_MSIX) {
2968 pci_disable_msix(adapter->pdev);
2969 adapter->flags &= ~USING_MSIX;
2970 } else if (adapter->flags & USING_MSI) {
2971 pci_disable_msi(adapter->pdev);
2972 adapter->flags &= ~USING_MSI;
2976 * Tear down our debugfs entries.
2978 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2979 cleanup_debugfs(adapter);
2980 debugfs_remove_recursive(adapter->debugfs_root);
2984 * Free all of the various resources which we've acquired ...
2986 t4vf_free_sge_resources(adapter);
2987 for_each_port(adapter, pidx) {
2988 struct net_device *netdev = adapter->port[pidx];
2989 struct port_info *pi;
2994 pi = netdev_priv(netdev);
2995 t4vf_free_vi(adapter, pi->viid);
2996 free_netdev(netdev);
2998 iounmap(adapter->regs);
2999 if (!is_t4(adapter->params.chip))
3000 iounmap(adapter->bar2);
3005 * Disable the device and release its PCI resources.
3007 pci_disable_device(pdev);
3008 pci_clear_master(pdev);
3009 pci_release_regions(pdev);
3013 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
3016 static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
3018 struct adapter *adapter;
3021 adapter = pci_get_drvdata(pdev);
3025 /* Disable all Virtual Interfaces. This will shut down the
3026 * delivery of all ingress packets into the chip for these
3027 * Virtual Interfaces.
3029 for_each_port(adapter, pidx)
3030 if (test_bit(pidx, &adapter->registered_device_map))
3031 unregister_netdev(adapter->port[pidx]);
3033 /* Free up all Queues which will prevent further DMA and
3034 * Interrupts allowing various internal pathways to drain.
3036 t4vf_sge_stop(adapter);
3037 if (adapter->flags & USING_MSIX) {
3038 pci_disable_msix(adapter->pdev);
3039 adapter->flags &= ~USING_MSIX;
3040 } else if (adapter->flags & USING_MSI) {
3041 pci_disable_msi(adapter->pdev);
3042 adapter->flags &= ~USING_MSI;
3046 * Free up all Queues which will prevent further DMA and
3047 * Interrupts allowing various internal pathways to drain.
3049 t4vf_free_sge_resources(adapter);
3050 pci_set_drvdata(pdev, NULL);
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION	0x8

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }

#include "../cxgb4/t4_pci_id_tbl.h"
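/*
 * Illustrative expansion (assumed mechanism; see t4_pci_id_tbl.h for the
 * authoritative details): the shared header folds CH_PCI_DEVICE_ID_FUNCTION
 * into each listed device ID, so a hypothetical entry for device 0x4001
 * would effectively expand to
 *
 *	{ PCI_VDEVICE(CHELSIO, (0x4801)), 0 },
 *
 * i.e. the VF function number (0x8) selects the VF variant of each device
 * in the generated cxgb4vf_pci_tbl[].
 */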
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);

static struct pci_driver cxgb4vf_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= cxgb4vf_pci_tbl,
	.probe		= cxgb4vf_pci_probe,
	.remove		= cxgb4vf_pci_remove,
	.shutdown	= cxgb4vf_pci_shutdown,
};
/*
 * Initialize global driver state.
 */
static int __init cxgb4vf_module_init(void)
{
	int ret;

	/*
	 * Vet our module parameters.
	 */
	if (msi != MSI_MSIX && msi != MSI_MSI) {
		pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
			msi, MSI_MSIX, MSI_MSI);
		return -EINVAL;
	}

	/* Debugfs support is optional, just warn if this fails */
	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4vf_driver);
	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
		debugfs_remove(cxgb4vf_debugfs_root);
	return ret;
}

/*
 * Tear down global driver state.
 */
static void __exit cxgb4vf_module_exit(void)
{
	pci_unregister_driver(&cxgb4vf_driver);
	debugfs_remove(cxgb4vf_debugfs_root);
}

module_init(cxgb4vf_module_init);
module_exit(cxgb4vf_module_exit);