/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"
#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
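/* Worked example, assuming WQ_ENET_LEN_BITS is 14 as in the VIC WQ
 * descriptor headers: WQ_ENET_MAX_DESC_LEN = 16384 and MAX_TSO = 65536,
 * so a maximal TSO send needs 65536 / 16384 = 4 full descriptors plus one
 * for a partial remainder, i.e. ENIC_DESC_MAX_SPLITS = 5 per fragment.
 */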
#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

#define RX_COPYBREAK_DEFAULT		256
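/* Frames no longer than rx_copybreak bytes are copied into a small, freshly
 * allocated skb so the original ring buffer and its DMA mapping can be
 * reused; see enic_rxcopybreak() below.
 */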
/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
#define ENIC_LARGE_PKT_THRESHOLD		1000
#define ENIC_MAX_COALESCE_TIMERS		10
/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	/* { rx_rate, range_percent } rows not reproduced in this excerpt */
};
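/* Reading the table (hypothetical row): an entry { 5000, 40 } would mean
 * "below 5000 Mbps, set the coalescing timer 40% of the way between
 * range_start and range_end"; see enic_calc_int_moderation() below.
 */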
/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};
int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}
static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}
static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}
static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}
static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}
#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
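/* For example, ENIC_TEST_INTR(pba, 2) is nonzero iff interrupt source 2 is
 * set in the posted-interrupt bitmask read from the hardware.
 */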
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr))
		napi_schedule_irqoff(&enic->napi[0]);
	else
		vnic_intr_unmask(&enic->intr[io_intr]);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule_irqoff(&enic->napi[0]);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}
static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		enic_queue_wq_desc_cont(wq, skb,
			skb_frag_dma_map(&enic->pdev->dev,
				frag, 0, skb_frag_size(frag),
				DMA_TO_DEVICE),
			skb_frag_size(frag),
			(len_left == 0),	/* EOP? */
			loopback);
	}
}
static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}
static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}
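/* Example: for an IPv4 TCP packet, skb_checksum_start_offset() points at the
 * TCP header, and skb->csum_offset is offsetof(struct tcphdr, check) == 16,
 * so csum_offset above lands exactly on the TCP checksum field.
 */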
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	const skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
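	/* The '~' stores the pseudo-header sum without the final one's
	 * complement inversion, i.e. the running-sum form the hardware
	 * needs as a seed when it checksums each resulting TCP segment.
	 */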
	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				 (len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned long flags;
	unsigned int txq_map;
	struct netdev_queue *txq;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];
	txq = netdev_get_tx_queue(netdev, txq_map);

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[txq_map], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(txq);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(txq);
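	/* Ring the doorbell only at the end of a burst: when skb->xmit_more
	 * is set the stack has more packets queued for us, so one MMIO write
	 * can post the whole batch.  If the queue just stopped, ring now so
	 * the posted work is not stranded.
	 */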
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);

	return NETDEV_TX_OK;
}
/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
						struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}
static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}
void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}
static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}
static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}
/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}
/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);

	schedule_work(&enic->reset);
}
static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}
static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			memset(pp->mac_addr, 0, ETH_ALEN);
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	if (vf == PORT_SELF_VF)
		memset(pp->vf_mac, 0, ETH_ALEN);

	return err;
}
static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
	buf->os_buf = NULL;
}
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;
	struct vnic_rq_buf *buf = rq->to_use;

	if (buf->os_buf) {
		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
				   buf->len);

		return 0;
	}
	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}
static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}
static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (len > enic->rx_copybreak)
		return false;
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
				    DMA_FROM_DEVICE);
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;

	return true;
}
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
			buf->os_buf = NULL;
			pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data - NET_IP_ALIGN);

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if (netdev->features & NETIF_F_RXHASH) {
			skb_set_hash(skb, rss_hash,
				     (rss_type &
				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
		}

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (enic_poll_busy_polling(rq) ||
		    !(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&enic->napi[q_number], skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;
	}
}
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
				       enic_wq_service, NULL);

	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
		if (wq_work_done > 0)
			vnic_intr_return_credits(&enic->intr[intr],
						 wq_work_done,
						 0 /* dont unmask intr */,
						 0 /* dont reset intr timer */);
		return rq_work_done;
	}

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
			rq_work_to_do, enic_rq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;
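	/* Claiming the whole budget keeps this context on the NAPI poll
	 * list, so the core calls us again and we get another chance to
	 * refill the ring.
	 */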
	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}
	enic_poll_unlock_napi(&enic->rq[cq_rq]);

	return rq_work_done;
}
static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}
static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */
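	/* Worked example: 1,250,000 bytes seen over a 2,000 us window gives
	 * (1,250,000 * 8) / 2,000 = 5,000 bits/us = 5,000 Mbps, which is
	 * then looked up in mod_table[] to pick a range percentage.
	 */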
	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);

	/* Damping */
	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}
#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
		if (unlikely(!enic->netdev->rx_cpu_rmap))
			return;
		for (i = 0; i < enic->rq_count; i++) {
			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
					       enic->msix_entry[i].vector);
			if (unlikely(res)) {
				enic_free_rx_cpu_rmap(enic);
				return;
			}
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */
#ifdef CONFIG_NET_RX_BUSY_POLL
int enic_busy_poll(struct napi_struct *napi)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = -1; /* clean all pkts possible */
	unsigned int work_done;

	if (!enic_poll_lock_poll(&enic->rq[rq]))
		return LL_FLUSH_BUSY;
	work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
				    enic_rq_service, NULL);

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
					 work_done, 0, 0);
	vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		enic_calc_int_moderation(enic, &enic->rq[rq]);
	enic_poll_unlock_poll(&enic->rq[rq]);

	return work_done;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
	struct vnic_wq *wq = &enic->wq[wq_index];
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = -1; /* clean all desc possible */
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = wq->index;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);
	wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
				       enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
				 0 /* don't unmask intr */,
				 1 /* reset intr timer */);
	if (!wq_work_done) {
		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
		return 0;
	}

	return budget;
}
static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done = 0;
	int err;

	if (!enic_poll_lock_napi(&enic->rq[rq]))
		return budget;

	if (budget > 0)
		work_done = vnic_cq_service(&enic->cq[cq],
			work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes
		 * the intr coalescing timer value based on
		 * the traffic.  This is supported only in
		 * the case of MSI-x mode
		 */
		enic_calc_int_moderation(enic, &enic->rq[rq]);

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[rq]);
		vnic_intr_unmask(&enic->intr[intr]);
	}
	enic_poll_unlock_napi(&enic->rq[rq]);

	return work_done;
}
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}
static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	enic_free_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	enic_set_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			int wq = enic_cq_wq(enic, i);

			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[wq];
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}
static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}
static void enic_set_rx_coal_setting(struct enic *enic)
{
	unsigned int speed;
	int index = -1;
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;

	/* If intr mode is not MSIX, do not do adaptive coalescing */
	if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
		netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
		return;
	}

	/* 1. Read the link speed from fw
	 * 2. Pick the default range for the speed
	 * 3. Update it in enic->rx_coalesce_setting
	 */
	speed = vnic_dev_port_speed(enic->vdev);
	if (ENIC_LINK_SPEED_10G < speed)
		index = ENIC_LINK_40G_INDEX;
	else if (ENIC_LINK_SPEED_4G < speed)
		index = ENIC_LINK_10G_INDEX;
	else
		index = ENIC_LINK_4G_INDEX;

	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;

	/* Start with the value provided by UCSM */
	for (index = 0; index < enic->rq_count; index++)
		enic->cq[index].cur_rx_coal_timeval =
				enic->config.intr_timer_usec;

	rx_coal->use_adaptive_rx_coalesce = 1;
}
static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock_bh(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}
static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++) {
		enic_busy_poll_init_lock(&enic->rq[i]);
		napi_enable(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);
	enic_rfs_flw_tbl_init(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);
	enic_rfs_flw_tbl_free(enic);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++) {
		napi_disable(&enic->napi[i]);
		local_bh_disable();
		while (!enic_poll_lock_napi(&enic->rq[i]))
			mdelay(1);
		local_bh_enable();
	}

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}
static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[enic_cq_wq(enic, i)]);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}
static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}
static int enic_set_rsskey(struct enic *enic)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	union vnic_rss_key rss_key = {
		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
		.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
		.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
		.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
	};
	int err;
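	/* The key bytes above are ASCII: "UCSawesome", "PALOunique",
	 * "LINUXrocks" and "ENICiscool".
	 */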
	rss_key_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_key), &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}
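/* With rss_hash_bits == 7 (see enic_set_rss_nic_cfg() below) the loop above
 * fills a 128-entry indirection table round-robin, so hash bucket i steers
 * packets to RQ (i % enic->rq_count).
 */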
static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}
static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"Failed to set RSS cpu indirection table.");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->enic_api_lock);
	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */
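	/* Example: with n = 8 RQs and m = 8 WQs the driver asks for
	 * 8 + 8 + 2 = 18 MSI-X vectors: one per queue for I/O, one for
	 * WQ/RQ errors, and one for notifications.
	 */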
	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  n + m + 2, n + m + 2) > 0) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  1 + m + 2, 1 + m + 2) > 0) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= enic_busy_poll,
#endif
};
static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= enic_busy_poll,
#endif
};
static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++) {
		napi_hash_del(&enic->napi[i]);
		netif_napi_del(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}
static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		napi_hash_add(&enic->napi[0]);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix_rq, NAPI_POLL_WEIGHT);
			napi_hash_add(&enic->napi[i]);
		}
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
				enic_poll_msix_wq, NAPI_POLL_WEIGHT);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}
static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif
	int num_pps = 1;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev_mqs(sizeof(struct enic),
				    ENIC_RQ_MAX, ENIC_WQ_MAX);
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 64-bit first, and
	 * fail to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 64);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting."
					" pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif

	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);
	spin_lock_init(&enic->enic_api_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	netif_set_real_num_tx_queues(netdev, enic->wq_count);
	netif_set_real_num_rx_queues(netdev, enic->rq_count);

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	enic_set_rx_coal_setting(enic);
	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	/* rx coalesce time already got initialized. This gets used
	 * if adaptive coal is turned off
	 */
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

#ifdef CONFIG_RFS_ACCEL
	netdev->hw_features |= NETIF_F_NTUPLE;
#endif

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}
	enic->rx_copybreak = RX_COPYBREAK_DEFAULT;

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
err_out_vnic_unregister:
#endif
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}
static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);