/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
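
/* Worked example (a sketch, assuming WQ_ENET_LEN_BITS is 14 as defined
 * by the WQ descriptor format, i.e. 16 KiB max descriptor length): a
 * maximal 64 KiB TSO send needs 65536 / 16384 + 1 = 5 descriptors, so
 * ENIC_DESC_MAX_SPLITS is the worst-case descriptor count for a single
 * buffer.
 */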

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		enic_queue_wq_desc_cont(wq, skb,
			skb_frag_dma_map(&enic->pdev->dev,
					 frag, 0, skb_frag_size(frag),
					 DMA_TO_DEVICE),
			skb_frag_size(frag),
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

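/* Note for the TSO path below: the TCP checksum field is preloaded with
 * the pseudo-header sum computed over saddr/daddr/IPPROTO_TCP with a
 * zero length, via csum_tcpudp_magic() (IPv4) or csum_ipv6_magic()
 * (IPv6); the hardware then folds the per-segment length into each TCP
 * segment it carves out of the mss-sized stream.
 */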
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned long flags;
	unsigned int txq_map;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[txq_map], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));

	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);

	return NETDEV_TX_OK;
}

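/* Flow-control note: the queue is stopped above once fewer than
 * MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors remain (the worst
 * case one skb can consume), and is woken again by enic_wq_service()
 * when at least that many descriptors have completed.
 */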
/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
	struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

void enic_reset_addr_lists(struct enic *enic)
{
	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

static void enic_update_multicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mc_count = netdev_mc_count(netdev);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"multicast addresses\n",
			ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del multicast addrs.  We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
				break;
		if (j == mc_count)
			enic_dev_del_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
				break;
		if (j == enic->mc_count)
			enic_dev_add_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;
}

static void enic_update_unicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int uc_count = netdev_uc_count(netdev);
	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"unicast addresses\n",
			ENIC_UNICAST_PERFECT_FILTERS, uc_count);
		uc_count = ENIC_UNICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del unicast addrs.  We keep the
	 * addrs from the last call in enic->uc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		if (i == uc_count)
			break;
		memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->uc_count; i++) {
		for (j = 0; j < uc_count; j++)
			if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
				break;
		if (j == uc_count)
			enic_dev_del_addr(enic, enic->uc_addr[i]);
	}

	for (i = 0; i < uc_count; i++) {
		for (j = 0; j < enic->uc_count; j++)
			if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
				break;
		if (j == enic->uc_count)
			enic_dev_add_addr(enic, uc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < uc_count; i++)
		memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

	enic->uc_count = uc_count;
}

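/* The two update helpers above diff the new list against the previously
 * programmed one in O(old * new) time, which is cheap here because both
 * lists are bounded by ENIC_UNICAST_PERFECT_FILTERS /
 * ENIC_MULTICAST_PERFECT_FILTERS.
 */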
/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		enic_update_unicast_addr_list(enic);
		if (!allmulti)
			enic_update_multicast_addr_list(enic);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			memset(pp->mac_addr, 0, ETH_ALEN);
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	if (vf == PORT_SELF_VF)
		memset(pp->vf_mac, 0, ETH_ALEN);

	return err;
}

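/* The IFLA_PORT_* attributes consumed above arrive through the
 * ndo_set_vf_port netlink interface; enic_get_vf_port() below is the
 * mirror-image get path that reports the applied port profile back.
 */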
static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if (netdev->features & NETIF_F_RXHASH) {
			skb_set_hash(skb, rss_hash,
				     (rss_type &
				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
		}

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		if (netdev->features & NETIF_F_GRO)
			napi_gro_receive(&enic->napi[q_number], skb);
		else
			netif_receive_skb(skb);
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

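/* NAPI note for the pollers below: RQ work is bounded by the NAPI
 * budget, WQ completions are drained without limit, and interrupt
 * credits equal to the work done are returned so the device's
 * coalescing logic sees an accurate event count.
 */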
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done = 0;
	int err;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[cq],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_rq;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_wq;
			enic->msix[intr].devid = enic;
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]);	/* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix_rq(enic->msix_entry[intr].vector,
				&enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

static int enic_set_rsskey(struct enic *enic)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	union vnic_rss_key rss_key = {
		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
		.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
		.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
		.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
	};
	int err;

	rss_key_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_key), &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

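/* The RSS key bytes above are ASCII in disguise: the four rows decode
 * to "UCSawesome", "PALOunique", "LINUXrocks" and "ENICiscool".
 */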
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}

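/* Worked example for the indirection table above: with rss_hash_bits ==
 * 7 the table has 128 entries; with e.g. four RQs they are filled
 * round-robin as 0, 1, 2, 3, 0, 1, ... so the low 7 bits of the RSS
 * hash select the receive queue.
 */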
static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock(&enic->devcmd_lock);

	return err;
}

static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"Failed to set RSS cpu indirection table.");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}

static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->enic_api_lock);
	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}

static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  n + m + 2, n + m + 2) > 0) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  1 + m + 2, 1 + m + 2) > 0) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

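/* Example MSI-X layout produced above (a sketch, for n RQs and m WQs):
 * vectors 0..n-1 service the RQs, n..n+m-1 the WQs, vector n+m the
 * WQ/RQ error interrupt, and vector n+m+1 the notify interrupt,
 * matching the enic_msix_*_intr() helpers used by the ISRs.
 */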
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}

static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats64 = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = enic_set_rx_mode,
	.ndo_set_mac_address = enic_set_mac_address_dynamic,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
	.ndo_set_vf_port = enic_set_vf_port,
	.ndo_get_vf_port = enic_get_vf_port,
	.ndo_set_vf_mac = enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats64 = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = enic_set_mac_address,
	.ndo_set_rx_mode = enic_set_rx_mode,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
	.ndo_set_vf_port = enic_set_vf_port,
	.ndo_get_vf_port = enic_get_vf_port,
	.ndo_set_vf_mac = enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};

static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++)
		netif_napi_del(&enic->napi[i]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}

static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++)
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}

static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif
	int num_pps = 1;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev_mqs(sizeof(struct enic),
				    ENIC_RQ_MAX, ENIC_WQ_MAX);
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 64-bit first, and
	 * fail to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 64);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

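	/* Note: using_dac set above (64-bit DMA mask accepted) is what
	 * later gates NETIF_F_HIGHDMA when netdev->features are set up.
	 */
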
	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting."
					" pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif

	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);
	spin_lock_init(&enic->enic_api_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	netif_set_real_num_tx_queues(netdev, enic->wq_count);
	netif_set_real_num_rx_queues(netdev, enic->rq_count);

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
err_out_vnic_unregister:
#endif
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}

static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);