/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */
#include <linux/ip.h>
#include <linux/in.h>

#include <linux/jhash.h>
#include <net/arp.h>		/* For arp_tbl */
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
#include <linux/pci.h>
#define DRV_VERSION "1.0.0"

const char ipoib_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};
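/*
 * Default broadcast address: an all-ones flags/QPN dword (0x00ffffff)
 * followed by the IPv4 broadcast MGID.  Bytes 8 and 9 (the P_Key) are
 * patched in later, in ipoib_add_port(), once the port's P_Key is known.
 */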
static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};
struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;
static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data);
static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one,
	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	netif_carrier_off(dev);
	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
	priv->sm_fullmember_sendonly_support = false;

	if (ipoib_ib_dev_open(dev)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return -1;
		goto err_disable;
	}

	if (ipoib_ib_dev_up(dev))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	netif_start_queue(dev);
	return 0;

err_stop:
	ipoib_ib_dev_stop(dev);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
	return -EINVAL;
}
static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	return 0;
}
static void ipoib_uninit(struct net_device *dev)
{
	ipoib_dev_cleanup(dev);
}
static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;
	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
	return 0;
}
/* Called with an RCU read lock taken */
static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
					struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct in_device *in_dev;
	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
	__be32 ret_addr;

	switch (addr->sa_family) {
	case AF_INET:
		in_dev = in_dev_get(dev);
		if (!in_dev)
			return false;

		ret_addr = inet_confirm_addr(net, in_dev, 0,
					     addr_in->sin_addr.s_addr,
					     RT_SCOPE_HOST);
		in_dev_put(in_dev);
		if (ret_addr)
			return true;
		break;
	case AF_INET6:
		if (IS_ENABLED(CONFIG_IPV6) &&
		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
			return true;
		break;
	}
	return false;
}
/**
 * Find the master net_device on top of the given net_device.
 * @dev: base IPoIB net_device
 *
 * Returns the master net_device with a reference held, or the same
 * net_device if no master exists.
 */
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
	struct net_device *master;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		dev_hold(master);
	rcu_read_unlock();

	if (master)
		return master;

	dev_hold(dev);
	return dev;
}
/**
 * Find a net_device matching the given address, which is an upper device of
 * the given net_device.
 * @addr: IP address to look for.
 * @dev: base IPoIB net_device
 *
 * If found, returns the net_device with a reference held. Otherwise returns
 * NULL.
 */
static struct net_device *ipoib_get_net_dev_match_addr(
		const struct sockaddr *addr, struct net_device *dev)
{
	struct net_device *upper,
			  *result = NULL;
	struct list_head *iter;

	rcu_read_lock();
	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
		dev_hold(dev);
		result = dev;
		goto out;
	}

	netdev_for_each_all_upper_dev_rcu(dev, upper, iter) {
		if (ipoib_is_dev_match_addr_rcu(addr, upper)) {
			dev_hold(upper);
			result = upper;
			break;
		}
	}
out:
	rcu_read_unlock();
	return result;
}
/* returns the number of IPoIB netdevs on top of a given ipoib device
 * matching a pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
				     const union ib_gid *gid,
				     u16 pkey_index,
				     const struct sockaddr *addr,
				     int nesting,
				     struct net_device **found_net_dev)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *net_dev = NULL;
	int matches = 0;

	if (priv->pkey_index == pkey_index &&
	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
		if (!addr) {
			net_dev = ipoib_get_master_net_dev(priv->dev);
		} else {
			/* Verify the net_device matches the IP address, as
			 * IPoIB child devices currently share a GID. */
			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
		}
		if (net_dev) {
			if (!*found_net_dev)
				*found_net_dev = net_dev;
			else
				dev_put(net_dev);
			++matches;
		}
	}

	/* Check child interfaces */
	down_read_nested(&priv->vlan_rwsem, nesting);
	list_for_each_entry(child_priv, &priv->child_intfs, list) {
		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
						     pkey_index, addr,
						     nesting + 1,
						     found_net_dev);
		if (matches > 1)
			break;
	}
	up_read(&priv->vlan_rwsem);

	return matches;
}
/* Returns the number of matching net_devs found (between 0 and 2). Also
 * return the matching net_device in the @net_dev parameter, holding a
 * reference to the net_device, if the number of matches >= 1 */
static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
					 u16 pkey_index,
					 const union ib_gid *gid,
					 const struct sockaddr *addr,
					 struct net_device **net_dev)
{
	struct ipoib_dev_priv *priv;
	int matches = 0;

	*net_dev = NULL;

	list_for_each_entry(priv, dev_list, list) {
		if (priv->port != port)
			continue;

		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
						     addr, 0, net_dev);
		if (matches > 1)
			break;
	}

	return matches;
}
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data)
{
	struct net_device *net_dev;
	struct list_head *dev_list = client_data;
	u16 pkey_index;
	int matches;
	int ret;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
	if (ret)
		return NULL;

	if (!dev_list)
		return NULL;

	/* See if we can find a unique device matching the L2 parameters */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, NULL, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	case 1:
		return net_dev;
	}

	dev_put(net_dev);

	/* Couldn't find a unique device with L2 parameters only. Use L3
	 * address to uniquely match the net device */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, addr, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	default:
		dev_warn_ratelimited(&dev->dev,
				     "duplicate IP address detected\n");
		/* Fall through */
	case 1:
		return net_dev;
	}
}
int ipoib_set_mode(struct net_device *dev, const char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
		rtnl_unlock();
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	return -EINVAL;
}
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);
		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}
static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);
		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);
	list_add_tail(&path->list, &priv->path_list);
	return 0;
}
static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(netdev_priv(dev), "path_free\n");

	/* remove all neigh connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);
	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof(union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
			  be16_to_cpu(path->pathrec.dlid),
			  path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}
struct classport_info_context {
	struct ipoib_dev_priv	*priv;
	struct completion	done;
	struct ib_sa_query	*sa_query;
};

static void classport_info_query_cb(int status, struct ib_class_port_info *rec,
				    void *context)
{
	struct classport_info_context *cb_ctx = context;
	struct ipoib_dev_priv *priv;

	WARN_ON(!context);

	priv = cb_ctx->priv;

	if (status || !rec) {
		pr_debug("device: %s failed query classport_info status: %d\n",
			 priv->dev->name, status);
		/* keeps the default, will try next mcast_restart */
		priv->sm_fullmember_sendonly_support = false;
		goto out;
	}

	if (ib_get_cpi_capmask2(rec) &
	    IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT) {
		pr_debug("device: %s enabled fullmember-sendonly for sendonly MCG\n",
			 priv->dev->name);
		priv->sm_fullmember_sendonly_support = true;
	} else {
		pr_debug("device: %s disabled fullmember-sendonly for sendonly MCG\n",
			 priv->dev->name);
		priv->sm_fullmember_sendonly_support = false;
	}

out:
	complete(&cb_ctx->done);
}
int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv)
{
	struct classport_info_context *callback_context;
	int ret;

	callback_context = kmalloc(sizeof(*callback_context), GFP_KERNEL);
	if (!callback_context)
		return -ENOMEM;

	callback_context->priv = priv;
	init_completion(&callback_context->done);

	ret = ib_sa_classport_info_rec_query(&ipoib_sa_client,
					     priv->ca, priv->port, 3000,
					     GFP_KERNEL,
					     classport_info_query_cb,
					     callback_context,
					     &callback_context->sa_query);
	if (ret < 0) {
		pr_info("%s failed to send ib_sa_classport_info query, ret: %d\n",
			priv->dev->name, ret);
		kfree(callback_context);
		return ret;
	}

	/* wait for the callback to finish before returning */
	wait_for_completion(&callback_context->done);
	kfree(callback_context);

	return ret;
}
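/*
 * Flush all cached path records.  Note the unlock/relock dance below:
 * both locks must be dropped before waiting on path->done, since the
 * SA completion that signals it takes priv->lock itself.
 */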
void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
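/*
 * SA path record query completion: on success, turn the returned path
 * record into an address handle, hand it to every neigh waiting on this
 * path, and requeue the skbs that were parked during the lookup.
 */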
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (IS_ERR_OR_NULL(ah))
		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);
	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}
static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}
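/*
 * Slow transmit path for a destination with no cached neigh: allocate
 * one, find or create the path record, and either send immediately or
 * park the skb on the neigh queue until path resolution completes.
 */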
static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
			   struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return;
		}
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
			__skb_queue_tail(&neigh->queue, skb);
		else
			goto err_drop;
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return;

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_cb *cb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, cb->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, cb->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
				__skb_queue_tail(&path->queue, skb);
			} else {
				++dev->stats.tx_dropped;
				dev_kfree_skb_any(skb);
			}

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_cb *cb = ipoib_skb_cb(skb);
	struct ipoib_header *header;
	unsigned long flags;

	header = (struct ipoib_header *) skb->data;

	if (unlikely(cb->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP)) &&
		    (header->proto != htons(ETH_P_TIPC))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast */
		cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		cb->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, cb->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TIPC):
		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (unlikely(!neigh)) {
			neigh_add_path(skb, cb->hwaddr, dev);
			return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* for unicast ARP and RARP, always perform a path lookup */
		unicast_arp_send(skb, dev, cb);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah) {
		ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
		goto unref;
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}
static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}
static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;
	struct ipoib_cb *cb = ipoib_skb_cb(skb);

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * We don't rely on the dst_entry structure; always stuff the
	 * destination address into skb->cb so we can figure out where
	 * to send the packet later.
	 */
	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);

	return sizeof *header;
}
static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(priv->wq, &priv->restart_task);
}
static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}
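/*
 * An IPoIB hardware address is 20 bytes: one flags/QPN dword followed
 * by the 16-byte GID.  The hash below mixes the masked QPN with the
 * last two GID dwords (the port GUID); the subnet prefix contributes
 * no entropy and is skipped.
 */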
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used, as one cannot connect to the
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	/* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);
	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}
static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;
	LIST_HEAD(remove_list);

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle possible race condition */
	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods */
			if (time_after(neigh_obsolete, neigh->alive)) {

				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);

				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_mcast_remove_list(&remove_list);
}
static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
				   arp_tbl.gc_interval);
}
static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
					    struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}
struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* Another thread may have added this neigh already, and a hash
	 * resize may have taken place, so recompute the hash and search
	 * again before inserting.
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:
	return neigh;
}
void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;

	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(netdev_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}
static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}
void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from parent list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}
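/*
 * The neigh hash is sized from the core ARP table's gc_thresh3 (the
 * system-wide neighbour table limit), rounded up to a power of two so
 * bucket selection is a simple mask.
 */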
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	RCU_INIT_POINTER(ntbl->htbl, htbl);
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						     struct ipoib_neigh_hash,
						     rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neigh connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof(union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}
static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	/* Stop GC; if called from an init failure path, the GC work
	 * still needs to be cancelled here. */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}
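/*
 * The RX ring is allocated with kzalloc(); the TX ring uses vzalloc(),
 * which avoids requiring a large physically contiguous allocation when
 * send_queue_size is big.
 */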
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring =	kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	/*
	 * Must be after ipoib_ib_dev_init so we can allocate a per
	 * device wq there and use it here
	 */
	if (ipoib_neigh_hash_init(priv) < 0)
		goto out_dev_uninit;

	return 0;

out_dev_uninit:
	ipoib_ib_dev_cleanup(dev);

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}
void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	/*
	 * Must be before ipoib_ib_dev_cleanup or we delete an in-use
	 * workqueue
	 */
	ipoib_neigh_hash_uninit(dev);

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}
static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
}

static int ipoib_get_vf_config(struct net_device *dev, int vf,
			       struct ifla_vf_info *ivf)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int err;

	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
	if (err)
		return err;

	ivf->vf = vf;

	return 0;
}

static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
		return -EINVAL;

	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}

static int ipoib_get_vf_stats(struct net_device *dev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
}
static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};
static const struct net_device_ops ipoib_netdev_ops_pf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
	.ndo_set_vf_link_state	 = ipoib_set_vf_link_state,
	.ndo_get_vf_config	 = ipoib_get_vf_config,
	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
};
void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
		dev->netdev_ops	= &ipoib_netdev_ops_vf;
	else
		dev->netdev_ops	= &ipoib_netdev_ops_pf;

	dev->header_ops		 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len	 = IPOIB_ENCAP_LEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	netif_keep_dst(dev);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	priv->dev = dev;

	spin_lock_init(&priv->lock);

	init_rwsem(&priv->vlan_rwsem);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int)sizeof(struct ipoib_dev_priv), name,
			   NET_NAME_UNKNOWN, ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}
static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}
void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = netdev_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
				"by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}
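/* Toggled from userspace, e.g. "echo 1 > /sys/class/net/ib0/umcast". */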
static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}
static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
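/*
 * Example: "echo 0x8001 > /sys/class/net/ib0/create_child" creates a
 * child interface (named ib0.8001) on P_Key 0x8001.
 */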
static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}
int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	priv->hca_caps = hca->attrs.device_cap_flags;

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features = NETIF_F_SG |
			NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}

	return 0;
}
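/*
 * Per-port setup: allocate the netdev, query port attributes, P_Key and
 * GID, build the 20-byte hardware address (flags/QPN + GID), then
 * register the netdev and its sysfs attributes.  The error labels below
 * unwind in strict reverse order of setup.
 */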
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);
	priv->dev->dev_id = port - 1;

	result = ib_query_port(hca, port, &attr);
	if (!result)
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;

	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	result = ipoib_set_dev_features(priv, hca);
	if (result)
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}
	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof(union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);
	/* Stop GC if started before flush */
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	cancel_delayed_work(&priv->neigh_reap_task);
	flush_workqueue(priv->wq);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}
static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int p;
	int count = 0;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		if (!rdma_protocol_ib(device, p))
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
			count++;
		}
	}

	if (!count) {
		kfree(dev_list);
		return;
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}
static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list = client_data;

	if (!dev_list)
		return;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);
		flush_workqueue(ipoib_workqueue);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		/* Stop GC */
		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
		cancel_delayed_work(&priv->neigh_reap_task);
		flush_workqueue(priv->wq);

		unregister_netdev(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}
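/*
 * Module-parameter queue sizes are normalized below: rounded up to a
 * power of two, then clamped to [IPOIB_MIN_QUEUE_SIZE,
 * IPOIB_MAX_QUEUE_SIZE]; e.g. recv_queue_size=100 becomes 128.
 */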
static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create a global workqueue here that is used for all flush
	 * operations.  However, if you attempt to flush a workqueue
	 * from a task on that same workqueue, it deadlocks the system.
	 * We want to be able to flush the tasks associated with a
	 * specific net device, so we also create a workqueue for each
	 * netdevice.  We queue up the tasks for that device only on
	 * its private workqueue, and we only queue up flush events
	 * on our global flush workqueue.  This avoids the deadlocks.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}
static void __exit ipoib_cleanup_module(void)
{
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);