/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <net/neighbour.h>
#include "cxgb4.h"
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#include "t4_values.h"

#define VLAN_NONE 0xfff

/* identifies sync vs async L2T_WRITE_REQs */
#define SYNC_WR_S    12
#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
#define SYNC_WR_F    SYNC_WR_V(1)

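/* A synchronous write tags SYNC_WR_F into the TID field of the work request;
 * the reply echoes the TID, which lets do_l2t_write_rpl() below tell
 * completed sync writes apart from async ones.
 */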
struct l2t_data {
	unsigned int l2t_start;     /* start index of our piece of the L2T */
	unsigned int l2t_size;      /* number of entries in l2tab */
	rwlock_t lock;
	atomic_t nfree;             /* number of free entries */
	struct l2t_entry *rover;    /* starting point for next allocation */
	struct l2t_entry l2tab[0];  /* MUST BE LAST */
};

static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> VLAN_PRIO_SHIFT;
}

static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
		atomic_dec(&d->nfree);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.  We need at least two
 * entries in our L2T for this scheme to work.
 */
enum {
	L2T_MIN_HASH_BUCKETS = 2,
};

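/* Illustration: with l2t_size == 256, IPv4 neighbours hash into buckets
 * [0, 127] and IPv6 neighbours into buckets [128, 255], as computed by
 * arp_hash() and ipv6_hash() below.
 */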
static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
				    int ifindex)
{
	unsigned int l2t_size_half = d->l2t_size / 2;

	return jhash_2words(*key, ifindex, 0) % l2t_size_half;
}

static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
				     int ifindex)
{
	unsigned int l2t_size_half = d->l2t_size / 2;
	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return (l2t_size_half +
		(jhash_2words(xor, ifindex, 0) % l2t_size_half));
}

static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
			      int addr_len, int ifindex)
{
	return addr_len == 4 ? arp_hash(d, addr, ifindex) :
			       ipv6_hash(d, addr, ifindex);
}

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match, non-zero otherwise.
 */
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
	if (e->v6)
		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
	return e->addr[0] ^ addr[0];
}

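/* Switch an entry to a new neighbour, taking a reference on the new
 * neighbour before releasing the old one.
 */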
static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
	struct l2t_data *d = adap->l2t;
	unsigned int l2t_idx = e->idx + d->l2t_start;
	struct sk_buff *skb;
	struct cpl_l2t_write_req *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
					l2t_idx | (sync ? SYNC_WR_F : 0) |
					TID_QID_V(adap->sge.fw_evtq.abs_id)));
	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
	req->l2t_idx = htons(l2t_idx);
	req->vlan = htons(e->vlan);
	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

	t4_mgmt_tx(adap, skb);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;
	return 0;
}

/*
 * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
 * entry locked.
 */
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&e->arpq)) != NULL)
		t4_ofld_send(adap, skb);
}

/*
 * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
 * synchronous L2T_WRITE.  Note that the TID in the reply is really the L2T
 * index it refers to.
 */
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
	struct l2t_data *d = adap->l2t;
	unsigned int tid = GET_TID(rpl);
	unsigned int l2t_idx = tid % L2T_SIZE;

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		dev_err(adap->pdev_dev,
			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
			rpl->status, l2t_idx);
		return;
	}

	if (tid & SYNC_WR_F) {
		struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];

		spin_lock(&e->lock);
		if (e->state != L2T_STATE_SWITCHING) {
			send_pending(adap, e);
			e->state = (e->neigh->nud_state & NUD_STALE) ?
					L2T_STATE_STALE : L2T_STATE_VALID;
		}
		spin_unlock(&e->lock);
	}
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	__skb_queue_tail(&e->arpq, skb);
}

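/* Transmit a packet through an L2T entry.  VALID entries are the fast path;
 * a STALE entry is still usable but kicks off neighbour revalidation first,
 * while packets for RESOLVING/SYNC_WRITE entries are queued on the arpq
 * until address resolution completes.
 */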
int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
		   struct l2t_entry *e)
{
	struct adapter *adap = netdev2adap(dev);

again:
	switch (e->state) {
	case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		/* fall through */
	case L2T_STATE_VALID:     /* fast-path, send the packet on */
		return t4_ofld_send(adap, skb);
	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		if (e->state == L2T_STATE_RESOLVING &&
		    !neigh_event_send(e->neigh, NULL)) {
			/* The neighbour is already resolved; write the entry
			 * synchronously so the queued packets go out from the
			 * write completion.
			 */
			spin_lock_bh(&e->lock);
			if (e->state == L2T_STATE_RESOLVING &&
			    !skb_queue_empty(&e->arpq))
				write_l2e(adap, e, 1);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_l2t_send);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;
	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/* The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
	e->state = L2T_STATE_UNUSED;
	return e;
}

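/* Find a free or matching switching entry.  An existing switching entry with
 * the same (dmac, vlan, port) triple is shared; otherwise the first free
 * entry is recycled.  Must be called with l2t_data.lock held.
 */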
static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
					   u8 port, u8 *dmac)
{
	struct l2t_entry *end, *e, **p;
	struct l2t_entry *first_free = NULL;

	for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
		if (atomic_read(&e->refcnt) == 0) {
			if (!first_free)
				first_free = e;
		} else if (e->state == L2T_STATE_SWITCHING) {
			if (ether_addr_equal(e->dmac, dmac) &&
			    (e->vlan == vlan) && (e->lport == port))
				goto exists;
		}
	}

	if (!first_free)
		return NULL;
	e = first_free;

	/* The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
	e->state = L2T_STATE_UNUSED;

exists:
	return e;
}

/* Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
static void _t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;
	struct sk_buff *skb;

	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
		while ((skb = __skb_dequeue(&e->arpq)) != NULL)
			kfree_skb(skb);
	}

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}

/* Locked version of _t4_l2e_free */
static void t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;
	struct sk_buff *skb;

	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
		while ((skb = __skb_dequeue(&e->arpq)) != NULL)
			kfree_skb(skb);
	}
	spin_unlock_bh(&e->lock);

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}

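/* Drop a reference to an L2T entry.  The final release returns the entry to
 * the free pool via t4_l2e_free().
 */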
void cxgb4_l2t_release(struct l2t_entry *e)
{
	if (atomic_dec_and_test(&e->refcnt))
		t4_l2e_free(e);
}
EXPORT_SYMBOL(cxgb4_l2t_release);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);                /* avoid race with t4_l2t_free */
	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

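/* Look up the L2T entry for a neighbour and take a reference on it, or
 * allocate and initialize a fresh entry if none matches.  Returns NULL when
 * the table has no free entries.
 */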
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
				const struct net_device *physdev,
				unsigned int priority)
{
	u8 lport;
	u16 vlan;
	struct l2t_entry *e;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *)neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(d, addr, addr_len, ifidx);

	if (neigh->dev->flags & IFF_LOOPBACK)
		lport = netdev2pinfo(physdev)->tx_chan + 4;
	else
		lport = netdev2pinfo(physdev)->lport;

	if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
		vlan = vlan_dev_vlan_id(neigh->dev);
	else
		vlan = VLAN_NONE;

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx &&
		    e->vlan == vlan && e->lport == lport) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);          /* avoid race with t4_l2t_free */
		e->state = L2T_STATE_RESOLVING;
		if (neigh->dev->flags & IFF_LOOPBACK)
			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
		memcpy(e->addr, addr, addr_len);
		e->ifindex = ifidx;
		e->hash = hash;
		e->lport = lport;
		e->v6 = addr_len == 16;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		e->vlan = vlan;
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}
EXPORT_SYMBOL(cxgb4_l2t_get);

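/* Build the Compressed Filter Tuple for an L2T entry.  Which fields exist
 * and where they sit in the tuple is per-adapter: the TP parameter shifts
 * are read from the chip at init time, with a negative shift meaning the
 * field is absent.
 */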
u64 cxgb4_select_ntuple(struct net_device *dev,
			const struct l2t_entry *l2t)
{
	struct adapter *adap = netdev2adap(dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;

	/* Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
		ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (u64)l2t->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0) {
		u32 viid = cxgb4_port_viid(dev);
		u32 vf = FW_VIID_VIN_G(viid);
		u32 pf = FW_VIID_PFN_G(viid);
		u32 vld = FW_VIID_VIVLD_G(viid);

		ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
				FT_VNID_ID_PF_V(pf) |
				FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
	}

	return ntuple;
}
EXPORT_SYMBOL(cxgb4_select_ntuple);

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the device.
 */
static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
		const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		/* drop the entry lock while the packet is handed off */
		spin_unlock(&e->lock);
		if (cb->arp_err_handler)
			cb->arp_err_handler(cb->handle, skb);
		else
			t4_ofld_send(adap, skb);
		spin_lock(&e->lock);
	}
}

/*
 * Called when the host's neighbor layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
	struct l2t_entry *e;
	struct sk_buff_head *arpq = NULL;
	struct l2t_data *d = adap->l2t;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(d, addr, addr_len, ifidx);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			if (atomic_read(&e->refcnt))
				goto found;
			spin_unlock(&e->lock);
			break;
		}
	read_unlock_bh(&d->lock);
	return;

 found:
	/* drop the table lock without re-enabling BHs: the entry lock taken
	 * above still protects us, and BHs stay disabled until the final
	 * spin_unlock_bh() below.
	 */
	read_unlock(&d->lock);

	if (neigh != e->neigh)
		neigh_replace(e, neigh);

	if (e->state == L2T_STATE_RESOLVING) {
		if (neigh->nud_state & NUD_FAILED) {
			arpq = &e->arpq;
		} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
			   !skb_queue_empty(&e->arpq)) {
			write_l2e(adap, e, 1);
		}
	} else {
		e->state = neigh->nud_state & NUD_CONNECTED ?
			L2T_STATE_VALID : L2T_STATE_STALE;
		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
			write_l2e(adap, e, 0);
	}

	if (arpq)
		handle_failed_resolution(adap, e);
	spin_unlock_bh(&e->lock);
}

/* Allocate an L2T entry for use by a switching rule.  Such entries need to be
 * explicitly freed and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
					 u8 port, u8 *eth_addr)
{
	struct l2t_data *d = adap->l2t;
	struct l2t_entry *e;
	int ret;

	write_lock_bh(&d->lock);
	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		spin_lock(&e->lock);          /* avoid race with t4_l2t_free */
		if (!atomic_read(&e->refcnt)) {
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			ether_addr_copy(e->dmac, eth_addr);
			atomic_set(&e->refcnt, 1);
			ret = write_l2e(adap, e, 0);
			if (ret < 0) {
				_t4_l2e_free(e);
				spin_unlock(&e->lock);
				write_unlock_bh(&d->lock);
				return NULL;
			}
		} else {
			atomic_inc(&e->refcnt);
		}
		spin_unlock(&e->lock);
	}
	write_unlock_bh(&d->lock);
	return e;
}

/**
 * cxgb4_l2t_alloc_switching - Allocate an L2T entry for a switching rule
 * @dev: net_device pointer
 * @vlan: VLAN Id
 * @port: Associated port
 * @dmac: Destination MAC address to add to L2T
 * Returns pointer to the allocated l2t entry
 *
 * Allocates an L2T entry for use by switching rule of a filter
 */
struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
					    u8 port, u8 *dmac)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_l2t_alloc_switching(adap, vlan, port, dmac);
}
EXPORT_SYMBOL(cxgb4_l2t_alloc_switching);

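/* Allocate and initialize the L2T state for the slice of the hardware table
 * spanning indices [l2t_start, l2t_end].  Fails if the range is invalid or
 * too small for the split IPv4/IPv6 hashing scheme described above.
 */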
struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
	unsigned int l2t_size;
	int i;
	struct l2t_data *d;

	if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
		return NULL;
	l2t_size = l2t_end - l2t_start + 1;
	if (l2t_size < L2T_MIN_HASH_BUCKETS)
		return NULL;

	d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
	if (!d)
		return NULL;

	d->l2t_start = l2t_start;
	d->l2t_size = l2t_size;

	d->rover = d->l2tab;
	atomic_set(&d->nfree, l2t_size);
	rwlock_init(&d->lock);

	for (i = 0; i < d->l2t_size; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
		skb_queue_head_init(&d->l2tab[i].arpq);
	}
	return d;
}

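/*
 * Everything below implements the debugfs file that dumps the L2T through
 * the seq_file interface, one line per entry.
 */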
static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
	struct l2t_data *d = seq->private;

	return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
}

static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	v = l2t_get_idx(seq, *pos);
	if (v)
		++*pos;
	return v;
}

static void l2t_seq_stop(struct seq_file *seq, void *v)
{
}

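/* Map an entry's state to the single-character State column in debugfs:
 * Valid, Stale, sync-Write pending, Resolving ('A' when packets are queued
 * awaiting resolution), switching ('X'), or Unused.
 */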
static char l2e_state(const struct l2t_entry *e)
{
	switch (e->state) {
	case L2T_STATE_VALID: return 'V';
	case L2T_STATE_STALE: return 'S';
	case L2T_STATE_SYNC_WRITE: return 'W';
	case L2T_STATE_RESOLVING:
		return skb_queue_empty(&e->arpq) ? 'R' : 'A';
	case L2T_STATE_SWITCHING: return 'X';
	default:
		return 'U';
	}
}

static int l2t_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, " Idx IP address                "
			 "Ethernet address  VLAN/P LP State Users Port\n");
	else {
		char ip[60];
		struct l2t_data *d = seq->private;
		struct l2t_entry *e = v;

		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_SWITCHING)
			ip[0] = '\0';
		else
			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
			   e->idx + d->l2t_start, ip, e->dmac,
			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
			   l2e_state(e), atomic_read(&e->refcnt),
			   e->neigh ? e->neigh->dev->name : "");
		spin_unlock_bh(&e->lock);
	}
	return 0;
}

static const struct seq_operations l2t_seq_ops = {
	.start = l2t_seq_start,
	.next = l2t_seq_next,
	.stop = l2t_seq_stop,
	.show = l2t_seq_show
};

static int l2t_seq_open(struct inode *inode, struct file *file)
{
	int rc = seq_open(file, &l2t_seq_ops);

	if (!rc) {
		struct adapter *adap = inode->i_private;
		struct seq_file *seq = file->private_data;

		seq->private = adap->l2t;
	}
	return rc;
}

const struct file_operations t4_l2t_fops = {
	.owner = THIS_MODULE,
	.open = l2t_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};