2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
39 #include "cxgb4_filter.h"
/* NOTE(review): body not visible here; from its use in unsupported() below it
 * presumably reports whether the caller is trying to match on this field
 * (i.e. @val/@mask select any bits) — confirm against the full source.
 */
41 static inline bool is_field_set(u32 val, u32 mask)
/* True when the filter spec tries to match a field (val/mask set) whose
 * match capability bit (@conf_mask) is NOT enabled in the card's
 * configuration word (@conf).  Used to reject specs the hardware can't do.
 */
46 static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
48 return !(conf & conf_mask) && is_field_set(val, mask);
51 /* Validate filter spec against configuration done on the card. */
52 static int validate_filter(struct net_device *dev,
53 struct ch_filter_specification *fs)
55 struct adapter *adapter = netdev2adap(dev);
58 /* Check for unconfigured fields being used. */
/* fconf = per-field match capabilities (TP_VLAN_PRI_MAP); any field the
 * spec matches on must have its capability bit set here.
 */
59 fconf = adapter->params.tp.vlan_pri_map;
60 iconf = adapter->params.tp.ingress_config;
62 if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
63 unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
64 unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
65 unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
67 unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
68 unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
69 fs->mask.matchtype) ||
70 unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
71 unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
72 unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
74 unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
75 fs->mask.ovlan_vld) ||
76 unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
79 /* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
80 * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
81 * in TP_INGRESS_CONFIG. Hence the somewhat crazy checks
82 * below. Additionally, since the T4 firmware interface also
83 * carries that overlap, we need to translate any PF/VF
84 * specification into that internal format below.
/* PF/VF-match and Outer-VLAN-match are mutually exclusive: they share
 * the same hardware field, so requesting both is rejected.
 */
86 if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
87 is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld))
89 if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
90 (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
/* PF is a 3-bit field, VF a 7-bit field in the shared VNIC_ID encoding. */
93 if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
98 /* If the user is requesting that the filter action loop
99 * matching packets back out one of our ports, make sure that
100 * the egress port is in range.
102 if (fs->action == FILTER_SWITCH &&
103 fs->eport >= adapter->params.nports)
106 /* Don't allow various trivially obvious bogus out-of-range values... */
107 if (fs->val.iport >= adapter->params.nports)
110 /* T4 doesn't support removing VLAN Tags for loop back filters. */
111 if (is_t4(adapter->params.chip) &&
112 fs->action == FILTER_SWITCH &&
113 (fs->newvlan == VLAN_REMOVE ||
114 fs->newvlan == VLAN_REWRITE))
/* Resolve the filter spec's requested ingress queue into an absolute
 * hardware queue ID for this port.  Relative queue indices (fs->iq below
 * the port's qset count) are mapped through the port's ethrxq table;
 * larger values are treated as already-absolute qids.
 */
120 static int get_filter_steerq(struct net_device *dev,
121 struct ch_filter_specification *fs)
123 struct adapter *adapter = netdev2adap(dev);
126 /* If the user has requested steering matching Ingress Packets
127 * to a specific Queue Set, we need to make sure it's in range
128 * for the port and map that into the Absolute Queue ID of the
129 * Queue Set's Response Queue.
136 struct port_info *pi = netdev_priv(dev);
138 /* If the iq id is greater than the number of qsets,
139 * then assume it is an absolute qid.
141 if (fs->iq < pi->nqsets)
142 iq = adapter->sge.ethrxq[pi->first_qset +
/* Reserve filter-TID slot(s) in the ftid bitmap under the ftid lock.
 * IPv4 (PF_INET) filters occupy a single slot; otherwise (IPv6) a
 * four-slot aligned region (order 2) is allocated.  Fails if the base
 * slot is already taken.
 */
151 static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
153 spin_lock_bh(&t->ftid_lock);
/* Base slot already in use — bail out without touching the bitmap. */
155 if (test_bit(fidx, t->ftid_bmap)) {
156 spin_unlock_bh(&t->ftid_lock);
160 if (family == PF_INET)
161 __set_bit(fidx, t->ftid_bmap);
/* IPv6: claim a 2^2 = 4 slot region starting at fidx. */
163 bitmap_allocate_region(t->ftid_bmap, fidx, 2);
165 spin_unlock_bh(&t->ftid_lock);
/* Release filter-TID slot(s) previously reserved by cxgb4_set_ftid():
 * one slot for IPv4 (PF_INET), a four-slot region otherwise.
 */
169 static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
171 spin_lock_bh(&t->ftid_lock);
172 if (family == PF_INET)
173 __clear_bit(fidx, t->ftid_bmap);
175 bitmap_release_region(t->ftid_bmap, fidx, 2);
176 spin_unlock_bh(&t->ftid_lock);
179 /* Delete the filter at a specified index. */
180 static int del_filter_wr(struct adapter *adapter, int fidx)
182 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
183 struct fw_filter_wr *fwr;
189 skb = alloc_skb(len, GFP_KERNEL);
/* Build a firmware "delete filter" work request for this tid; the reply
 * will be delivered on the firmware event queue (fw_evtq).
 */
193 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
194 t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
196 /* Mark the filter as "pending" and ship off the Filter Work Request.
197 * When we get the Work Request Reply we'll clear the pending status.
200 t4_mgmt_tx(adapter, skb);
204 /* Send a Work Request to write the filter at a specified index. We construct
205 * a Firmware Filter Work Request to have the work done and put the indicated
206 * filter into "pending" mode which will prevent any further actions against
207 * it till we get a reply from the firmware on the completion status of the
210 int set_filter_wr(struct adapter *adapter, int fidx)
212 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
213 struct fw_filter_wr *fwr;
216 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
220 /* If the new filter requires loopback Destination MAC and/or VLAN
221 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
224 if (f->fs.newdmac || f->fs.newvlan) {
225 /* allocate L2T entry for new filter */
226 f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
227 f->fs.eport, f->fs.dmac);
234 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
235 memset(fwr, 0, sizeof(*fwr));
237 /* It would be nice to put most of the following in t4_hw.c but most
238 * of the work is translating the cxgbtool ch_filter_specification
239 * into the Work Request and the definition of that structure is
240 * currently in cxgbtool.h which isn't appropriate to pull into the
241 * common code. We may eventually try to come up with a more neutral
242 * filter specification structure but for now it's easiest to simply
243 * put this fairly direct code in line ...
/* Work request header: opcode, length in 16-byte units, target tid. */
245 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
246 fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
248 htonl(FW_FILTER_WR_TID_V(f->tid) |
249 FW_FILTER_WR_RQTYPE_V(f->fs.type) |
250 FW_FILTER_WR_NOREPLY_V(0) |
251 FW_FILTER_WR_IQ_V(f->fs.iq));
/* Filter actions: drop/steer/switch, DMAC/SMAC/VLAN rewrite, hit
 * counters, egress channel, priority, and L2T index (0 when no L2T
 * entry was allocated above).
 */
252 fwr->del_filter_to_l2tix =
253 htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
254 FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
255 FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
256 FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
257 FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
258 FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
259 FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
260 FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
261 FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
262 f->fs.newvlan == VLAN_REWRITE) |
263 FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
264 f->fs.newvlan == VLAN_REWRITE) |
265 FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
266 FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
267 FW_FILTER_WR_PRIO_V(f->fs.prio) |
268 FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
/* Match fields and their masks, straight from the spec (network order
 * for multi-byte fields).
 */
269 fwr->ethtype = htons(f->fs.val.ethtype);
270 fwr->ethtypem = htons(f->fs.mask.ethtype);
271 fwr->frag_to_ovlan_vldm =
272 (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
273 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
274 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
275 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
276 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
277 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
/* Ask firmware to send the reply to the firmware event queue. */
279 fwr->rx_chan_rx_rpl_iq =
280 htons(FW_FILTER_WR_RX_CHAN_V(0) |
281 FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
282 fwr->maci_to_matchtypem =
283 htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
284 FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
285 FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
286 FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
287 FW_FILTER_WR_PORT_V(f->fs.val.iport) |
288 FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
289 FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
290 FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
291 fwr->ptcl = f->fs.val.proto;
292 fwr->ptclm = f->fs.mask.proto;
293 fwr->ttyp = f->fs.val.tos;
294 fwr->ttypm = f->fs.mask.tos;
295 fwr->ivlan = htons(f->fs.val.ivlan);
296 fwr->ivlanm = htons(f->fs.mask.ivlan);
297 fwr->ovlan = htons(f->fs.val.ovlan);
298 fwr->ovlanm = htons(f->fs.mask.ovlan);
299 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
300 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
301 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
302 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
303 fwr->lp = htons(f->fs.val.lport);
304 fwr->lpm = htons(f->fs.mask.lport);
305 fwr->fp = htons(f->fs.val.fport);
306 fwr->fpm = htons(f->fs.mask.fport);
308 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
310 /* Mark the filter as "pending" and ship off the Filter Work Request.
311 * When we get the Work Request Reply we'll clear the pending status.
/* Steer the WR itself onto a control queue keyed by the ingress port. */
314 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
315 t4_ofld_send(adapter, skb);
319 /* Return an error number if the indicated filter isn't writable ... */
/* NOTE(review): body not visible here; callers treat a non-zero return as
 * "filter busy/locked" — presumably checks f->locked and f->pending.
 * Confirm against the full source.
 */
320 int writable_filter(struct filter_entry *f)
330 /* Delete the filter at the specified index (if valid). The checks for all
331 * the common problems with doing this like the filter being locked, currently
332 * pending in another operation, etc.
334 int delete_filter(struct adapter *adapter, unsigned int fidx)
336 struct filter_entry *f;
/* Bounds-check against the combined normal + server filter regions. */
339 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
342 f = &adapter->tids.ftid_tab[fidx];
343 ret = writable_filter(f);
/* Issue the firmware delete work request for this index. */
347 return del_filter_wr(adapter, fidx);
352 /* Clear a filter and release any of its resources that we own. This also
353 * clears the filter's "pending" status.
355 void clear_filter(struct adapter *adap, struct filter_entry *f)
357 /* If the new or old filter have loopback rewriting rules then we'll
358 * need to free any existing Layer Two Table (L2T) entries of the old
359 * filter rule. The firmware will handle freeing up any Source MAC
360 * Table (SMT) entries used for rewriting Source MAC Addresses in
364 cxgb4_l2t_release(f->l2t);
366 /* The zeroing of the filter rule below clears the filter valid,
367 * pending, locked flags, l2t pointer, etc. so it's all we need for
370 memset(f, 0, sizeof(*f));
/* Walk the entire filter table (normal + server filter regions) and
 * release every filter that is valid or has an operation pending.
 */
373 void clear_all_filters(struct adapter *adapter)
377 if (adapter->tids.ftid_tab) {
378 struct filter_entry *f = &adapter->tids.ftid_tab[0];
379 unsigned int max_ftid = adapter->tids.nftids +
380 adapter->tids.nsftids;
382 for (i = 0; i < max_ftid; i++, f++)
383 if (f->valid || f->pending)
384 clear_filter(adapter, f);
388 /* Fill up default masks for set match fields. */
/* For every match field where the user supplied a value but no mask,
 * default the mask to all-ones (exact match).  IP addresses are handled
 * byte-wise: the mask is defaulted only if every mask byte is zero while
 * some value byte is non-zero.
 */
389 static void fill_default_mask(struct ch_filter_specification *fs)
391 unsigned int lip = 0, lip_mask = 0;
392 unsigned int fip = 0, fip_mask = 0;
395 if (fs->val.iport && !fs->mask.iport)
396 fs->mask.iport |= ~0;
397 if (fs->val.fcoe && !fs->mask.fcoe)
399 if (fs->val.matchtype && !fs->mask.matchtype)
400 fs->mask.matchtype |= ~0;
401 if (fs->val.macidx && !fs->mask.macidx)
402 fs->mask.macidx |= ~0;
403 if (fs->val.ethtype && !fs->mask.ethtype)
404 fs->mask.ethtype |= ~0;
405 if (fs->val.ivlan && !fs->mask.ivlan)
406 fs->mask.ivlan |= ~0;
407 if (fs->val.ovlan && !fs->mask.ovlan)
408 fs->mask.ovlan |= ~0;
409 if (fs->val.frag && !fs->mask.frag)
411 if (fs->val.tos && !fs->mask.tos)
413 if (fs->val.proto && !fs->mask.proto)
414 fs->mask.proto |= ~0;
/* OR together all bytes of the local/foreign IPs and their masks to
 * detect "value set, mask unset" per address.
 */
416 for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
417 lip |= fs->val.lip[i];
418 lip_mask |= fs->mask.lip[i];
419 fip |= fs->val.fip[i];
420 fip_mask |= fs->mask.fip[i];
423 if (lip && !lip_mask)
424 memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));
/* NOTE(review): sizeof(fs->mask.lip) is used for the fip mask here —
 * harmless only if lip and fip are the same size; verify upstream.
 */
426 if (fip && !fip_mask)
427 memset(fs->mask.fip, ~0, sizeof(fs->mask.lip));
429 if (fs->val.lport && !fs->mask.lport)
431 if (fs->val.fport && !fs->mask.fport)
435 /* Check a Chelsio Filter Request for validity, convert it into our internal
436 * format and send it to the hardware. Return 0 on success, an error number
437 * otherwise. We attach any provided filter operation context to the internal
438 * filter specification in order to facilitate signaling completion of the
441 int __cxgb4_set_filter(struct net_device *dev, int filter_id,
442 struct ch_filter_specification *fs,
443 struct filter_ctx *ctx)
445 struct adapter *adapter = netdev2adap(dev);
446 unsigned int max_fidx, fidx;
447 struct filter_entry *f;
/* filter_id must be inside the normal filter region, with one special
 * exception: the very last slot of the server-filter region.
 */
451 max_fidx = adapter->tids.nftids;
452 if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
453 filter_id >= max_fidx)
/* Default any value-without-mask fields to exact match, then validate
 * the spec against the card configuration.
 */
456 fill_default_mask(fs);
458 ret = validate_filter(dev, fs);
462 iq = get_filter_steerq(dev, fs);
466 /* IPv6 filters occupy four slots and must be aligned on
467 * four-slot boundaries. IPv4 filters only occupy a single
468 * slot and have no alignment requirements but writing a new
469 * IPv4 filter into the middle of an existing IPv6 filter
470 * requires clearing the old IPv6 filter and hence we prevent
473 if (fs->type == 0) { /* IPv4 */
474 /* If our IPv4 filter isn't being written to a
475 * multiple of four filter index and there's an IPv6
476 * filter at the multiple of 4 base slot, then we
479 fidx = filter_id & ~0x3;
480 if (fidx != filter_id &&
481 adapter->tids.ftid_tab[fidx].fs.type) {
482 f = &adapter->tids.ftid_tab[fidx];
484 dev_err(adapter->pdev_dev,
485 "Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
491 /* Ensure that the IPv6 filter is aligned on a
492 * multiple of 4 boundary.
494 if (filter_id & 0x3) {
495 dev_err(adapter->pdev_dev,
496 "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
500 /* Check all except the base overlapping IPv4 filter slots. */
501 for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
502 f = &adapter->tids.ftid_tab[fidx];
504 dev_err(adapter->pdev_dev,
505 "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
512 /* Check to make sure that provided filter index is not
513 * already in use by someone else
515 f = &adapter->tids.ftid_tab[filter_id];
/* Reserve the ftid bitmap slot(s); fidx is the hardware tid (table
 * index offset by the region base).
 */
519 fidx = filter_id + adapter->tids.ftid_base;
520 ret = cxgb4_set_ftid(&adapter->tids, filter_id,
521 fs->type ? PF_INET6 : PF_INET);
525 /* Check to make sure the filter requested is writable ... */
526 ret = writable_filter(f);
528 /* Clear the bits we have set above */
529 cxgb4_clear_ftid(&adapter->tids, filter_id,
530 fs->type ? PF_INET6 : PF_INET);
534 /* Clear out any old resources being used by the filter before
535 * we start constructing the new filter.
538 clear_filter(adapter, f);
540 /* Convert the filter specification into our internal format.
541 * We copy the PF/VF specification into the Outer VLAN field
542 * here so the rest of the code -- including the interface to
543 * the firmware -- doesn't have to constantly do these checks.
/* When VNIC_F is set the hardware's Outer VLAN field carries the
 * PF (3 bits, shifted by 13) / VF (lower bits) encoding instead.
 */
549 iconf = adapter->params.tp.ingress_config;
550 if (iconf & VNIC_F) {
551 f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
552 f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
553 f->fs.val.ovlan_vld = fs->val.pfvf_vld;
554 f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
557 /* Attempt to set the filter. If we don't succeed, we clear
558 * it and return the failure.
561 f->tid = fidx; /* Save the actual tid */
562 ret = set_filter_wr(adapter, filter_id);
/* On failure, roll back both the bitmap reservation and the entry. */
564 cxgb4_clear_ftid(&adapter->tids, filter_id,
565 fs->type ? PF_INET6 : PF_INET);
566 clear_filter(adapter, f);
572 /* Check a delete filter request for validity and send it to the hardware.
573 * Return 0 on success, an error number otherwise. We attach any provided
574 * filter operation context to the internal filter specification in order to
575 * facilitate signaling completion of the operation.
577 int __cxgb4_del_filter(struct net_device *dev, int filter_id,
578 struct filter_ctx *ctx)
580 struct adapter *adapter = netdev2adap(dev);
581 struct filter_entry *f;
582 unsigned int max_fidx;
/* Same index validation as __cxgb4_set_filter(): normal region, plus
 * the last server-filter slot as a special case.
 */
585 max_fidx = adapter->tids.nftids;
586 if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
587 filter_id >= max_fidx)
590 f = &adapter->tids.ftid_tab[filter_id];
591 ret = writable_filter(f);
/* Release the bitmap slot(s) and ship the firmware delete request. */
597 cxgb4_clear_ftid(&adapter->tids, filter_id,
598 f->fs.type ? PF_INET6 : PF_INET);
599 return del_filter_wr(adapter, filter_id);
602 /* If the caller has passed in a Completion Context then we need to
603 * mark it as a successful completion so they don't stall waiting
608 complete(&ctx->completion);
/* Synchronous wrapper around __cxgb4_set_filter(): issues the set
 * request with a local completion context and waits (up to 10 s) for
 * the firmware reply handled in filter_rpl().
 */
613 int cxgb4_set_filter(struct net_device *dev, int filter_id,
614 struct ch_filter_specification *fs)
616 struct filter_ctx ctx;
619 init_completion(&ctx.completion);
621 ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
/* Zero return from wait_for_completion_timeout() means timeout. */
626 ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
/* Synchronous wrapper around __cxgb4_del_filter(): issues the delete
 * request with a local completion context and waits (up to 10 s) for
 * the firmware reply.
 */
635 int cxgb4_del_filter(struct net_device *dev, int filter_id)
637 struct filter_ctx ctx;
640 init_completion(&ctx.completion);
642 ret = __cxgb4_del_filter(dev, filter_id, &ctx);
647 ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
656 /* Handle a filter write/deletion reply. */
/* Called from the firmware event queue path with a CPL_SET_TCB_RPL.
 * Maps the tid back to our filter_entry, updates its state according to
 * the firmware result code carried in the reply cookie, and completes
 * any waiting synchronous caller's context.
 */
657 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
659 unsigned int tid = GET_TID(rpl);
660 struct filter_entry *f = NULL;
661 unsigned int max_fidx;
664 max_fidx = adap->tids.nftids + adap->tids.nsftids;
665 /* Get the corresponding filter entry for this tid */
666 if (adap->tids.ftid_tab) {
667 /* Check this in normal filter region */
668 idx = tid - adap->tids.ftid_base;
671 f = &adap->tids.ftid_tab[idx];
676 /* We found the filter entry for this tid */
/* Firmware result code travels in the TCB cookie field. */
678 unsigned int ret = TCB_COOKIE_G(rpl->cookie);
679 struct filter_ctx *ctx;
681 /* Pull off any filter operation context attached to the
687 if (ret == FW_FILTER_WR_FLT_DELETED) {
688 /* Clear the filter when we get confirmation from the
689 * hardware that the filter has been deleted.
691 clear_filter(adap, f);
694 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
695 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
697 clear_filter(adap, f);
699 ctx->result = -ENOMEM;
700 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
/* Success: record the SMT index from the old TCB value and mark
 * the entry as no longer pending.
 */
701 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
702 f->pending = 0; /* asynchronous setup completed */
709 /* Something went wrong. Issue a warning about the
710 * problem and clear everything out.
712 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
714 clear_filter(adap, f);
716 ctx->result = -EINVAL;
/* Wake any synchronous waiter (cxgb4_set_filter/cxgb4_del_filter). */
719 complete(&ctx->completion);