2 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <linux/ctype.h>
34 #include <net/switchdev.h>
35 #include <net/rtnetlink.h>
36 #include <net/ip_fib.h>
37 #include <net/netevent.h>
39 #include <linux/io-64-nonatomic-lo-hi.h>
40 #include <generated/utsrelease.h>
42 #include "rocker_hw.h"
44 #include "rocker_tlv.h"
/* Driver name used for PCI registration and IRQ naming. */
46 static const char rocker_driver_name[] = "rocker";
/* PCI IDs we bind to; Rocker is a Red Hat (QEMU-emulated) device.
 * NOTE(review): the table's terminating empty entry and closing brace are
 * missing from this excerpt.
 */
48 static const struct pci_device_id rocker_pci_id_table[] = {
49 {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
/* Hash-table key for an OF-DPA flow entry: table id plus per-table match
 * fields.  NOTE(review): most of the original union of per-table key structs
 * (ig_port, vlan, term_mac, ucast_routing, bridge, acl) is missing from this
 * excerpt; only fragments are visible.
 */
53 struct rocker_flow_tbl_key {
55 enum rocker_of_dpa_table_id tbl_id;
60 enum rocker_of_dpa_table_id goto_tbl;
66 enum rocker_of_dpa_table_id goto_tbl;
75 u8 eth_dst_mask[ETH_ALEN];
78 enum rocker_of_dpa_table_id goto_tbl;
85 enum rocker_of_dpa_table_id goto_tbl;
90 u8 eth_dst_mask[ETH_ALEN];
95 enum rocker_of_dpa_table_id goto_tbl;
102 u8 eth_src[ETH_ALEN];
103 u8 eth_src_mask[ETH_ALEN];
104 u8 eth_dst[ETH_ALEN];
105 u8 eth_dst_mask[ETH_ALEN];
/* Flow table entry; hashed into rocker->flow_tbl by CRC32 of the key. */
118 struct rocker_flow_tbl_entry {
119 struct hlist_node entry;
122 struct rocker_flow_tbl_key key;
124 u32 key_crc32; /* key */
/* Group table entry; keyed by group_id in rocker->group_tbl.
 * NOTE(review): the per-group-type union (l2_interface, l2_rewrite,
 * l3_unicast) is mostly missing from this excerpt.
 */
127 struct rocker_group_tbl_entry {
128 struct hlist_node entry;
130 u32 group_id; /* key */
138 u8 eth_src[ETH_ALEN];
139 u8 eth_dst[ETH_ALEN];
144 u8 eth_src[ETH_ALEN];
145 u8 eth_dst[ETH_ALEN];
/* Learned/static FDB entry; hashed by CRC32 of the (port, mac, vlan) key. */
153 struct rocker_fdb_tbl_entry {
154 struct hlist_node entry;
155 u32 key_crc32; /* key */
157 unsigned long touched;
158 struct rocker_fdb_tbl_key {
159 struct rocker_port *rocker_port;
/* Maps a bridge ifindex to an internal VLAN id (refcounted). */
165 struct rocker_internal_vlan_tbl_entry {
166 struct hlist_node entry;
167 int ifindex; /* key */
/* Neighbour (nexthop) entry; keyed by IPv4 address. */
172 struct rocker_neigh_tbl_entry {
173 struct hlist_node entry;
174 __be32 ip_addr; /* key */
175 struct net_device *dev;
178 u8 eth_dst[ETH_ALEN];
/* Bookkeeping for one hardware DMA descriptor ring (cmd/event/tx/rx).
 * NOTE(review): size/head/tail/type/mapaddr members are missing from this
 * excerpt but are referenced throughout the code below.
 */
182 struct rocker_dma_ring_info {
186 struct rocker_desc *desc; /* mapped */
188 struct rocker_desc_info *desc_info;
/* Control-traffic classes that get ACL/term-MAC treatment per port. */
195 ROCKER_CTRL_LINK_LOCAL_MCAST,
196 ROCKER_CTRL_LOCAL_ARP,
197 ROCKER_CTRL_IPV4_MCAST,
198 ROCKER_CTRL_IPV6_MCAST,
199 ROCKER_CTRL_DFLT_BRIDGING,
200 ROCKER_CTRL_DFLT_OVS,
/* Internal VLANs are carved out of the top of the 12-bit VLAN space. */
204 #define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00
205 #define ROCKER_N_INTERNAL_VLANS 255
206 #define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
207 #define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
/* Per-port state: netdev, optional bridge/OVS master, NAPI contexts and
 * tx/rx rings.  NOTE(review): pport, stp_state and other members referenced
 * below are missing from this excerpt.
 */
210 struct net_device *dev;
211 struct net_device *bridge_dev;
212 struct rocker *rocker;
213 unsigned int port_number;
215 __be16 internal_vlan_id;
218 unsigned long ageing_time;
219 bool ctrls[ROCKER_CTRL_MAX];
220 unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
221 struct napi_struct napi_tx;
222 struct napi_struct napi_rx;
223 struct rocker_dma_ring_info tx_ring;
224 struct rocker_dma_ring_info rx_ring;
/* Per-device (switch) state: PCI/MSI-X resources, cmd and event rings, and
 * the software shadow tables (flow, group, fdb, internal-vlan, neigh), each
 * with its own spinlock.
 */
228 struct pci_dev *pdev;
230 struct msix_entry *msix_entries;
231 unsigned int port_count;
232 struct rocker_port **ports;
236 spinlock_t cmd_ring_lock; /* for cmd ring accesses */
237 struct rocker_dma_ring_info cmd_ring;
238 struct rocker_dma_ring_info event_ring;
239 DECLARE_HASHTABLE(flow_tbl, 16);
240 spinlock_t flow_tbl_lock; /* for flow tbl accesses */
241 u64 flow_tbl_next_cookie;
242 DECLARE_HASHTABLE(group_tbl, 16);
243 spinlock_t group_tbl_lock; /* for group tbl accesses */
244 struct timer_list fdb_cleanup_timer;
245 DECLARE_HASHTABLE(fdb_tbl, 16);
246 spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
247 unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
248 DECLARE_HASHTABLE(internal_vlan_tbl, 8);
249 spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */
250 DECLARE_HASHTABLE(neigh_tbl, 16);
251 spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
252 u32 neigh_tbl_next_index;
/* Canned MAC addresses/masks used when programming ctrl ACL entries:
 * link-local 01:80:c2::/ll_mask, IPv4 mcast 01:00:5e::/ipv4_mask and
 * IPv6 mcast 33:33::/ipv6_mask, plus all-zero / broadcast helpers.
 */
255 static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
256 static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
257 static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
258 static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
259 static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
260 static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
261 static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
262 static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
263 static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
265 /* Rocker priority levels for flow table entries. Higher
266 * priority match takes precedence over lower priority match.
 * NOTE(review): the enum's opening line is missing from this excerpt.
 */
270 ROCKER_PRIORITY_UNKNOWN = 0,
271 ROCKER_PRIORITY_IG_PORT = 1,
272 ROCKER_PRIORITY_VLAN = 1,
273 ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
274 ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
275 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
276 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
277 ROCKER_PRIORITY_BRIDGING_VLAN = 3,
278 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
279 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
280 ROCKER_PRIORITY_BRIDGING_TENANT = 3,
281 ROCKER_PRIORITY_ACL_CTRL = 3,
282 ROCKER_PRIORITY_ACL_NORMAL = 2,
283 ROCKER_PRIORITY_ACL_DFLT = 1,
286 static bool rocker_vlan_id_is_internal(__be16 vlan_id)
288 u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
290 u16 _vlan_id = ntohs(vlan_id);
292 return (_vlan_id >= start && _vlan_id <= end);
295 static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
296 u16 vid, bool *pop_vlan)
302 vlan_id = htons(vid);
304 vlan_id = rocker_port->internal_vlan_id;
312 static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
315 if (rocker_vlan_id_is_internal(vlan_id))
318 return ntohs(vlan_id);
321 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
323 return rocker_port->bridge_dev &&
324 netif_is_bridge_master(rocker_port->bridge_dev);
327 static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
329 return rocker_port->bridge_dev &&
330 netif_is_ovs_master(rocker_port->bridge_dev);
/* Per-operation modifier flags threaded through the cmd/flow helpers:
 * REMOVE = delete instead of add; NOWAIT = don't sleep (GFP_ATOMIC, no
 * completion wait); LEARNED = entry came from hw MAC learning; REFRESH =
 * touch an existing entry.
 */
333 #define ROCKER_OP_FLAG_REMOVE BIT(0)
334 #define ROCKER_OP_FLAG_NOWAIT BIT(1)
335 #define ROCKER_OP_FLAG_LEARNED BIT(2)
336 #define ROCKER_OP_FLAG_REFRESH BIT(3)
/* Allocate memory cooperating with the switchdev two-phase (prepare/commit)
 * transaction model: prepare allocates and queues the chunk, commit dequeues
 * the same chunk instead of reallocating.  Returns pointer just past the
 * embedded switchdev_trans_item header, or NULL.
 * NOTE(review): the branch conditions around lines 355/362 (the !trans and
 * ph_commit cases) are missing from this excerpt.
 */
338 static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
341 struct switchdev_trans_item *elem = NULL;
342 gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
343 GFP_ATOMIC : GFP_KERNEL;
345 /* If in transaction prepare phase, allocate the memory
346 * and enqueue it on a transaction. If in transaction
347 * commit phase, dequeue the memory from the transaction
348 * rather than re-allocating the memory. The idea is the
349 * driver code paths for prepare and commit are identical
350 * so the memory allocated in the prepare phase is the
351 * memory used in the commit phase.
355 elem = kzalloc(size + sizeof(*elem), gfp_flags);
356 } else if (switchdev_trans_ph_prepare(trans)) {
357 elem = kzalloc(size + sizeof(*elem), gfp_flags);
360 switchdev_trans_item_enqueue(trans, elem, kfree, elem);
362 elem = switchdev_trans_item_dequeue(trans);
365 return elem ? elem + 1 : NULL;
368 static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
371 return __rocker_mem_alloc(trans, flags, size);
374 static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
375 size_t n, size_t size)
377 return __rocker_mem_alloc(trans, flags, n * size);
380 static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
382 struct switchdev_trans_item *elem;
384 /* Frees are ignored if in transaction prepare phase. The
385 * memory remains on the per-port list until freed in the
389 if (switchdev_trans_ph_prepare(trans))
392 elem = (struct switchdev_trans_item *) mem - 1;
/* Tail of struct rocker_wait: waitqueue plus done/nowait flags (struct
 * opening and remaining members missing from this excerpt).
 */
397 wait_queue_head_t wait;
/* Re-arm a rocker_wait for another completion cycle.
 * NOTE(review): the line clearing wait->done is missing from this excerpt.
 */
402 static void rocker_wait_reset(struct rocker_wait *wait)
405 wait->nowait = false;
408 static void rocker_wait_init(struct rocker_wait *wait)
410 init_waitqueue_head(&wait->wait);
411 rocker_wait_reset(wait);
/* Allocate and initialize a rocker_wait via the transaction-aware
 * allocator.  Returns NULL on allocation failure.
 * NOTE(review): the NULL check and return statement are missing from this
 * excerpt.
 */
414 static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
415 struct switchdev_trans *trans,
418 struct rocker_wait *wait;
420 wait = rocker_kzalloc(trans, flags, sizeof(*wait));
423 rocker_wait_init(wait);
/* Release a rocker_wait; a no-op during the transaction prepare phase
 * (see rocker_kfree()).
 */
static void rocker_wait_destroy(struct switchdev_trans *trans,
				struct rocker_wait *wait)
{
	rocker_kfree(trans, wait);
}
433 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
434 unsigned long timeout)
436 wait_event_timeout(wait->wait, wait->done, HZ / 10);
442 static void rocker_wait_wake_up(struct rocker_wait *wait)
445 wake_up(&wait->wait);
448 static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
450 return rocker->msix_entries[vector].vector;
453 static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
455 return rocker_msix_vector(rocker_port->rocker,
456 ROCKER_MSIX_VEC_TX(rocker_port->port_number));
459 static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
461 return rocker_msix_vector(rocker_port->rocker,
462 ROCKER_MSIX_VEC_RX(rocker_port->port_number));
/* MMIO accessors; 'reg' is the ROCKER_-prefixed register name, resolved by
 * token pasting against rocker_hw.h offsets.  64-bit forms rely on
 * io-64-nonatomic-lo-hi.h on 32-bit hosts.
 */
465 #define rocker_write32(rocker, reg, val) \
466 writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
467 #define rocker_read32(rocker, reg) \
468 readl((rocker)->hw_addr + (ROCKER_ ## reg))
469 #define rocker_write64(rocker, reg, val) \
470 writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
471 #define rocker_read64(rocker, reg) \
472 readq((rocker)->hw_addr + (ROCKER_ ## reg))
474 /*****************************
475 * HW basic testing functions
476 *****************************/
/* Probe-time sanity test: write a random value to the 32- and 64-bit test
 * registers; the emulated device echoes back value * 2.  Returns 0 on
 * success, negative errno on mismatch.
 * NOTE(review): variable declarations, the prandom seeding of 'rnd' and the
 * return paths are missing from this excerpt.
 */
478 static int rocker_reg_test(const struct rocker *rocker)
480 const struct pci_dev *pdev = rocker->pdev;
486 rocker_write32(rocker, TEST_REG, rnd);
487 test_reg = rocker_read32(rocker, TEST_REG);
488 if (test_reg != rnd * 2) {
489 dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
496 rnd |= prandom_u32();
497 rocker_write64(rocker, TEST_REG64, rnd);
498 test_reg = rocker_read64(rocker, TEST_REG64);
499 if (test_reg != rnd * 2) {
500 dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
508 static int rocker_dma_test_one(const struct rocker *rocker,
509 struct rocker_wait *wait, u32 test_type,
510 dma_addr_t dma_handle, const unsigned char *buf,
511 const unsigned char *expect, size_t size)
513 const struct pci_dev *pdev = rocker->pdev;
516 rocker_wait_reset(wait);
517 rocker_write32(rocker, TEST_DMA_CTRL, test_type);
519 if (!rocker_wait_event_timeout(wait, HZ / 10)) {
520 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
524 for (i = 0; i < size; i++) {
525 if (buf[i] != expect[i]) {
526 dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
527 buf[i], i, expect[i]);
/* DMA self-test buffer size (spans several pages to exercise the mapping)
 * and the byte pattern the FILL test expects the device to write.
 */
534 #define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
535 #define ROCKER_TEST_DMA_FILL_PATTERN 0x96
/* Run the FILL / CLEAR / INVERT DMA self-tests against a buffer placed at
 * the given byte @offset (exercises unaligned DMA).  Allocates twice the
 * test size: first half is the DMA target, second half holds the expected
 * contents.  Returns 0 on success.
 * NOTE(review): several lines are missing from this excerpt, including
 * allocation-failure handling, the expected-byte computation in the INVERT
 * loop (around line 581), and the goto-cleanup labels.
 */
537 static int rocker_dma_test_offset(const struct rocker *rocker,
538 struct rocker_wait *wait, int offset)
540 struct pci_dev *pdev = rocker->pdev;
541 unsigned char *alloc;
543 unsigned char *expect;
544 dma_addr_t dma_handle;
548 alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
549 GFP_KERNEL | GFP_DMA);
552 buf = alloc + offset;
553 expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
555 dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
556 PCI_DMA_BIDIRECTIONAL);
557 if (pci_dma_mapping_error(pdev, dma_handle)) {
562 rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
563 rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
565 memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
566 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
567 dma_handle, buf, expect,
568 ROCKER_TEST_DMA_BUF_SIZE);
572 memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
573 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
574 dma_handle, buf, expect,
575 ROCKER_TEST_DMA_BUF_SIZE);
579 prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
580 for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
582 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
583 dma_handle, buf, expect,
584 ROCKER_TEST_DMA_BUF_SIZE);
589 pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
590 PCI_DMA_BIDIRECTIONAL);
/* Run the offset DMA test at offsets 0..7 to cover all byte alignments.
 * NOTE(review): the error-check/early-return inside the loop and the final
 * return are missing from this excerpt.
 */
597 static int rocker_dma_test(const struct rocker *rocker,
598 struct rocker_wait *wait)
603 for (i = 0; i < 8; i++) {
604 err = rocker_dma_test_offset(rocker, wait, i);
611 static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
613 struct rocker_wait *wait = dev_id;
615 rocker_wait_wake_up(wait);
/* Probe-time hardware smoke test: register echo test, test-IRQ delivery,
 * then the DMA self-tests, using a temporary on-stack rocker_wait and the
 * dedicated TEST MSI-X vector (freed before returning).
 * NOTE(review): error-path gotos and return statements are missing from
 * this excerpt.
 */
620 static int rocker_basic_hw_test(const struct rocker *rocker)
622 const struct pci_dev *pdev = rocker->pdev;
623 struct rocker_wait wait;
626 err = rocker_reg_test(rocker);
628 dev_err(&pdev->dev, "reg test failed\n");
632 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
633 rocker_test_irq_handler, 0,
634 rocker_driver_name, &wait);
636 dev_err(&pdev->dev, "cannot assign test irq\n");
640 rocker_wait_init(&wait);
641 rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
643 if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
644 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
649 err = rocker_dma_test(rocker, &wait);
651 dev_err(&pdev->dev, "dma test failed\n");
654 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
658 /******************************************
659 * DMA rings and descriptors manipulations
660 ******************************************/
662 static u32 __pos_inc(u32 pos, size_t limit)
664 return ++pos == limit ? 0 : pos;
/* Translate the device's completion-error code (with the GEN bit masked
 * off) into a negative Linux errno; 0 means success.
 * NOTE(review): the switch statement dispatching the ROCKER_E* codes is
 * mostly missing from this excerpt (only a few case labels remain).
 */
667 static int rocker_desc_err(const struct rocker_desc_info *desc_info)
669 int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
684 case -ROCKER_EMSGSIZE:
686 case -ROCKER_ENOTSUP:
688 case -ROCKER_ENOBUFS:
695 static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
697 desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
700 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
702 u32 comp_err = desc_info->desc->comp_err;
704 return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
708 rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
710 return (void *)(uintptr_t)desc_info->desc->cookie;
713 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
716 desc_info->desc->cookie = (uintptr_t) ptr;
719 static struct rocker_desc_info *
720 rocker_desc_head_get(const struct rocker_dma_ring_info *info)
722 static struct rocker_desc_info *desc_info;
723 u32 head = __pos_inc(info->head, info->size);
725 desc_info = &info->desc_info[info->head];
726 if (head == info->tail)
727 return NULL; /* ring full */
728 desc_info->tlv_size = 0;
732 static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
734 desc_info->desc->buf_size = desc_info->data_size;
735 desc_info->desc->tlv_size = desc_info->tlv_size;
738 static void rocker_desc_head_set(const struct rocker *rocker,
739 struct rocker_dma_ring_info *info,
740 const struct rocker_desc_info *desc_info)
742 u32 head = __pos_inc(info->head, info->size);
744 BUG_ON(head == info->tail);
745 rocker_desc_commit(desc_info);
747 rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
750 static struct rocker_desc_info *
751 rocker_desc_tail_get(struct rocker_dma_ring_info *info)
753 static struct rocker_desc_info *desc_info;
755 if (info->tail == info->head)
756 return NULL; /* nothing to be done between head and tail */
757 desc_info = &info->desc_info[info->tail];
758 if (!rocker_desc_gen(desc_info))
759 return NULL; /* gen bit not set, desc is not ready yet */
760 info->tail = __pos_inc(info->tail, info->size);
761 desc_info->tlv_size = desc_info->desc->tlv_size;
765 static void rocker_dma_ring_credits_set(const struct rocker *rocker,
766 const struct rocker_dma_ring_info *info,
770 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
773 static unsigned long rocker_dma_ring_size_fix(size_t size)
775 return max(ROCKER_DMA_SIZE_MIN,
776 min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
/* Allocate a descriptor ring: the sw-side desc_info array plus a coherent
 * DMA area for the hw descriptors, then reset and program the ring's
 * address/size registers.  @size must already be fixed up by
 * rocker_dma_ring_size_fix().
 * NOTE(review): the info->size/type/head/tail initialization, the
 * pci_alloc_consistent failure check and the return statements are missing
 * from this excerpt.
 */
779 static int rocker_dma_ring_create(const struct rocker *rocker,
782 struct rocker_dma_ring_info *info)
786 BUG_ON(size != rocker_dma_ring_size_fix(size));
791 info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
793 if (!info->desc_info)
796 info->desc = pci_alloc_consistent(rocker->pdev,
797 info->size * sizeof(*info->desc),
800 kfree(info->desc_info);
804 for (i = 0; i < info->size; i++)
805 info->desc_info[i].desc = &info->desc[i];
807 rocker_write32(rocker, DMA_DESC_CTRL(info->type),
808 ROCKER_DMA_DESC_CTRL_RESET);
809 rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
810 rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
815 static void rocker_dma_ring_destroy(const struct rocker *rocker,
816 const struct rocker_dma_ring_info *info)
818 rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
820 pci_free_consistent(rocker->pdev,
821 info->size * sizeof(struct rocker_desc),
822 info->desc, info->mapaddr);
823 kfree(info->desc_info);
826 static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
827 struct rocker_dma_ring_info *info)
831 BUG_ON(info->head || info->tail);
833 /* When ring is consumer, we need to advance head for each desc.
834 * That tells hw that the desc is ready to be used by it.
836 for (i = 0; i < info->size - 1; i++)
837 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
838 rocker_desc_commit(&info->desc_info[i]);
/* Allocate and DMA-map a data buffer of @buf_size for every descriptor in
 * the ring, recording buffer/mapping in both the sw desc_info and the hw
 * descriptor.  On failure, unwinds all mappings/buffers allocated so far.
 * NOTE(review): the allocation/mapping failure gotos, 'int i; int err;'
 * declarations and the return statements are missing from this excerpt.
 */
841 static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
842 const struct rocker_dma_ring_info *info,
843 int direction, size_t buf_size)
845 struct pci_dev *pdev = rocker->pdev;
849 for (i = 0; i < info->size; i++) {
850 struct rocker_desc_info *desc_info = &info->desc_info[i];
851 struct rocker_desc *desc = &info->desc[i];
852 dma_addr_t dma_handle;
855 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
861 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
862 if (pci_dma_mapping_error(pdev, dma_handle)) {
868 desc_info->data = buf;
869 desc_info->data_size = buf_size;
870 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
872 desc->buf_addr = dma_handle;
873 desc->buf_size = buf_size;
/* error unwind: unmap and free every buffer allocated before the failure */
878 for (i--; i >= 0; i--) {
879 const struct rocker_desc_info *desc_info = &info->desc_info[i];
881 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
882 desc_info->data_size, direction);
883 kfree(desc_info->data);
/* Unmap and free every descriptor data buffer in the ring; also clears
 * each hw descriptor's buf_addr/buf_size (lines missing from this excerpt,
 * around line 899).
 */
888 static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
889 const struct rocker_dma_ring_info *info,
892 struct pci_dev *pdev = rocker->pdev;
895 for (i = 0; i < info->size; i++) {
896 const struct rocker_desc_info *desc_info = &info->desc_info[i];
897 struct rocker_desc *desc = &info->desc[i];
901 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
902 desc_info->data_size, direction);
903 kfree(desc_info->data);
/* Create the device-global command and event rings with their buffers,
 * and hand the event ring to the hw producer.  Unwinds via goto labels on
 * failure.  NOTE(review): 'int err;' initialization checks ('if (err)')
 * and the success/final 'return err;' lines are missing from this excerpt.
 */
907 static int rocker_dma_rings_init(struct rocker *rocker)
909 const struct pci_dev *pdev = rocker->pdev;
912 err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
913 ROCKER_DMA_CMD_DEFAULT_SIZE,
916 dev_err(&pdev->dev, "failed to create command dma ring\n");
920 spin_lock_init(&rocker->cmd_ring_lock);
922 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
923 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
925 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
926 goto err_dma_cmd_ring_bufs_alloc;
929 err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
930 ROCKER_DMA_EVENT_DEFAULT_SIZE,
931 &rocker->event_ring);
933 dev_err(&pdev->dev, "failed to create event dma ring\n");
934 goto err_dma_event_ring_create;
937 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
938 PCI_DMA_FROMDEVICE, PAGE_SIZE);
940 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
941 goto err_dma_event_ring_bufs_alloc;
943 rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
946 err_dma_event_ring_bufs_alloc:
947 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
948 err_dma_event_ring_create:
949 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
950 PCI_DMA_BIDIRECTIONAL);
951 err_dma_cmd_ring_bufs_alloc:
952 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
956 static void rocker_dma_rings_fini(struct rocker *rocker)
958 rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
959 PCI_DMA_BIDIRECTIONAL);
960 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
961 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
962 PCI_DMA_BIDIRECTIONAL);
963 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
/* DMA-map an rx skb's data area and describe it to hw via FRAG_ADDR /
 * FRAG_MAX_LEN TLVs in the descriptor.  On TLV overflow, unmaps and zeroes
 * tlv_size so hw skips the descriptor.
 * NOTE(review): the PCI_DMA_FROMDEVICE direction argument (line 975),
 * mapping-error return value and 'return 0;' are missing from this excerpt.
 */
966 static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
967 struct rocker_desc_info *desc_info,
968 struct sk_buff *skb, size_t buf_len)
970 const struct rocker *rocker = rocker_port->rocker;
971 struct pci_dev *pdev = rocker->pdev;
972 dma_addr_t dma_handle;
974 dma_handle = pci_map_single(pdev, skb->data, buf_len,
976 if (pci_dma_mapping_error(pdev, dma_handle))
978 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
979 goto tlv_put_failure;
980 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
981 goto tlv_put_failure;
985 pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
986 desc_info->tlv_size = 0;
990 static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
992 return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
/* Allocate an rx skb for a descriptor and map it for DMA, stashing the skb
 * pointer in the descriptor cookie.  Clears cookie/tlv_size first so hw
 * skips the descriptor on any failure.
 * NOTE(review): the skb NULL check, error returns and 'return 0;' are
 * missing from this excerpt.
 */
995 static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
996 struct rocker_desc_info *desc_info)
998 struct net_device *dev = rocker_port->dev;
1000 size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1003 /* Ensure that hw will see tlv_size zero in case of an error.
1004 * That tells hw to use another descriptor.
1006 rocker_desc_cookie_ptr_set(desc_info, NULL);
1007 desc_info->tlv_size = 0;
1009 skb = netdev_alloc_skb_ip_align(dev, buf_len);
1012 err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
1014 dev_kfree_skb_any(skb);
1017 rocker_desc_cookie_ptr_set(desc_info, skb);
/* Undo the DMA mapping recorded in the rx descriptor's FRAG_ADDR /
 * FRAG_MAX_LEN TLVs; silently returns if either TLV is absent.
 * NOTE(review): the early 'return;' after the attribute check is missing
 * from this excerpt.
 */
1021 static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
1022 const struct rocker_tlv **attrs)
1024 struct pci_dev *pdev = rocker->pdev;
1025 dma_addr_t dma_handle;
1028 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1029 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1031 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1032 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1033 pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
/* Free the skb attached to an rx descriptor (cookie), unmapping its DMA
 * buffer first.  NOTE(review): the '!skb' early return is missing from
 * this excerpt.
 */
1036 static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
1037 const struct rocker_desc_info *desc_info)
1039 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
1040 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1044 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1045 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1046 dev_kfree_skb_any(skb);
/* Populate every rx ring descriptor with a mapped skb; on failure, frees
 * the skbs attached so far.  NOTE(review): the 'if (err) goto rollback;'
 * inside the loop and return statements are missing from this excerpt.
 */
1049 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
1051 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1052 const struct rocker *rocker = rocker_port->rocker;
1056 for (i = 0; i < rx_ring->size; i++) {
1057 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
1058 &rx_ring->desc_info[i]);
/* error unwind: free skbs attached before the failure */
1065 for (i--; i >= 0; i--)
1066 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1070 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
1072 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1073 const struct rocker *rocker = rocker_port->rocker;
1076 for (i = 0; i < rx_ring->size; i++)
1077 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
/* Create a port's tx and rx rings with buffers, attach rx skbs, and hand
 * the rx ring to the hw producer.  Unwinds via goto labels on failure.
 * NOTE(review): the tx direction argument (around lines 1095/1135),
 * 'if (err)' checks and return statements are missing from this excerpt.
 */
1080 static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1082 struct rocker *rocker = rocker_port->rocker;
1085 err = rocker_dma_ring_create(rocker,
1086 ROCKER_DMA_TX(rocker_port->port_number),
1087 ROCKER_DMA_TX_DEFAULT_SIZE,
1088 &rocker_port->tx_ring);
1090 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1094 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1096 ROCKER_DMA_TX_DESC_SIZE);
1098 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1099 goto err_dma_tx_ring_bufs_alloc;
1102 err = rocker_dma_ring_create(rocker,
1103 ROCKER_DMA_RX(rocker_port->port_number),
1104 ROCKER_DMA_RX_DEFAULT_SIZE,
1105 &rocker_port->rx_ring);
1107 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1108 goto err_dma_rx_ring_create;
1111 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1112 PCI_DMA_BIDIRECTIONAL,
1113 ROCKER_DMA_RX_DESC_SIZE);
1115 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1116 goto err_dma_rx_ring_bufs_alloc;
1119 err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
1121 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1122 goto err_dma_rx_ring_skbs_alloc;
1124 rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1128 err_dma_rx_ring_skbs_alloc:
1129 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1130 PCI_DMA_BIDIRECTIONAL);
1131 err_dma_rx_ring_bufs_alloc:
1132 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1133 err_dma_rx_ring_create:
1134 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1136 err_dma_tx_ring_bufs_alloc:
1137 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
/* Tear down a port's rx and tx rings: free rx skbs, then buffers and
 * rings; inverse of rocker_port_dma_rings_init().
 * NOTE(review): the tx bufs_free direction argument (line 1150) is missing
 * from this excerpt.
 */
1141 static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1143 struct rocker *rocker = rocker_port->rocker;
1145 rocker_dma_rx_ring_skbs_free(rocker_port);
1146 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1147 PCI_DMA_BIDIRECTIONAL);
1148 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1149 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1151 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1154 static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1157 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1160 val |= 1ULL << rocker_port->pport;
1162 val &= ~(1ULL << rocker_port->pport);
1163 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1166 /********************************
1167 * Interrupt handler and helpers
1168 ********************************/
/* Command-ring completion IRQ: for each completed descriptor either
 * destroy the wait object (nowait commands) or wake the sleeping issuer,
 * then return the consumed credits to hw.
 * NOTE(review): the 'if (wait->nowait)'/else branching, credits++ and the
 * IRQ_HANDLED return are missing from this excerpt.
 */
1170 static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1172 struct rocker *rocker = dev_id;
1173 const struct rocker_desc_info *desc_info;
1174 struct rocker_wait *wait;
1177 spin_lock(&rocker->cmd_ring_lock);
1178 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1179 wait = rocker_desc_cookie_ptr_get(desc_info);
1181 rocker_desc_gen_clear(desc_info);
1182 rocker_wait_destroy(NULL, wait);
1184 rocker_wait_wake_up(wait);
1188 spin_unlock(&rocker->cmd_ring_lock);
1189 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1194 static void rocker_port_link_up(const struct rocker_port *rocker_port)
1196 netif_carrier_on(rocker_port->dev);
1197 netdev_info(rocker_port->dev, "Link is up\n");
1200 static void rocker_port_link_down(const struct rocker_port *rocker_port)
1202 netif_carrier_off(rocker_port->dev);
1203 netdev_info(rocker_port->dev, "Link is down\n");
/* Handle a LINK_CHANGED event: parse pport + linkup TLVs, look up the
 * port (pport is 1-based, ports[] is 0-based) and update carrier state if
 * it actually changed.
 * NOTE(review): the -EIO returns for malformed events, the 'port_number ='
 * and 'if (link_up)'/else lines, and the final 'return 0;' are missing
 * from this excerpt.
 */
1206 static int rocker_event_link_change(const struct rocker *rocker,
1207 const struct rocker_tlv *info)
1209 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
1210 unsigned int port_number;
1212 struct rocker_port *rocker_port;
1214 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
1215 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
1216 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1219 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
1220 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1222 if (port_number >= rocker->port_count)
1225 rocker_port = rocker->ports[port_number];
1226 if (netif_carrier_ok(rocker_port->dev) != link_up) {
1228 rocker_port_link_up(rocker_port);
1230 rocker_port_link_down(rocker_port);
/* Forward declaration: rocker_port_fdb() (defined later in the file) is
 * needed by the MAC/VLAN-seen event handler below.
 */
1236 static int rocker_port_fdb(struct rocker_port *rocker_port,
1237 struct switchdev_trans *trans,
1238 const unsigned char *addr,
1239 __be16 vlan_id, int flags);
/* Handle a MAC_VLAN_SEEN (hw MAC learning) event: parse pport/mac/vlan,
 * and record the learned address in the FDB unless the port's STP state
 * forbids learning.  Runs in IRQ context, hence NOWAIT|LEARNED flags.
 * NOTE(review): the -EIO returns for malformed events, the 'port_number ='
 * assignment prefix, '__be16 vlan_id;' declaration and the 'return 0;' for
 * the non-learning STP states are missing from this excerpt.
 */
1241 static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
1242 const struct rocker_tlv *info)
1244 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
1245 unsigned int port_number;
1246 struct rocker_port *rocker_port;
1247 const unsigned char *addr;
1248 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
1251 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
1252 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
1253 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1254 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1257 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
1258 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
1259 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
1261 if (port_number >= rocker->port_count)
1264 rocker_port = rocker->ports[port_number];
1266 if (rocker_port->stp_state != BR_STATE_LEARNING &&
1267 rocker_port->stp_state != BR_STATE_FORWARDING)
1270 return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
/* Dispatch one event descriptor by its TYPE TLV to the link-change or
 * MAC/VLAN-seen handler.
 * NOTE(review): the -EIO return for malformed events, 'u16 type;'
 * declaration, 'switch (type) {' line and the default -EOPNOTSUPP return
 * are missing from this excerpt.
 */
1273 static int rocker_event_process(const struct rocker *rocker,
1274 const struct rocker_desc_info *desc_info)
1276 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1277 const struct rocker_tlv *info;
1280 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1281 if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1282 !attrs[ROCKER_TLV_EVENT_INFO])
1285 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1286 info = attrs[ROCKER_TLV_EVENT_INFO];
1289 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1290 return rocker_event_link_change(rocker, info);
1291 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1292 return rocker_event_mac_vlan_seen(rocker, info);
/* Event-ring IRQ: drain completed event descriptors, process each (errors
 * are logged but not fatal), recycle the descriptor back to hw, and return
 * the consumed credits.
 * NOTE(review): 'int credits = 0;', 'credits++;' and the IRQ_HANDLED
 * return are missing from this excerpt.
 */
1298 static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1300 struct rocker *rocker = dev_id;
1301 const struct pci_dev *pdev = rocker->pdev;
1302 const struct rocker_desc_info *desc_info;
1306 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1307 err = rocker_desc_err(desc_info);
1309 dev_err(&pdev->dev, "event desc received with err %d\n",
1312 err = rocker_event_process(rocker, desc_info);
1314 dev_err(&pdev->dev, "event processing failed with err %d\n",
1317 rocker_desc_gen_clear(desc_info);
1318 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1321 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1326 static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1328 struct rocker_port *rocker_port = dev_id;
1330 napi_schedule(&rocker_port->napi_tx);
1334 static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1336 struct rocker_port *rocker_port = dev_id;
1338 napi_schedule(&rocker_port->napi_rx);
1342 /********************
1344 ********************/
/* Callback types for rocker_cmd_exec(): 'prep' fills the command TLVs
 * into the descriptor before it is posted; 'proc' parses the completed
 * descriptor's response.  Both receive an opaque priv pointer.
 */
1346 typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
1347 struct rocker_desc_info *desc_info,
1350 typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
1351 const struct rocker_desc_info *desc_info,
/* Issue one command on the cmd ring: build the descriptor via @prepare,
 * post it (unless in switchdev prepare phase), then — unless NOWAIT — wait
 * for completion and parse the response via @process.  The wait object is
 * attached to the descriptor cookie so the cmd IRQ handler can wake or
 * destroy it.
 * NOTE(review): the wait/desc NULL checks, error 'return's, the nowait
 * early-exit path and several error-path lines are missing from this
 * excerpt.
 */
1354 static int rocker_cmd_exec(struct rocker_port *rocker_port,
1355 struct switchdev_trans *trans, int flags,
1356 rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1357 rocker_cmd_proc_cb_t process, void *process_priv)
1359 struct rocker *rocker = rocker_port->rocker;
1360 struct rocker_desc_info *desc_info;
1361 struct rocker_wait *wait;
1362 bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
1363 unsigned long lock_flags;
1366 wait = rocker_wait_create(rocker_port, trans, flags);
1369 wait->nowait = nowait;
1371 spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
1373 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1375 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1380 err = prepare(rocker_port, desc_info, prepare_priv);
1382 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1386 rocker_desc_cookie_ptr_set(desc_info, wait);
1388 if (!switchdev_trans_ph_prepare(trans))
1389 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1391 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1396 if (!switchdev_trans_ph_prepare(trans))
1397 if (!rocker_wait_event_timeout(wait, HZ / 10))
1400 err = rocker_desc_err(desc_info);
1405 err = process(rocker_port, desc_info, process_priv);
1407 rocker_desc_gen_clear(desc_info);
1409 rocker_wait_destroy(trans, wait);
/* Build a GET_PORT_SETTINGS command: nested CMD_INFO containing only the
 * physical port number; shared by all get_port_settings variants. */
1414 rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
1415 struct rocker_desc_info *desc_info,
1418 struct rocker_tlv *cmd_info;
1420 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1421 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1423 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1426 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1427 rocker_port->pport))
1429 rocker_tlv_nest_end(desc_info, cmd_info);
/* Parse a GET_PORT_SETTINGS response into the caller-supplied
 * struct ethtool_cmd (@priv): speed, duplex and autoneg are required
 * attributes; transceiver/port/phy fields are filled with fixed values. */
1434 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
1435 const struct rocker_desc_info *desc_info,
1438 struct ethtool_cmd *ecmd = priv;
1439 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1440 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1445 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1446 if (!attrs[ROCKER_TLV_CMD_INFO])
1449 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1450 attrs[ROCKER_TLV_CMD_INFO])
1451 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1452 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1453 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1456 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1457 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1458 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1460 ecmd->transceiver = XCVR_INTERNAL;
1461 ecmd->supported = SUPPORTED_TP;
1462 ecmd->phy_address = 0xff;
1463 ecmd->port = PORT_TP;
1464 ethtool_cmd_speed_set(ecmd, speed);
1465 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1466 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
/* Parse a GET_PORT_SETTINGS response and copy the port MAC address
 * (must be exactly ETH_ALEN bytes) into @priv. */
1472 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
1473 const struct rocker_desc_info *desc_info,
1476 unsigned char *macaddr = priv;
1477 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1478 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1479 const struct rocker_tlv *attr;
1481 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1482 if (!attrs[ROCKER_TLV_CMD_INFO])
1485 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1486 attrs[ROCKER_TLV_CMD_INFO])
1487 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
/* reject malformed responses with a wrong-length MAC attribute */
1491 if (rocker_tlv_len(attr) != ETH_ALEN)
1494 ether_addr_copy(macaddr, rocker_tlv_data(attr));
/* Parse a GET_PORT_SETTINGS response and extract the port mode (u8)
 * into *@priv. */
1499 rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
1500 const struct rocker_desc_info *desc_info,
1504 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1505 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1506 const struct rocker_tlv *attr;
1508 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1509 if (!attrs[ROCKER_TLV_CMD_INFO])
1512 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1513 attrs[ROCKER_TLV_CMD_INFO])
1514 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
1518 *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
/* Parse a GET_PORT_SETTINGS response and copy the physical port name
 * into the struct port_name buffer (@priv), keeping only alphanumeric
 * characters and NUL-terminating the result. */
1528 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
1529 const struct rocker_desc_info *desc_info,
1532 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1533 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1534 struct port_name *name = priv;
1535 const struct rocker_tlv *attr;
1539 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1540 if (!attrs[ROCKER_TLV_CMD_INFO])
1543 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1544 attrs[ROCKER_TLV_CMD_INFO])
1545 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
/* never copy more than the caller's buffer can hold */
1549 len = min_t(size_t, rocker_tlv_len(attr), name->len);
1550 str = rocker_tlv_data(attr);
1552 /* make sure name only contains alphanumeric characters */
1553 for (i = j = 0; i < len; ++i) {
1554 if (isalnum(str[i])) {
1555 name->buf[j] = str[i];
1563 name->buf[j] = '\0';
/* Build a SET_PORT_SETTINGS command from a struct ethtool_cmd (@priv):
 * pport, speed, duplex and autoneg. */
1569 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1570 struct rocker_desc_info *desc_info,
1573 struct ethtool_cmd *ecmd = priv;
1574 struct rocker_tlv *cmd_info;
1576 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1577 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1579 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1582 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1583 rocker_port->pport))
1585 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1586 ethtool_cmd_speed(ecmd)))
1588 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1591 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1594 rocker_tlv_nest_end(desc_info, cmd_info);
/* Build a SET_PORT_SETTINGS command that programs a new MAC address
 * (@priv) for this port. */
1599 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
1600 struct rocker_desc_info *desc_info,
1603 const unsigned char *macaddr = priv;
1604 struct rocker_tlv *cmd_info;
1606 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1607 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1609 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1612 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1613 rocker_port->pport))
1615 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1618 rocker_tlv_nest_end(desc_info, cmd_info);
/* Build a SET_PORT_SETTINGS command that programs a new MTU for this
 * port; @priv points to the int MTU value. */
1623 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1624 struct rocker_desc_info *desc_info,
1627 int mtu = *(int *)priv;
1628 struct rocker_tlv *cmd_info;
1630 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1631 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1633 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1636 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1637 rocker_port->pport))
1639 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1642 rocker_tlv_nest_end(desc_info, cmd_info);
/* Build a SET_PORT_SETTINGS command that enables/disables MAC learning
 * on this port, derived from the BR_LEARNING bridge-port flag. */
1647 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
1648 struct rocker_desc_info *desc_info,
1651 struct rocker_tlv *cmd_info;
1653 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1654 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1656 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1659 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1660 rocker_port->pport))
1662 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1663 !!(rocker_port->brport_flags & BR_LEARNING)))
1665 rocker_tlv_nest_end(desc_info, cmd_info);
/* Fetch the port's ethtool settings from the device (synchronous). */
1669 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1670 struct ethtool_cmd *ecmd)
1672 return rocker_cmd_exec(rocker_port, NULL, 0,
1673 rocker_cmd_get_port_settings_prep, NULL,
1674 rocker_cmd_get_port_settings_ethtool_proc,
/* Fetch the port's MAC address from the device into @macaddr. */
1678 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1679 unsigned char *macaddr)
1681 return rocker_cmd_exec(rocker_port, NULL, 0,
1682 rocker_cmd_get_port_settings_prep, NULL,
1683 rocker_cmd_get_port_settings_macaddr_proc,
/* Fetch the port's operating mode from the device. */
1687 static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1690 return rocker_cmd_exec(rocker_port, NULL, 0,
1691 rocker_cmd_get_port_settings_prep, NULL,
1692 rocker_cmd_get_port_settings_mode_proc, p_mode);
/* Push new ethtool settings (speed/duplex/autoneg) to the device. */
1695 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1696 struct ethtool_cmd *ecmd)
1698 return rocker_cmd_exec(rocker_port, NULL, 0,
1699 rocker_cmd_set_port_settings_ethtool_prep,
/* Push a new MAC address to the device; no response parsing needed. */
1703 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1704 unsigned char *macaddr)
1706 return rocker_cmd_exec(rocker_port, NULL, 0,
1707 rocker_cmd_set_port_settings_macaddr_prep,
1708 macaddr, NULL, NULL);
/* Push a new MTU value to the device for this port. */
1711 static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1714 return rocker_cmd_exec(rocker_port, NULL, 0,
1715 rocker_cmd_set_port_settings_mtu_prep,
/* Sync the port's MAC-learning setting to the device, honoring the
 * switchdev transaction phase via @trans. */
1719 static int rocker_port_set_learning(struct rocker_port *rocker_port,
1720 struct switchdev_trans *trans)
1722 return rocker_cmd_exec(rocker_port, trans, 0,
1723 rocker_cmd_set_port_learning_prep,
/* Emit the TLVs for an ingress-port table flow entry: in_pport (+mask)
 * and the goto table. */
1728 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1729 const struct rocker_flow_tbl_entry *entry)
1731 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1732 entry->key.ig_port.in_pport))
1734 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1735 entry->key.ig_port.in_pport_mask))
1737 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1738 entry->key.ig_port.goto_tbl))
/* Emit the TLVs for a VLAN table flow entry; NEW_VLAN_ID is added only
 * when the entry rewrites an untagged frame's VLAN. */
1745 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1746 const struct rocker_flow_tbl_entry *entry)
1748 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1749 entry->key.vlan.in_pport))
1751 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1752 entry->key.vlan.vlan_id))
1754 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1755 entry->key.vlan.vlan_id_mask))
1757 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1758 entry->key.vlan.goto_tbl))
1760 if (entry->key.vlan.untagged &&
1761 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1762 entry->key.vlan.new_vlan_id))
/* Emit the TLVs for a termination-MAC table flow entry: in_pport,
 * ethertype, dst MAC (+mask), VLAN (+mask), goto table, and an optional
 * copy-to-CPU action. */
1769 rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1770 const struct rocker_flow_tbl_entry *entry)
1772 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1773 entry->key.term_mac.in_pport))
1775 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1776 entry->key.term_mac.in_pport_mask))
1778 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1779 entry->key.term_mac.eth_type))
1781 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1782 ETH_ALEN, entry->key.term_mac.eth_dst))
1784 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1785 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1787 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1788 entry->key.term_mac.vlan_id))
1790 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1791 entry->key.term_mac.vlan_id_mask))
1793 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1794 entry->key.term_mac.goto_tbl))
1796 if (entry->key.term_mac.copy_to_cpu &&
1797 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1798 entry->key.term_mac.copy_to_cpu))
/* Emit the TLVs for a unicast-routing (IPv4) table flow entry:
 * ethertype, destination IP (+mask), goto table and group id. */
1805 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
1806 const struct rocker_flow_tbl_entry *entry)
1808 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1809 entry->key.ucast_routing.eth_type))
1811 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
1812 entry->key.ucast_routing.dst4))
1814 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
1815 entry->key.ucast_routing.dst4_mask))
1817 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1818 entry->key.ucast_routing.goto_tbl))
1820 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1821 entry->key.ucast_routing.group_id))
/* Emit the TLVs for a bridging table flow entry.  dst MAC, its mask,
 * VLAN and tunnel id are all optional and only emitted when set. */
1828 rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
1829 const struct rocker_flow_tbl_entry *entry)
1831 if (entry->key.bridge.has_eth_dst &&
1832 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1833 ETH_ALEN, entry->key.bridge.eth_dst))
1835 if (entry->key.bridge.has_eth_dst_mask &&
1836 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1837 ETH_ALEN, entry->key.bridge.eth_dst_mask))
1839 if (entry->key.bridge.vlan_id &&
1840 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1841 entry->key.bridge.vlan_id))
1843 if (entry->key.bridge.tunnel_id &&
1844 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
1845 entry->key.bridge.tunnel_id))
1847 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1848 entry->key.bridge.goto_tbl))
1850 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1851 entry->key.bridge.group_id))
1853 if (entry->key.bridge.copy_to_cpu &&
1854 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1855 entry->key.bridge.copy_to_cpu))
/* Emit the TLVs for an ACL-policy table flow entry: L2 match fields
 * always, plus IP proto/DSCP/ECN matches for IP ethertypes (the DSCP
 * and ECN fields are split out of the single ip_tos byte). */
1862 rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
1863 const struct rocker_flow_tbl_entry *entry)
1865 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1866 entry->key.acl.in_pport))
1868 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1869 entry->key.acl.in_pport_mask))
1871 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1872 ETH_ALEN, entry->key.acl.eth_src))
1874 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
1875 ETH_ALEN, entry->key.acl.eth_src_mask))
1877 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1878 ETH_ALEN, entry->key.acl.eth_dst))
1880 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1881 ETH_ALEN, entry->key.acl.eth_dst_mask))
1883 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1884 entry->key.acl.eth_type))
1886 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1887 entry->key.acl.vlan_id))
1889 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1890 entry->key.acl.vlan_id_mask))
/* IP-specific matches only apply to IP ethertypes */
1893 switch (ntohs(entry->key.acl.eth_type)) {
1896 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
1897 entry->key.acl.ip_proto))
1899 if (rocker_tlv_put_u8(desc_info,
1900 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
1901 entry->key.acl.ip_proto_mask))
/* DSCP is the low six bits of the TOS byte */
1903 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
1904 entry->key.acl.ip_tos & 0x3f))
1906 if (rocker_tlv_put_u8(desc_info,
1907 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
1908 entry->key.acl.ip_tos_mask & 0x3f))
/* ECN is the top two bits of the TOS byte */
1910 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
1911 (entry->key.acl.ip_tos & 0xc0) >> 6))
1913 if (rocker_tlv_put_u8(desc_info,
1914 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
1915 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
1920 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
1921 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1922 entry->key.acl.group_id))
/* Prep callback for flow add/mod commands: write the common header
 * TLVs (table id, priority, hardtime, cookie) then dispatch to the
 * per-table TLV emitter based on the entry's table id. */
1928 static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
1929 struct rocker_desc_info *desc_info,
1932 const struct rocker_flow_tbl_entry *entry = priv;
1933 struct rocker_tlv *cmd_info;
1936 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
1938 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1941 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
1944 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
1945 entry->key.priority))
1947 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
1949 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
1953 switch (entry->key.tbl_id) {
1954 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1955 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
1957 case ROCKER_OF_DPA_TABLE_ID_VLAN:
1958 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
1960 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1961 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
1963 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1964 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
1966 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1967 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
1969 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1970 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
1980 rocker_tlv_nest_end(desc_info, cmd_info);
/* Prep callback for flow delete: the hardware only needs the cookie
 * to identify the flow being removed. */
1985 static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
1986 struct rocker_desc_info *desc_info,
1989 const struct rocker_flow_tbl_entry *entry = priv;
1990 struct rocker_tlv *cmd_info;
1992 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
1994 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1997 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2000 rocker_tlv_nest_end(desc_info, cmd_info);
/* Emit the TLVs for an L2-interface group entry: the out pport is
 * encoded in the group id; pop_vlan controls untagging on egress. */
2006 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2007 struct rocker_group_tbl_entry *entry)
2009 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2010 ROCKER_GROUP_PORT_GET(entry->group_id)))
2012 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2013 entry->l2_interface.pop_vlan))
/* Emit the TLVs for an L2-rewrite group entry: lower-level group id,
 * plus optional src/dst MAC rewrite and VLAN rewrite when non-zero. */
2020 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2021 const struct rocker_group_tbl_entry *entry)
2023 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2024 entry->l2_rewrite.group_id))
2026 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2027 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2028 ETH_ALEN, entry->l2_rewrite.eth_src))
2030 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2031 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2032 ETH_ALEN, entry->l2_rewrite.eth_dst))
2034 if (entry->l2_rewrite.vlan_id &&
2035 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2036 entry->l2_rewrite.vlan_id))
/* Emit the member-group-id list for flood/mcast groups: a count TLV
 * followed by a nested array of u32 group ids. */
2043 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2044 const struct rocker_group_tbl_entry *entry)
2047 struct rocker_tlv *group_ids;
2049 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2050 entry->group_count))
2053 group_ids = rocker_tlv_nest_start(desc_info,
2054 ROCKER_TLV_OF_DPA_GROUP_IDS);
2058 for (i = 0; i < entry->group_count; i++)
2059 /* Note TLV array is 1-based */
2060 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2063 rocker_tlv_nest_end(desc_info, group_ids);
/* Emit the TLVs for an L3-unicast group entry: optional src/dst MAC
 * and VLAN rewrite, plus TTL-check flag and the lower group id. */
2069 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2070 const struct rocker_group_tbl_entry *entry)
2072 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2073 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2074 ETH_ALEN, entry->l3_unicast.eth_src))
2076 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2077 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2078 ETH_ALEN, entry->l3_unicast.eth_dst))
2080 if (entry->l3_unicast.vlan_id &&
2081 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2082 entry->l3_unicast.vlan_id))
2084 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2085 entry->l3_unicast.ttl_check))
2087 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2088 entry->l3_unicast.group_id))
/* Prep callback for group add/mod commands: write the group id header
 * TLV then dispatch to the per-group-type TLV emitter based on the
 * type encoded in the group id. */
2094 static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
2095 struct rocker_desc_info *desc_info,
2098 struct rocker_group_tbl_entry *entry = priv;
2099 struct rocker_tlv *cmd_info;
2102 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2104 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2108 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2112 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2113 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2114 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2116 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2117 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2119 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2120 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2121 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2123 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2124 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2134 rocker_tlv_nest_end(desc_info, cmd_info);
/* Prep callback for group delete: only the group id is needed to
 * identify the group being removed. */
2139 static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
2140 struct rocker_desc_info *desc_info,
2143 const struct rocker_group_tbl_entry *entry = priv;
2144 struct rocker_tlv *cmd_info;
2146 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2148 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2151 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2154 rocker_tlv_nest_end(desc_info, cmd_info);
2159 /***************************************************
2160 * Flow, group, FDB, internal VLAN and neigh tables
2161 ***************************************************/
/* Initialize the switch's software shadow tables (flow, group, FDB,
 * internal VLAN, neigh) and their protecting spinlocks. */
2163 static int rocker_init_tbls(struct rocker *rocker)
2165 hash_init(rocker->flow_tbl);
2166 spin_lock_init(&rocker->flow_tbl_lock);
2168 hash_init(rocker->group_tbl);
2169 spin_lock_init(&rocker->group_tbl_lock);
2171 hash_init(rocker->fdb_tbl);
2172 spin_lock_init(&rocker->fdb_tbl_lock);
2174 hash_init(rocker->internal_vlan_tbl);
2175 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2177 hash_init(rocker->neigh_tbl);
2178 spin_lock_init(&rocker->neigh_tbl_lock);
/* Tear down all shadow tables: unlink every entry from each hash table
 * under its lock.  Called on driver teardown.
 * NOTE(review): the kfree of the unlinked entries is not visible in
 * this view — confirm entries are freed after hash_del. */
2183 static void rocker_free_tbls(struct rocker *rocker)
2185 unsigned long flags;
2186 struct rocker_flow_tbl_entry *flow_entry;
2187 struct rocker_group_tbl_entry *group_entry;
2188 struct rocker_fdb_tbl_entry *fdb_entry;
2189 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2190 struct rocker_neigh_tbl_entry *neigh_entry;
2191 struct hlist_node *tmp;
2194 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2195 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2196 hash_del(&flow_entry->entry);
2197 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2199 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2200 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2201 hash_del(&group_entry->entry);
2202 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2204 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2205 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2206 hash_del(&fdb_entry->entry);
2207 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2209 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2210 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2211 tmp, internal_vlan_entry, entry)
2212 hash_del(&internal_vlan_entry->entry);
2213 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2215 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2216 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2217 hash_del(&neigh_entry->entry);
2218 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
/* Look up a flow entry by its key: hash on the precomputed key_crc32,
 * then memcmp the key (match->key_len if set, full key otherwise).
 * Caller must hold flow_tbl_lock. */
2221 static struct rocker_flow_tbl_entry *
2222 rocker_flow_tbl_find(const struct rocker *rocker,
2223 const struct rocker_flow_tbl_entry *match)
2225 struct rocker_flow_tbl_entry *found;
2226 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2228 hash_for_each_possible(rocker->flow_tbl, found,
2229 entry, match->key_crc32) {
2230 if (memcmp(&found->key, &match->key, key_len) == 0)
/* Add (or modify) a flow entry.  If an entry with the same key already
 * exists, the old one is replaced (reusing its cookie) and the command
 * becomes FLOW_MOD; otherwise a fresh cookie is assigned and FLOW_ADD
 * is used.  Hash-table updates are skipped during the switchdev
 * prepare phase.  Finally the add/mod is pushed to the device. */
2237 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2238 struct switchdev_trans *trans, int flags,
2239 struct rocker_flow_tbl_entry *match)
2241 struct rocker *rocker = rocker_port->rocker;
2242 struct rocker_flow_tbl_entry *found;
2243 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2244 unsigned long lock_flags;
2246 match->key_crc32 = crc32(~0, &match->key, key_len);
2248 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2250 found = rocker_flow_tbl_find(rocker, match);
/* existing entry: keep its cookie, swap in the new key/actions */
2253 match->cookie = found->cookie;
2254 if (!switchdev_trans_ph_prepare(trans))
2255 hash_del(&found->entry);
2256 rocker_kfree(trans, found);
2258 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
/* new entry: allocate the next cookie */
2261 found->cookie = rocker->flow_tbl_next_cookie++;
2262 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
2265 if (!switchdev_trans_ph_prepare(trans))
2266 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2268 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
2270 return rocker_cmd_exec(rocker_port, trans, flags,
2271 rocker_cmd_flow_tbl_add, found, NULL, NULL);
/* Delete a flow entry matching @match's key: unlink it from the shadow
 * table (unless in the switchdev prepare phase), send FLOW_DEL to the
 * device, then free both the match template and the found entry. */
2274 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2275 struct switchdev_trans *trans, int flags,
2276 struct rocker_flow_tbl_entry *match)
2278 struct rocker *rocker = rocker_port->rocker;
2279 struct rocker_flow_tbl_entry *found;
2280 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2281 unsigned long lock_flags;
2284 match->key_crc32 = crc32(~0, &match->key, key_len);
2286 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2288 found = rocker_flow_tbl_find(rocker, match);
2291 if (!switchdev_trans_ph_prepare(trans))
2292 hash_del(&found->entry);
2293 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
2296 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
/* the match template is no longer needed once the entry is found */
2298 rocker_kfree(trans, match);
2301 err = rocker_cmd_exec(rocker_port, trans, flags,
2302 rocker_cmd_flow_tbl_del,
2304 rocker_kfree(trans, found);
/* Dispatch a flow-table operation: ROCKER_OP_FLAG_REMOVE selects
 * delete, otherwise add/modify. */
2310 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2311 struct switchdev_trans *trans, int flags,
2312 struct rocker_flow_tbl_entry *entry)
2314 if (flags & ROCKER_OP_FLAG_REMOVE)
2315 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
2317 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
/* Build and apply an ingress-port table entry sending matching pports
 * to @goto_tbl. */
2320 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2321 struct switchdev_trans *trans, int flags,
2322 u32 in_pport, u32 in_pport_mask,
2323 enum rocker_of_dpa_table_id goto_tbl)
2325 struct rocker_flow_tbl_entry *entry;
2327 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2331 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2332 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2333 entry->key.ig_port.in_pport = in_pport;
2334 entry->key.ig_port.in_pport_mask = in_pport_mask;
2335 entry->key.ig_port.goto_tbl = goto_tbl;
2337 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Build and apply a VLAN table entry; @untagged/@new_vlan_id describe
 * the VLAN assigned to untagged ingress frames. */
2340 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2341 struct switchdev_trans *trans, int flags,
2342 u32 in_pport, __be16 vlan_id,
2343 __be16 vlan_id_mask,
2344 enum rocker_of_dpa_table_id goto_tbl,
2345 bool untagged, __be16 new_vlan_id)
2347 struct rocker_flow_tbl_entry *entry;
2349 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2353 entry->key.priority = ROCKER_PRIORITY_VLAN;
2354 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2355 entry->key.vlan.in_pport = in_pport;
2356 entry->key.vlan.vlan_id = vlan_id;
2357 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2358 entry->key.vlan.goto_tbl = goto_tbl;
2360 entry->key.vlan.untagged = untagged;
2361 entry->key.vlan.new_vlan_id = new_vlan_id;
2363 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Build and apply a termination-MAC table entry.  Multicast dst MACs
 * route to the multicast-routing table at mcast priority; unicast dst
 * MACs route to the unicast-routing table at ucast priority. */
2366 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2367 struct switchdev_trans *trans,
2368 u32 in_pport, u32 in_pport_mask,
2369 __be16 eth_type, const u8 *eth_dst,
2370 const u8 *eth_dst_mask, __be16 vlan_id,
2371 __be16 vlan_id_mask, bool copy_to_cpu,
2374 struct rocker_flow_tbl_entry *entry;
2376 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2380 if (is_multicast_ether_addr(eth_dst)) {
2381 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2382 entry->key.term_mac.goto_tbl =
2383 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2385 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2386 entry->key.term_mac.goto_tbl =
2387 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2390 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2391 entry->key.term_mac.in_pport = in_pport;
2392 entry->key.term_mac.in_pport_mask = in_pport_mask;
2393 entry->key.term_mac.eth_type = eth_type;
2394 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2395 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2396 entry->key.term_mac.vlan_id = vlan_id;
2397 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2398 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2400 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Build and apply a bridging table entry.  Priority is chosen from
 * three properties: vlan vs tenant (tunnel) bridging, default vs exact
 * dst-MAC match, and wildcarded vs full mask. */
2403 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2404 struct switchdev_trans *trans, int flags,
2405 const u8 *eth_dst, const u8 *eth_dst_mask,
2406 __be16 vlan_id, u32 tunnel_id,
2407 enum rocker_of_dpa_table_id goto_tbl,
2408 u32 group_id, bool copy_to_cpu)
2410 struct rocker_flow_tbl_entry *entry;
2412 bool vlan_bridging = !!vlan_id;
/* "default" entry: no dst MAC at all, or a masked (non-exact) match */
2413 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2416 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2420 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2423 entry->key.bridge.has_eth_dst = 1;
2424 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2427 entry->key.bridge.has_eth_dst_mask = 1;
2428 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2429 if (!ether_addr_equal(eth_dst_mask, ff_mac))
2433 priority = ROCKER_PRIORITY_UNKNOWN;
2434 if (vlan_bridging && dflt && wild)
2435 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2436 else if (vlan_bridging && dflt && !wild)
2437 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2438 else if (vlan_bridging && !dflt)
2439 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2440 else if (!vlan_bridging && dflt && wild)
2441 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2442 else if (!vlan_bridging && dflt && !wild)
2443 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2444 else if (!vlan_bridging && !dflt)
2445 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2447 entry->key.priority = priority;
2448 entry->key.bridge.vlan_id = vlan_id;
2449 entry->key.bridge.tunnel_id = tunnel_id;
2450 entry->key.bridge.goto_tbl = goto_tbl;
2451 entry->key.bridge.group_id = group_id;
2452 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2454 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Build and apply an IPv4 unicast-routing table entry.  key_len is
 * truncated at group_id so that lookups compare only the match fields,
 * not the action (group id). */
2457 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2458 struct switchdev_trans *trans,
2459 __be16 eth_type, __be32 dst,
2460 __be32 dst_mask, u32 priority,
2461 enum rocker_of_dpa_table_id goto_tbl,
2462 u32 group_id, int flags)
2464 struct rocker_flow_tbl_entry *entry;
2466 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2470 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2471 entry->key.priority = priority;
2472 entry->key.ucast_routing.eth_type = eth_type;
2473 entry->key.ucast_routing.dst4 = dst;
2474 entry->key.ucast_routing.dst4_mask = dst_mask;
2475 entry->key.ucast_routing.goto_tbl = goto_tbl;
2476 entry->key.ucast_routing.group_id = group_id;
2477 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2478 ucast_routing.group_id);
2480 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* Build and apply an ACL-policy table entry.  Priority: normal by
 * default, dflt for multicast-mask dst matches, ctrl for link-local
 * dst addresses. */
2483 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2484 struct switchdev_trans *trans, int flags,
2485 u32 in_pport, u32 in_pport_mask,
2486 const u8 *eth_src, const u8 *eth_src_mask,
2487 const u8 *eth_dst, const u8 *eth_dst_mask,
2488 __be16 eth_type, __be16 vlan_id,
2489 __be16 vlan_id_mask, u8 ip_proto,
2490 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2494 struct rocker_flow_tbl_entry *entry;
2496 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2500 priority = ROCKER_PRIORITY_ACL_NORMAL;
2501 if (eth_dst && eth_dst_mask) {
2502 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2503 priority = ROCKER_PRIORITY_ACL_DFLT;
2504 else if (is_link_local_ether_addr(eth_dst))
2505 priority = ROCKER_PRIORITY_ACL_CTRL;
2508 entry->key.priority = priority;
2509 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2510 entry->key.acl.in_pport = in_pport;
2511 entry->key.acl.in_pport_mask = in_pport_mask;
2514 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2516 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2518 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2520 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2522 entry->key.acl.eth_type = eth_type;
2523 entry->key.acl.vlan_id = vlan_id;
2524 entry->key.acl.vlan_id_mask = vlan_id_mask;
2525 entry->key.acl.ip_proto = ip_proto;
2526 entry->key.acl.ip_proto_mask = ip_proto_mask;
2527 entry->key.acl.ip_tos = ip_tos;
2528 entry->key.acl.ip_tos_mask = ip_tos_mask;
2529 entry->key.acl.group_id = group_id;
2531 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
/* rocker_group_tbl_find() - look up a group-table entry by group_id in the
 * driver's shadow hashtable.  Caller must hold rocker->group_tbl_lock.
 */
2534 static struct rocker_group_tbl_entry *
2535 rocker_group_tbl_find(const struct rocker *rocker,
2536 const struct rocker_group_tbl_entry *match)
2538 struct rocker_group_tbl_entry *found;
2540 hash_for_each_possible(rocker->group_tbl, found,
2541 entry, match->group_id)
2542 if (found->group_id == match->group_id)

/* rocker_group_tbl_entry_free() - free a group entry; flood/mcast group
 * types also own a separately-allocated group_ids array that must be
 * released first.
 */
2549 static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
2550 struct rocker_group_tbl_entry *entry)
2552 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2553 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2554 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2555 rocker_kfree(trans, entry->group_ids);
2560 rocker_kfree(trans, entry);

/* rocker_group_tbl_add() - add-or-modify a group entry.  If the group_id
 * already exists the old shadow entry is replaced (GROUP_MOD to hardware),
 * otherwise the new entry is inserted (GROUP_ADD).  hash_del/hash_add are
 * skipped during the switchdev prepare phase so prepare leaves no state.
 */
2563 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2564 struct switchdev_trans *trans, int flags,
2565 struct rocker_group_tbl_entry *match)
2567 struct rocker *rocker = rocker_port->rocker;
2568 struct rocker_group_tbl_entry *found;
2569 unsigned long lock_flags;
2571 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2573 found = rocker_group_tbl_find(rocker, match);
2576 if (!switchdev_trans_ph_prepare(trans))
2577 hash_del(&found->entry);
2578 rocker_group_tbl_entry_free(trans, found)
2580 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2583 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2586 if (!switchdev_trans_ph_prepare(trans))
2587 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2589 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
/* Issue the ADD/MOD command to the device outside the spinlock. */
2591 return rocker_cmd_exec(rocker_port, trans, flags,
2592 rocker_cmd_group_tbl_add, found, NULL, NULL);

/* rocker_group_tbl_del() - remove a group entry if present; the caller's
 * @match is always freed, and on a hit the found entry is unhashed,
 * a GROUP_DEL command is sent, then the found entry is freed too.
 */
2595 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2596 struct switchdev_trans *trans, int flags,
2597 struct rocker_group_tbl_entry *match)
2599 struct rocker *rocker = rocker_port->rocker;
2600 struct rocker_group_tbl_entry *found;
2601 unsigned long lock_flags;
2604 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2606 found = rocker_group_tbl_find(rocker, match);
2609 if (!switchdev_trans_ph_prepare(trans))
2610 hash_del(&found->entry);
2611 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2614 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2616 rocker_group_tbl_entry_free(trans, match);
2619 err = rocker_cmd_exec(rocker_port, trans, flags,
2620 rocker_cmd_group_tbl_del,
2622 rocker_group_tbl_entry_free(trans, found);

/* rocker_group_tbl_do() - dispatch to add or del based on REMOVE flag;
 * takes ownership of @entry in either case.
 */
2628 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2629 struct switchdev_trans *trans, int flags,
2630 struct rocker_group_tbl_entry *entry)
2632 if (flags & ROCKER_OP_FLAG_REMOVE)
2633 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
2635 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
/* rocker_group_l2_interface() - add/del an L2 interface group entry for
 * (@vlan_id, @out_pport); pop_vlan selects untagged egress.
 */
2638 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2639 struct switchdev_trans *trans, int flags,
2640 __be16 vlan_id, u32 out_pport,
2643 struct rocker_group_tbl_entry *entry;
2645 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2649 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2650 entry->l2_interface.pop_vlan = pop_vlan;
2652 return rocker_group_tbl_do(rocker_port, trans, flags, entry);

/* rocker_group_l2_fan_out() - add/del a fan-out group @group_id referring
 * to @group_count member group IDs (copied into an owned array, which
 * rocker_group_tbl_entry_free() later releases).
 */
2655 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2656 struct switchdev_trans *trans,
2657 int flags, u8 group_count,
2658 const u32 *group_ids, u32 group_id)
2660 struct rocker_group_tbl_entry *entry;
2662 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2666 entry->group_id = group_id;
2667 entry->group_count = group_count;
2669 entry->group_ids = rocker_kcalloc(trans, flags,
2670 group_count, sizeof(u32));
/* Unwind the entry allocation if the member array cannot be allocated. */
2671 if (!entry->group_ids) {
2672 rocker_kfree(trans, entry);
2675 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2677 return rocker_group_tbl_do(rocker_port, trans, flags, entry);

/* rocker_group_l2_flood() - thin wrapper: an L2 flood group is a fan-out
 * group over the member L2 interface groups.
 */
2680 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2681 struct switchdev_trans *trans, int flags,
2682 __be16 vlan_id, u8 group_count,
2683 const u32 *group_ids, u32 group_id)
2685 return rocker_group_l2_fan_out(rocker_port, trans, flags,
2686 group_count, group_ids,

/* rocker_group_l3_unicast() - add/del an L3 unicast group: rewrite
 * src/dst MAC and VLAN, optional TTL check, then chain to the L2
 * interface group for (@vlan_id, @pport).
 */
2690 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2691 struct switchdev_trans *trans, int flags,
2692 u32 index, const u8 *src_mac, const u8 *dst_mac,
2693 __be16 vlan_id, bool ttl_check, u32 pport)
2695 struct rocker_group_tbl_entry *entry;
2697 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2701 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2703 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2705 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2706 entry->l3_unicast.vlan_id = vlan_id;
2707 entry->l3_unicast.ttl_check = ttl_check;
2708 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2710 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
/* rocker_neigh_tbl_find() - look up a neighbor entry by IPv4 address.
 * Hash key is the host-order address; caller holds neigh_tbl_lock.
 */
2713 static struct rocker_neigh_tbl_entry *
2714 rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
2716 struct rocker_neigh_tbl_entry *found;
2718 hash_for_each_possible(rocker->neigh_tbl, found,
2719 entry, be32_to_cpu(ip_addr))
2720 if (found->ip_addr == ip_addr)

/* _rocker_neigh_add() - assign a fresh L3 group index and hash the entry.
 * The index is consumed on every non-commit pass; the hashtable insert is
 * skipped during the prepare phase so prepare leaves no state behind.
 */
2726 static void _rocker_neigh_add(struct rocker *rocker,
2727 struct switchdev_trans *trans,
2728 struct rocker_neigh_tbl_entry *entry)
2730 if (!switchdev_trans_ph_commit(trans))
2731 entry->index = rocker->neigh_tbl_next_index++;
2732 if (switchdev_trans_ph_prepare(trans))
2735 hash_add(rocker->neigh_tbl, &entry->entry,
2736 be32_to_cpu(entry->ip_addr));

/* _rocker_neigh_del() - drop a reference; unhash and free when the
 * refcount hits zero.  No-op during the prepare phase.
 */
2739 static void _rocker_neigh_del(struct switchdev_trans *trans,
2740 struct rocker_neigh_tbl_entry *entry)
2742 if (switchdev_trans_ph_prepare(trans))
2744 if (--entry->ref_count == 0) {
2745 hash_del(&entry->entry);
2746 rocker_kfree(trans, entry);

/* _rocker_neigh_update() - refresh an existing entry's dst MAC and
 * ttl_check when @eth_dst is given; the else-branch (elided here) runs
 * only outside the prepare phase.
 */
2750 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
2751 struct switchdev_trans *trans,
2752 const u8 *eth_dst, bool ttl_check)
2755 ether_addr_copy(entry->eth_dst, eth_dst);
2756 entry->ttl_check = ttl_check;
2757 } else if (!switchdev_trans_ph_prepare(trans)) {
/* rocker_port_ipv4_neigh() - install or remove offload state for a
 * resolved IPv4 neighbor: an L3 unicast group (MAC rewrite) plus a /32
 * UNICAST_ROUTING flow pointing at it.  The shadow table decides whether
 * this call is an add, an update, or a remove.
 */
2762 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
2763 struct switchdev_trans *trans,
2764 int flags, __be32 ip_addr, const u8 *eth_dst)
2766 struct rocker *rocker = rocker_port->rocker;
2767 struct rocker_neigh_tbl_entry *entry;
2768 struct rocker_neigh_tbl_entry *found;
2769 unsigned long lock_flags;
2770 __be16 eth_type = htons(ETH_P_IP);
2771 enum rocker_of_dpa_table_id goto_tbl =
2772 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2775 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
/* Scratch entry allocated up front so the table work can happen under
 * the spinlock without sleeping allocations.
 */
2780 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2784 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2786 found = rocker_neigh_tbl_find(rocker, ip_addr);
/* Reclassify the request against current table state: an "add" for an
 * existing neighbor is really an update, and "adding" stays true only
 * when the neighbor is genuinely new.
 */
2788 updating = found && adding;
2789 removing = found && !adding;
2790 adding = !found && adding;
2793 entry->ip_addr = ip_addr;
2794 entry->dev = rocker_port->dev;
2795 ether_addr_copy(entry->eth_dst, eth_dst);
2796 entry->ttl_check = true;
2797 _rocker_neigh_add(rocker, trans, entry);
2798 } else if (removing) {
/* Snapshot the entry before _rocker_neigh_del() may free it, so the
 * index/MAC are still available for the hardware teardown below.
 */
2799 memcpy(entry, found, sizeof(*entry));
2800 _rocker_neigh_del(trans, found);
2801 } else if (updating) {
2802 _rocker_neigh_update(found, trans, eth_dst, true);
2803 memcpy(entry, found, sizeof(*entry));
2808 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2813 /* For each active neighbor, we have an L3 unicast group and
2814 * a /32 route to the neighbor, which uses the L3 unicast
2815 * group. The L3 unicast group can also be referred to by
2816 * other routes' nexthops.
2819 err = rocker_group_l3_unicast(rocker_port, trans, flags,
2821 rocker_port->dev->dev_addr,
2823 rocker_port->internal_vlan_id,
2825 rocker_port->pport);
2827 netdev_err(rocker_port->dev,
2828 "Error (%d) L3 unicast group index %d\n",
/* The /32 route only changes when the neighbor is created or destroyed;
 * a pure MAC update touches only the L3 group above.
 */
2833 if (adding || removing) {
2834 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
2835 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
2842 netdev_err(rocker_port->dev,
2843 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
2844 err, &entry->ip_addr, group_id);
2849 rocker_kfree(trans, entry);

/* rocker_port_ipv4_resolve() - offload the neighbor if the kernel already
 * resolved it, otherwise kick off ARP resolution and rely on the netevent
 * path to install it later.
 */
2854 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
2855 struct switchdev_trans *trans,
2858 struct net_device *dev = rocker_port->dev;
2859 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
2863 n = neigh_create(&arp_tbl, &ip_addr, dev);
2868 /* If the neigh is already resolved, then go ahead and
2869 * install the entry, otherwise start the ARP process to
2870 * resolve the neigh.
2873 if (n->nud_state & NUD_VALID)
2874 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
2877 neigh_event_send(n, NULL);

/* rocker_port_ipv4_nh() - take/drop a nexthop reference on a neighbor
 * entry and return its L3 group @index.  Creates a placeholder entry (no
 * MAC yet) for unknown neighbors and triggers resolution when the MAC is
 * still all-zero.
 */
2883 static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
2884 struct switchdev_trans *trans, int flags,
2885 __be32 ip_addr, u32 *index)
2887 struct rocker *rocker = rocker_port->rocker;
2888 struct rocker_neigh_tbl_entry *entry;
2889 struct rocker_neigh_tbl_entry *found;
2890 unsigned long lock_flags;
2891 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2894 bool resolved = true;
2897 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2901 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2903 found = rocker_neigh_tbl_find(rocker, ip_addr);
2905 *index = found->index;
/* Same add/update/remove reclassification as rocker_port_ipv4_neigh(). */
2907 updating = found && adding;
2908 removing = found && !adding;
2909 adding = !found && adding;
2912 entry->ip_addr = ip_addr;
2913 entry->dev = rocker_port->dev;
2914 _rocker_neigh_add(rocker, trans, entry);
2915 *index = entry->index;
2917 } else if (removing) {
2918 _rocker_neigh_del(trans, found);
2919 } else if (updating) {
2920 _rocker_neigh_update(found, trans, NULL, false);
2921 resolved = !is_zero_ether_addr(found->eth_dst);
2926 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2929 rocker_kfree(trans, entry);
2934 /* Resolved means neigh ip_addr is resolved to neigh mac. */
2937 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
/* rocker_port_vlan_flood_group() - rebuild the L2 flood group for
 * @vlan_id so it references one L2 interface group per bridged port that
 * is a member of the VLAN.
 */
2942 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
2943 struct switchdev_trans *trans,
2944 int flags, __be16 vlan_id)
2946 struct rocker_port *p;
2947 const struct rocker *rocker = rocker_port->rocker;
2948 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2954 group_ids = rocker_kcalloc(trans, flags,
2955 rocker->port_count, sizeof(u32));
2959 /* Adjust the flood group for this VLAN. The flood group
2960 * references an L2 interface group for each port in this
/* Collect members: only bridged ports with the VLAN bit set count. */
2964 for (i = 0; i < rocker->port_count; i++) {
2965 p = rocker->ports[i];
2968 if (!rocker_port_is_bridged(p))
2970 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
2971 group_ids[group_count++] =
2972 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
2976 /* If there are no bridged ports in this VLAN, we're done */
2977 if (group_count == 0)
2978 goto no_ports_in_vlan;
2980 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
2981 group_count, group_ids, group_id);
2983 netdev_err(rocker_port->dev,
2984 "Error (%d) port VLAN l2 flood group\n", err);
/* Temporary member array is freed on all paths. */
2987 rocker_kfree(trans, group_ids);

/* rocker_port_vlan_l2_groups() - maintain the L2 interface groups a port
 * needs for @vlan_id: its own egress group (only while STP permits
 * forwarding/learning) and a shared CPU-port group refcounted across
 * member ports.
 */
2991 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
2992 struct switchdev_trans *trans, int flags,
2993 __be16 vlan_id, bool pop_vlan)
2995 const struct rocker *rocker = rocker_port->rocker;
2996 struct rocker_port *p;
2997 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3003 /* An L2 interface group for this port in this VLAN, but
3004 * only when port STP state is LEARNING|FORWARDING.
3007 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3008 rocker_port->stp_state == BR_STATE_FORWARDING) {
3009 out_pport = rocker_port->pport;
3010 err = rocker_group_l2_interface(rocker_port, trans, flags,
3011 vlan_id, out_pport, pop_vlan);
3013 netdev_err(rocker_port->dev,
3014 "Error (%d) port VLAN l2 group for pport %d\n",
3020 /* An L2 interface group for this VLAN to CPU port.
3021 * Add when first port joins this VLAN and destroy when
3022 * last port leaves this VLAN.
/* Count how many ports currently sit in this VLAN (ref). */
3025 for (i = 0; i < rocker->port_count; i++) {
3026 p = rocker->ports[i];
3027 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
/* Only act on the first join (adding, ref == 1) or last leave
 * (removing, ref == 0); otherwise the CPU group already has the
 * right state.
 */
3031 if ((!adding || ref != 1) && (adding || ref != 0))
3035 err = rocker_group_l2_interface(rocker_port, trans, flags,
3036 vlan_id, out_pport, pop_vlan);
3038 netdev_err(rocker_port->dev,
3039 "Error (%d) port VLAN l2 group for CPU port\n", err);
/* Table of control-traffic classes the driver can trap/copy to the CPU.
 * Each entry describes a match (dst MAC/mask, eth_type) and whether hits
 * are copied to the CPU; rocker_port_ctrl_vlan() turns these into ACL,
 * bridge, or term-MAC flow entries depending on the entry's kind flags
 * (kind fields are elided in this excerpt).
 */
3046 static struct rocker_ctrl {
3048 const u8 *eth_dst_mask;
3054 } rocker_ctrls[] = {
3055 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3056 /* pass link local multicast pkts up to CPU for filtering */
3058 .eth_dst_mask = ll_mask,
3061 [ROCKER_CTRL_LOCAL_ARP] = {
3062 /* pass local ARP pkts up to CPU */
3063 .eth_dst = zero_mac,
3064 .eth_dst_mask = zero_mac,
3065 .eth_type = htons(ETH_P_ARP),
3068 [ROCKER_CTRL_IPV4_MCAST] = {
3069 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3070 .eth_dst = ipv4_mcast,
3071 .eth_dst_mask = ipv4_mask,
3072 .eth_type = htons(ETH_P_IP),
3074 .copy_to_cpu = true,
3076 [ROCKER_CTRL_IPV6_MCAST] = {
3077 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3078 .eth_dst = ipv6_mcast,
3079 .eth_dst_mask = ipv6_mask,
3080 .eth_type = htons(ETH_P_IPV6),
3082 .copy_to_cpu = true,
3084 [ROCKER_CTRL_DFLT_BRIDGING] = {
3085 /* flood any pkts on vlan */
3087 .copy_to_cpu = true,
3089 [ROCKER_CTRL_DFLT_OVS] = {
3090 /* pass all pkts up to CPU */
3091 .eth_dst = zero_mac,
3092 .eth_dst_mask = zero_mac,
/* rocker_port_ctrl_vlan_acl() - install a per-VLAN ACL entry for one
 * control class: exact in_pport match, ctrl's dst MAC/mask + eth_type,
 * directed to the CPU-port L2 interface group (out_pport comes from
 * elided initialization; the group macro below uses it).
 */
3097 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3098 struct switchdev_trans *trans, int flags,
3099 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3101 u32 in_pport = rocker_port->pport;
3102 u32 in_pport_mask = 0xffffffff;
3104 const u8 *eth_src = NULL;
3105 const u8 *eth_src_mask = NULL;
3106 __be16 vlan_id_mask = htons(0xffff);
3108 u8 ip_proto_mask = 0;
3111 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3114 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3115 in_pport, in_pport_mask,
3116 eth_src, eth_src_mask,
3117 ctrl->eth_dst, ctrl->eth_dst_mask,
3119 vlan_id, vlan_id_mask,
3120 ip_proto, ip_proto_mask,
3121 ip_tos, ip_tos_mask,
3125 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);

/* rocker_port_ctrl_vlan_bridge() - per-VLAN bridging entry for a control
 * class: flood matches to the VLAN's L2 flood group (then ACL table).
 * Only applies to bridged ports.
 */
3130 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3131 struct switchdev_trans *trans,
3133 const struct rocker_ctrl *ctrl,
3136 enum rocker_of_dpa_table_id goto_tbl =
3137 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3138 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3142 if (!rocker_port_is_bridged(rocker_port))
3145 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3146 ctrl->eth_dst, ctrl->eth_dst_mask,
3148 goto_tbl, group_id, ctrl->copy_to_cpu);
3151 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);

/* rocker_port_ctrl_vlan_term() - per-VLAN termination-MAC entry for a
 * control class; VLAN 0 is mapped to the port's internal VLAN.
 */
3156 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3157 struct switchdev_trans *trans, int flags,
3158 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3160 u32 in_pport_mask = 0xffffffff;
3161 __be16 vlan_id_mask = htons(0xffff);
3164 if (ntohs(vlan_id) == 0)
3165 vlan_id = rocker_port->internal_vlan_id;
3167 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3168 rocker_port->pport, in_pport_mask,
3169 ctrl->eth_type, ctrl->eth_dst,
3170 ctrl->eth_dst_mask, vlan_id,
3171 vlan_id_mask, ctrl->copy_to_cpu,
3175 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);

/* rocker_port_ctrl_vlan() - dispatch one control class to the acl/bridge/
 * term installer (selection conditions are elided in this excerpt;
 * presumably they test per-ctrl kind flags — confirm against full file).
 */
3180 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3181 struct switchdev_trans *trans, int flags,
3182 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3185 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3188 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3192 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,

/* rocker_port_ctrl_vlan_add() - apply every currently-enabled control
 * class of this port to @vlan_id.
 */
3198 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3199 struct switchdev_trans *trans, int flags,
3205 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3206 if (rocker_port->ctrls[i]) {
3207 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3208 &rocker_ctrls[i], vlan_id);

/* rocker_port_ctrl() - apply one control class across every VLAN the
 * port is currently a member of.
 */
3217 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3218 struct switchdev_trans *trans, int flags,
3219 const struct rocker_ctrl *ctrl)
3224 for (vid = 1; vid < VLAN_N_VID; vid++) {
3225 if (!test_bit(vid, rocker_port->vlan_bitmap))
3227 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
/* rocker_port_vlan() - add or remove @vid membership on a port: toggles
 * the port's vlan_bitmap, installs ctrl entries, L2 groups, the flood
 * group, and a VLAN-table flow mapping the wire VLAN to the internal one.
 */
3236 static int rocker_port_vlan(struct rocker_port *rocker_port,
3237 struct switchdev_trans *trans, int flags, u16 vid)
3239 enum rocker_of_dpa_table_id goto_tbl =
3240 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3241 u32 in_pport = rocker_port->pport;
3242 __be16 vlan_id = htons(vid);
3243 __be16 vlan_id_mask = htons(0xffff);
3244 __be16 internal_vlan_id;
3246 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
/* vid 0 (untagged) maps to the port's internal VLAN id. */
3249 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3251 if (adding && test_bit(ntohs(internal_vlan_id),
3252 rocker_port->vlan_bitmap))
3253 return 0; /* already added */
3254 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3255 rocker_port->vlan_bitmap))
3256 return 0; /* already removed */
/* Flip membership up front so the helpers below see the new state. */
3258 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3261 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
3264 netdev_err(rocker_port->dev,
3265 "Error (%d) port ctrl vlan add\n", err);
3270 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
3271 internal_vlan_id, untagged);
3273 netdev_err(rocker_port->dev,
3274 "Error (%d) port VLAN l2 groups\n", err);
3278 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
3281 netdev_err(rocker_port->dev,
3282 "Error (%d) port VLAN l2 flood group\n", err);
3286 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
3287 in_pport, vlan_id, vlan_id_mask,
3288 goto_tbl, untagged, internal_vlan_id);
3290 netdev_err(rocker_port->dev,
3291 "Error (%d) port VLAN table\n", err);
/* Prepare phase must leave no state: undo the bitmap flip. */
3294 if (switchdev_trans_ph_prepare(trans))
3295 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

/* rocker_port_ig_tbl() - default ingress-port table entry: match any
 * physical port (mask 0xffff0000) and jump to the VLAN table.
 */
3300 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3301 struct switchdev_trans *trans, int flags)
3303 enum rocker_of_dpa_table_id goto_tbl;
3308 /* Normal Ethernet Frames. Matches pkts from any local physical
3309 * ports. Goto VLAN tbl.
3313 in_pport_mask = 0xffff0000;
3314 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3316 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3317 in_pport, in_pport_mask,
3320 netdev_err(rocker_port->dev,
3321 "Error (%d) ingress port table entry\n", err);
/* Deferred-work context for notifying the bridge layer about learned /
 * forgotten FDB entries (notifiers may sleep, so this runs from a
 * workqueue rather than the atomic learn path).
 */
3326 struct rocker_fdb_learn_work {
3327 struct work_struct work;
3328 struct rocker_port *rocker_port;
3329 struct switchdev_trans *trans;

/* rocker_port_fdb_learn_work() - workqueue handler: translate the saved
 * flags into SWITCHDEV_FDB_ADD/DEL notifications, then free the context.
 */
3335 static void rocker_port_fdb_learn_work(struct work_struct *work)
3337 const struct rocker_fdb_learn_work *lw =
3338 container_of(work, struct rocker_fdb_learn_work, work);
3339 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3340 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3341 struct switchdev_notifier_fdb_info info;
3343 info.addr = lw->addr;
/* Only device-learned entries generate notifications to the bridge. */
3347 if (learned && removing)
3348 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
3349 lw->rocker_port->dev, &info.info);
3350 else if (learned && !removing)
3351 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
3352 lw->rocker_port->dev, &info.info);
3355 rocker_kfree(lw->trans, work);

/* rocker_port_fdb_learn() - program a bridging flow for @addr/@vlan_id
 * (skipped for pure aging refreshes) and, for bridged ports with
 * learning-sync enabled, queue a notification to the bridge layer.
 */
3358 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3359 struct switchdev_trans *trans, int flags,
3360 const u8 *addr, __be16 vlan_id)
3362 struct rocker_fdb_learn_work *lw;
3363 enum rocker_of_dpa_table_id goto_tbl =
3364 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3365 u32 out_pport = rocker_port->pport;
3367 u32 group_id = ROCKER_GROUP_NONE;
3368 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3369 bool copy_to_cpu = false;
3372 if (rocker_port_is_bridged(rocker_port))
3373 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
/* REFRESH means "bump aging only" — no flow reprogramming needed. */
3375 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3376 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3377 NULL, vlan_id, tunnel_id, goto_tbl,
3378 group_id, copy_to_cpu);
3386 if (!rocker_port_is_bridged(rocker_port))
3389 lw = rocker_kzalloc(trans, flags, sizeof(*lw));
3393 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3395 lw->rocker_port = rocker_port;
3398 ether_addr_copy(lw->addr, addr);
3399 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
/* During prepare, don't schedule side effects; just drop the context. */
3401 if (switchdev_trans_ph_prepare(trans))
3402 rocker_kfree(trans, lw);
3404 schedule_work(&lw->work);

/* rocker_fdb_tbl_find() - look up an FDB shadow entry by the CRC32 of its
 * key, comparing full keys to resolve hash collisions.  Caller holds
 * fdb_tbl_lock.
 */
3409 static struct rocker_fdb_tbl_entry *
3410 rocker_fdb_tbl_find(const struct rocker *rocker,
3411 const struct rocker_fdb_tbl_entry *match)
3413 struct rocker_fdb_tbl_entry *found;
3415 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3416 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
/* rocker_port_fdb() - add/remove/refresh an FDB shadow entry for
 * (@rocker_port, @addr, @vlan_id), then program the bridging flow via
 * rocker_port_fdb_learn().  Re-adding an existing entry becomes a
 * REFRESH (aging-timer bump only).
 */
3422 static int rocker_port_fdb(struct rocker_port *rocker_port,
3423 struct switchdev_trans *trans,
3424 const unsigned char *addr,
3425 __be16 vlan_id, int flags)
3427 struct rocker *rocker = rocker_port->rocker;
3428 struct rocker_fdb_tbl_entry *fdb;
3429 struct rocker_fdb_tbl_entry *found;
3430 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3431 unsigned long lock_flags;
3433 fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
3437 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3438 fdb->touched = jiffies;
3439 fdb->key.rocker_port = rocker_port;
3440 ether_addr_copy(fdb->key.addr, addr);
3441 fdb->key.vlan_id = vlan_id;
/* CRC32 of the whole key is the hashtable bucket key. */
3442 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3444 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3446 found = rocker_fdb_tbl_find(rocker, fdb);
3449 found->touched = jiffies;
3451 rocker_kfree(trans, fdb);
/* hash_del/hash_add are skipped during prepare so the prepare phase
 * leaves the shadow table untouched.
 */
3452 if (!switchdev_trans_ph_prepare(trans))
3453 hash_del(&found->entry);
3455 } else if (!removing) {
3456 if (!switchdev_trans_ph_prepare(trans))
3457 hash_add(rocker->fdb_tbl, &fdb->entry,
3461 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3463 /* Check if adding and already exists, or removing and can't find */
3464 if (!found != !removing) {
3465 rocker_kfree(trans, fdb);
3466 if (!found && removing)
3468 /* Refreshing existing to update aging timers */
3469 flags |= ROCKER_OP_FLAG_REFRESH;
3472 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);

/* rocker_port_fdb_flush() - remove all learned FDB entries for this port
 * (e.g. on STP state change), except while the port is still in a
 * learning/forwarding state.  Uses NOWAIT since it runs under the
 * fdb_tbl spinlock.
 */
3475 static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
3476 struct switchdev_trans *trans, int flags)
3478 struct rocker *rocker = rocker_port->rocker;
3479 struct rocker_fdb_tbl_entry *found;
3480 unsigned long lock_flags;
3481 struct hlist_node *tmp;
3485 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3486 rocker_port->stp_state == BR_STATE_FORWARDING)
3489 flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3491 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3493 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3494 if (found->key.rocker_port != rocker_port)
3496 if (!found->learned)
3498 err = rocker_port_fdb_learn(rocker_port, trans, flags,
3500 found->key.vlan_id);
3503 if (!switchdev_trans_ph_prepare(trans))
3504 hash_del(&found->entry);
3508 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

/* rocker_fdb_cleanup() - FDB aging timer callback: unlearn and delete
 * entries whose (touched + per-port ageing_time) deadline has passed,
 * then re-arm the timer for the earliest remaining expiry.
 */
3513 static void rocker_fdb_cleanup(unsigned long data)
3515 struct rocker *rocker = (struct rocker *)data;
3516 struct rocker_port *rocker_port;
3517 struct rocker_fdb_tbl_entry *entry;
3518 struct hlist_node *tmp;
3519 unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3520 unsigned long expires;
3521 unsigned long lock_flags;
3522 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3523 ROCKER_OP_FLAG_LEARNED;
3526 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3528 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3529 if (!entry->learned)
3531 rocker_port = entry->key.rocker_port;
3532 expires = entry->touched + rocker_port->ageing_time;
3533 if (time_before_eq(expires, jiffies)) {
3534 rocker_port_fdb_learn(rocker_port, NULL,
3535 flags, entry->key.addr,
3536 entry->key.vlan_id);
3537 hash_del(&entry->entry);
3538 } else if (time_before(expires, next_timer)) {
/* Track the soonest not-yet-expired entry for re-arming. */
3539 next_timer = expires;
3543 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3545 mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
/* rocker_port_router_mac() - install termination-MAC entries matching the
 * port's own MAC for both IPv4 and IPv6 ethertypes, so routed traffic is
 * steered into the routing pipeline.  VLAN 0 maps to the internal VLAN.
 */
3548 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3549 struct switchdev_trans *trans, int flags,
3552 u32 in_pport_mask = 0xffffffff;
3554 const u8 *dst_mac_mask = ff_mac;
3555 __be16 vlan_id_mask = htons(0xffff);
3556 bool copy_to_cpu = false;
3559 if (ntohs(vlan_id) == 0)
3560 vlan_id = rocker_port->internal_vlan_id;
3562 eth_type = htons(ETH_P_IP);
3563 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3564 rocker_port->pport, in_pport_mask,
3565 eth_type, rocker_port->dev->dev_addr,
3566 dst_mac_mask, vlan_id, vlan_id_mask,
3567 copy_to_cpu, flags);
/* Same entry again for IPv6 traffic. */
3571 eth_type = htons(ETH_P_IPV6);
3572 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3573 rocker_port->pport, in_pport_mask,
3574 eth_type, rocker_port->dev->dev_addr,
3575 dst_mac_mask, vlan_id, vlan_id_mask,
3576 copy_to_cpu, flags);

/* rocker_port_fwding() - toggle egress forwarding for the port by adding
 * or removing the L2 interface group on every VLAN the port belongs to,
 * based on the current STP state.
 */
3581 static int rocker_port_fwding(struct rocker_port *rocker_port,
3582 struct switchdev_trans *trans, int flags)
3590 /* Port will be forwarding-enabled if its STP state is LEARNING
3591 * or FORWARDING. Traffic from CPU can still egress, regardless of
3592 * port STP state. Use L2 interface group on port VLANs as a way
3593 * to toggle port forwarding: if forwarding is disabled, L2
3594 * interface group will not exist.
3597 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3598 rocker_port->stp_state != BR_STATE_FORWARDING)
3599 flags |= ROCKER_OP_FLAG_REMOVE;
3601 out_pport = rocker_port->pport;
3602 for (vid = 1; vid < VLAN_N_VID; vid++) {
3603 if (!test_bit(vid, rocker_port->vlan_bitmap))
3605 vlan_id = htons(vid);
/* Internal VLANs carry untagged traffic, so pop the tag on egress. */
3606 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3607 err = rocker_group_l2_interface(rocker_port, trans, flags,
3608 vlan_id, out_pport, pop_vlan);
3610 netdev_err(rocker_port->dev,
3611 "Error (%d) port VLAN l2 group for pport %d\n",
/* rocker_port_stp_update() - apply an STP state change: pick the wanted
 * set of control-traffic classes for the new state, reconcile them with
 * the current set, flush learned FDB entries, and re-evaluate forwarding.
 * During the switchdev prepare phase, prior state is saved and restored
 * at the end so prepare has no lasting effect.
 */
3620 static int rocker_port_stp_update(struct rocker_port *rocker_port,
3621 struct switchdev_trans *trans, int flags,
3624 bool want[ROCKER_CTRL_MAX] = { 0, };
3625 bool prev_ctrls[ROCKER_CTRL_MAX];
3626 u8 uninitialized_var(prev_state);
/* Snapshot current ctrls/state for the rollback at the end of prepare. */
3630 if (switchdev_trans_ph_prepare(trans)) {
3631 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3632 prev_state = rocker_port->stp_state;
3635 if (rocker_port->stp_state == state)
3638 rocker_port->stp_state = state;
3641 case BR_STATE_DISABLED:
3642 /* port is completely disabled */
3644 case BR_STATE_LISTENING:
3645 case BR_STATE_BLOCKING:
3646 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3648 case BR_STATE_LEARNING:
3649 case BR_STATE_FORWARDING:
3650 if (!rocker_port_is_ovsed(rocker_port))
3651 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3652 want[ROCKER_CTRL_IPV4_MCAST] = true;
3653 want[ROCKER_CTRL_IPV6_MCAST] = true;
3654 if (rocker_port_is_bridged(rocker_port))
3655 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3656 else if (rocker_port_is_ovsed(rocker_port))
3657 want[ROCKER_CTRL_DFLT_OVS] = true;
3659 want[ROCKER_CTRL_LOCAL_ARP] = true;
/* Install newly-wanted classes and remove no-longer-wanted ones. */
3663 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3664 if (want[i] != rocker_port->ctrls[i]) {
3665 int ctrl_flags = flags |
3666 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3667 err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
3671 rocker_port->ctrls[i] = want[i];
3675 err = rocker_port_fdb_flush(rocker_port, trans, flags);
3679 err = rocker_port_fwding(rocker_port, trans, flags);
/* Prepare phase: restore the snapshot taken at entry. */
3682 if (switchdev_trans_ph_prepare(trans)) {
3683 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3684 rocker_port->stp_state = prev_state;

/* rocker_port_fwd_enable() - enable forwarding on a non-bridged port by
 * simulating an STP transition to FORWARDING; bridged ports are driven
 * by bridge STP instead.
 */
3690 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3691 struct switchdev_trans *trans, int flags)
3693 if (rocker_port_is_bridged(rocker_port))
3694 /* bridge STP will enable port */
3697 /* port is not bridged, so simulate going to FORWARDING state */
3698 return rocker_port_stp_update(rocker_port, trans, flags,
3699 BR_STATE_FORWARDING);

/* rocker_port_fwd_disable() - mirror of fwd_enable: simulate an STP
 * transition to DISABLED for non-bridged ports.
 */
3702 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3703 struct switchdev_trans *trans, int flags)
3705 if (rocker_port_is_bridged(rocker_port))
3706 /* bridge STP will disable port */
3709 /* port is not bridged, so simulate going to DISABLED state */
3710 return rocker_port_stp_update(rocker_port, trans, flags,
/* rocker_internal_vlan_tbl_find() - look up the internal-VLAN mapping for
 * a netdev ifindex.  Caller holds internal_vlan_tbl_lock.
 */
3714 static struct rocker_internal_vlan_tbl_entry *
3715 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3717 struct rocker_internal_vlan_tbl_entry *found;
3719 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3721 if (found->ifindex == ifindex)

/* rocker_port_internal_vlan_id_get() - get (allocating on first use) the
 * internal VLAN id reserved for @ifindex.  New mappings claim the first
 * free bit in internal_vlan_bitmap, offset from
 * ROCKER_INTERNAL_VLAN_ID_BASE.  (Refcount handling for existing entries
 * is elided in this excerpt.)
 */
3728 static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3731 struct rocker *rocker = rocker_port->rocker;
3732 struct rocker_internal_vlan_tbl_entry *entry;
3733 struct rocker_internal_vlan_tbl_entry *found;
3734 unsigned long lock_flags;
/* Speculative GFP_KERNEL allocation before taking the spinlock. */
3737 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3741 entry->ifindex = ifindex;
3743 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3745 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3752 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3754 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3755 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3757 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3761 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3765 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3767 return found->vlan_id;

/* rocker_port_internal_vlan_id_put() - drop a reference on @ifindex's
 * internal VLAN; on the last reference the bitmap bit is returned and the
 * mapping is unhashed.
 */
3771 rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
3774 struct rocker *rocker = rocker_port->rocker;
3775 struct rocker_internal_vlan_tbl_entry *found;
3776 unsigned long lock_flags;
3779 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3781 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3783 netdev_err(rocker_port->dev,
3784 "ifindex (%d) not found in internal VLAN tbl\n",
3789 if (--found->ref_count <= 0) {
3790 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3791 clear_bit(bit, rocker->internal_vlan_bitmap);
3792 hash_del(&found->entry);
3797 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
/* rocker_port_fib_ipv4() - offload an IPv4 FIB entry.  Routes with a
 * gateway whose nexthop device is this port go via an L3 unicast group
 * (allocated through rocker_port_ipv4_nh()); everything else is punted
 * to the CPU via the internal-VLAN L2 interface group.
 */
3800 static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
3801 struct switchdev_trans *trans, __be32 dst,
3802 int dst_len, const struct fib_info *fi,
3803 u32 tb_id, int flags)
3805 const struct fib_nh *nh;
3806 __be16 eth_type = htons(ETH_P_IP);
3807 __be32 dst_mask = inet_make_mask(dst_len);
3808 __be16 internal_vlan_id = rocker_port->internal_vlan_id;
3809 u32 priority = fi->fib_priority;
3810 enum rocker_of_dpa_table_id goto_tbl =
3811 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3818 /* XXX support ECMP */
3821 nh_on_port = (fi->fib_dev == rocker_port->dev);
3822 has_gw = !!nh->nh_gw;
3824 if (has_gw && nh_on_port) {
3825 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
3830 group_id = ROCKER_GROUP_L3_UNICAST(index);
3832 /* Send to CPU for processing */
3833 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
3836 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
3837 dst_mask, priority, goto_tbl,
3840 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
/* rocker_port_open() - ndo_open: bring the port up.  Sets up the TX/RX
 * DMA rings, requests the per-port TX and RX MSI-X interrupt vectors,
 * enables hardware forwarding, starts both NAPI contexts, and enables
 * the port in hardware unless the administrator has set proto_down.
 * Unwinds in reverse order via the goto ladder on failure.
 */
3850 static int rocker_port_open(struct net_device *dev)
3852 struct rocker_port *rocker_port = netdev_priv(dev);
3855 err = rocker_port_dma_rings_init(rocker_port);
3859 err = request_irq(rocker_msix_tx_vector(rocker_port),
3860 rocker_tx_irq_handler, 0,
3861 rocker_driver_name, rocker_port);
3863 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
3864 goto err_request_tx_irq;
3867 err = request_irq(rocker_msix_rx_vector(rocker_port),
3868 rocker_rx_irq_handler, 0,
3869 rocker_driver_name, rocker_port);
3871 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
3872 goto err_request_rx_irq;
3875 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
3877 goto err_fwd_enable;
3879 napi_enable(&rocker_port->napi_tx);
3880 napi_enable(&rocker_port->napi_rx);
3881 if (!dev->proto_down)
3882 rocker_port_set_enable(rocker_port, true);
3883 netif_start_queue(dev);
/* error unwind: free IRQs and tear down DMA rings in reverse order */
3887 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3889 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3891 rocker_port_dma_rings_fini(rocker_port);
/* rocker_port_stop() - ndo_stop: exact reverse of rocker_port_open().
 * Stops the queue, disables the port, quiesces NAPI, disables hardware
 * forwarding (NOWAIT: don't block on the command ring during teardown),
 * then releases IRQs and DMA rings.
 */
3895 static int rocker_port_stop(struct net_device *dev)
3897 struct rocker_port *rocker_port = netdev_priv(dev);
3899 netif_stop_queue(dev);
3900 rocker_port_set_enable(rocker_port, false);
3901 napi_disable(&rocker_port->napi_rx);
3902 napi_disable(&rocker_port->napi_tx);
3903 rocker_port_fwd_disable(rocker_port, NULL,
3904 ROCKER_OP_FLAG_NOWAIT);
3905 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3906 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3907 rocker_port_dma_rings_fini(rocker_port);
/* rocker_tx_desc_frags_unmap() - walk the nested TX_FRAG TLVs of a
 * completed TX descriptor and DMA-unmap each fragment that was mapped by
 * rocker_tx_desc_frag_map_put().  Fragments missing an ADDR or LEN
 * attribute are skipped.
 */
3912 static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
3913 const struct rocker_desc_info *desc_info)
3915 const struct rocker *rocker = rocker_port->rocker;
3916 struct pci_dev *pdev = rocker->pdev;
3917 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
3918 struct rocker_tlv *attr;
3921 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
3922 if (!attrs[ROCKER_TLV_TX_FRAGS])
3924 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
3925 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
3926 dma_addr_t dma_handle;
3929 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
3931 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
3933 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
3934 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
3936 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
3937 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
/* NOTE(review): pci_unmap_single() is paired with a DMA_* direction
 * constant here rather than PCI_DMA_TODEVICE — confirm against the
 * full source whether this extract reflects the upstream code.
 */
3938 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
/* rocker_tx_desc_frag_map_put() - DMA-map one TX buffer fragment and
 * append a nested TX_FRAG TLV (ADDR + LEN) describing it to @desc_info.
 * On TLV-space exhaustion the nest is cancelled and the mapping undone
 * so the caller sees no partial state.
 */
3942 static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
3943 struct rocker_desc_info *desc_info,
3944 char *buf, size_t buf_len)
3946 const struct rocker *rocker = rocker_port->rocker;
3947 struct pci_dev *pdev = rocker->pdev;
3948 dma_addr_t dma_handle;
3949 struct rocker_tlv *frag;
3951 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
3952 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
3953 if (net_ratelimit())
3954 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
3957 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
3960 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
3963 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
3966 rocker_tlv_nest_end(desc_info, frag);
/* error unwind: cancel the half-built nest, then unmap the fragment */
3970 rocker_tlv_nest_cancel(desc_info, frag);
3972 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
/* rocker_port_xmit() - ndo_start_xmit: post an skb to the TX DMA ring as
 * a TX_FRAGS TLV nest (head fragment first, then each page fragment).
 * If the skb has more page fragments than ROCKER_TX_FRAGS_MAX it is
 * linearized first.  The queue is stopped when the ring has no further
 * head descriptor; on error the frags are unmapped, the skb dropped, and
 * NETDEV_TX_OK returned (the packet is accounted as tx_dropped).
 */
3976 static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
3978 struct rocker_port *rocker_port = netdev_priv(dev);
3979 struct rocker *rocker = rocker_port->rocker;
3980 struct rocker_desc_info *desc_info;
3981 struct rocker_tlv *frags;
3985 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3986 if (unlikely(!desc_info)) {
3987 if (net_ratelimit())
3988 netdev_err(dev, "tx ring full when queue awake\n");
3989 return NETDEV_TX_BUSY;
/* stash the skb on the descriptor so the TX-complete path can free it */
3992 rocker_desc_cookie_ptr_set(desc_info, skb);
3994 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
3997 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3998 skb->data, skb_headlen(skb));
4001 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
4002 err = skb_linearize(skb);
4007 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4008 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4010 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4011 skb_frag_address(frag),
4012 skb_frag_size(frag));
4016 rocker_tlv_nest_end(desc_info, frags);
4018 rocker_desc_gen_clear(desc_info);
4019 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4021 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4023 netif_stop_queue(dev);
4025 return NETDEV_TX_OK;
/* error unwind: release DMA mappings and the open TLV nest, drop skb */
4028 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4030 rocker_tlv_nest_cancel(desc_info, frags);
4033 dev->stats.tx_dropped++;
4035 return NETDEV_TX_OK;
/* rocker_port_set_mac_address() - ndo_set_mac_address: push the new MAC
 * to the device first; only update dev->dev_addr once hardware accepted
 * it, so software and hardware state never diverge.
 */
4038 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4040 struct sockaddr *addr = p;
4041 struct rocker_port *rocker_port = netdev_priv(dev);
4044 if (!is_valid_ether_addr(addr->sa_data))
4045 return -EADDRNOTAVAIL;
4047 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4050 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* rocker_port_change_mtu() - ndo_change_mtu: validate the range, stop
 * the port if it is running (DMA ring buffers are sized from the MTU),
 * program the new MTU into the device, then reopen the port.
 */
4054 static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4056 struct rocker_port *rocker_port = netdev_priv(dev);
4057 int running = netif_running(dev);
4060 #define ROCKER_PORT_MIN_MTU 68
4061 #define ROCKER_PORT_MAX_MTU 9000
4063 if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4067 rocker_port_stop(dev);
4069 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4072 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4077 err = rocker_port_open(dev);
/* rocker_port_get_phys_port_name() - ndo_get_phys_port_name: query the
 * device for the port's physical name via the cmd ring; any failure is
 * reported to the stack as -EOPNOTSUPP.
 */
4082 static int rocker_port_get_phys_port_name(struct net_device *dev,
4083 char *buf, size_t len)
4085 struct rocker_port *rocker_port = netdev_priv(dev);
4086 struct port_name name = { .buf = buf, .len = len };
4089 err = rocker_cmd_exec(rocker_port, NULL, 0,
4090 rocker_cmd_get_port_settings_prep, NULL,
4091 rocker_cmd_get_port_settings_phys_name_proc,
4094 return err ? -EOPNOTSUPP : 0;
/* rocker_port_change_proto_down() - ndo_change_proto_down: mirror the
 * proto_down flag into the hardware enable bit (only while the netdev is
 * administratively up), then record the new flag value.
 */
4097 static int rocker_port_change_proto_down(struct net_device *dev,
4100 struct rocker_port *rocker_port = netdev_priv(dev);
4102 if (rocker_port->dev->flags & IFF_UP)
4103 rocker_port_set_enable(rocker_port, !proto_down);
4104 rocker_port->dev->proto_down = proto_down;
/* rocker_port_neigh_destroy() - ndo_neigh_destroy: when the kernel tears
 * down a neighbour entry on this port, remove the matching offloaded
 * IPv4 neighbour (NOWAIT — may run in a context that cannot sleep).
 * Assumes n->primary_key holds an IPv4 address (arp_tbl neighbour).
 */
4108 static void rocker_port_neigh_destroy(struct neighbour *n)
4110 struct rocker_port *rocker_port = netdev_priv(n->dev);
4111 int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4112 __be32 ip_addr = *(__be32 *)n->primary_key;
4114 rocker_port_ipv4_neigh(rocker_port, NULL,
4115 flags, ip_addr, n->ha);
/* Netdev operations for a rocker port.  Bridge/FDB ops are delegated to
 * the generic switchdev helpers; everything else is implemented above.
 */
4118 static const struct net_device_ops rocker_port_netdev_ops = {
4119 .ndo_open = rocker_port_open,
4120 .ndo_stop = rocker_port_stop,
4121 .ndo_start_xmit = rocker_port_xmit,
4122 .ndo_set_mac_address = rocker_port_set_mac_address,
4123 .ndo_change_mtu = rocker_port_change_mtu,
4124 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
4125 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
4126 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
4127 .ndo_fdb_add = switchdev_port_fdb_add,
4128 .ndo_fdb_del = switchdev_port_fdb_del,
4129 .ndo_fdb_dump = switchdev_port_fdb_dump,
4130 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
4131 .ndo_change_proto_down = rocker_port_change_proto_down,
4132 .ndo_neigh_destroy = rocker_port_neigh_destroy,
4135 /********************
4137 ********************/
/* rocker_port_attr_get() - switchdev attr get: report the switch ID
 * (all ports of one rocker device share rocker->hw.id, which is what
 * groups them into one offload domain) and the port's bridge flags.
 */
4139 static int rocker_port_attr_get(struct net_device *dev,
4140 struct switchdev_attr *attr)
4142 const struct rocker_port *rocker_port = netdev_priv(dev);
4143 const struct rocker *rocker = rocker_port->rocker;
4146 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
4147 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4148 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
4150 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4151 attr->u.brport_flags = rocker_port->brport_flags;
/* rocker_port_brport_flags_set() - apply new bridge-port flags; only a
 * change in BR_LEARNING requires reprogramming the hardware.  In the
 * switchdev prepare phase the flags are restored afterwards so that no
 * state sticks if the transaction later aborts.
 */
4160 static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
4161 struct switchdev_trans *trans,
4162 unsigned long brport_flags)
4164 unsigned long orig_flags;
4167 orig_flags = rocker_port->brport_flags;
4168 rocker_port->brport_flags = brport_flags;
4169 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
4170 err = rocker_port_set_learning(rocker_port, trans);
4172 if (switchdev_trans_ph_prepare(trans))
4173 rocker_port->brport_flags = orig_flags;
/* rocker_port_bridge_ageing_time() - set the FDB ageing time (commit
 * phase only) and kick the cleanup timer so the new, possibly shorter,
 * interval takes effect immediately.
 */
4178 static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
4179 struct switchdev_trans *trans,
4182 if (!switchdev_trans_ph_prepare(trans)) {
4183 rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
4184 mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
/* rocker_port_attr_set() - switchdev attr set dispatcher: STP state,
 * bridge-port flags, and bridge ageing time.
 */
4190 static int rocker_port_attr_set(struct net_device *dev,
4191 const struct switchdev_attr *attr,
4192 struct switchdev_trans *trans)
4194 struct rocker_port *rocker_port = netdev_priv(dev);
4198 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
4199 err = rocker_port_stp_update(rocker_port, trans, 0,
4202 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4203 err = rocker_port_brport_flags_set(rocker_port, trans,
4204 attr->u.brport_flags);
4206 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
4207 err = rocker_port_bridge_ageing_time(rocker_port, trans,
4208 attr->u.ageing_time);
/* rocker_port_vlan_add() - install @vid on the port, then add the
 * router-MAC termination entry for it; on failure of the latter the VLAN
 * install is rolled back so the two stay in sync.
 */
4218 static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4219 struct switchdev_trans *trans,
4224 /* XXX deal with flags for PVID and untagged */
4226 err = rocker_port_vlan(rocker_port, trans, 0, vid);
4230 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4232 rocker_port_vlan(rocker_port, trans,
4233 ROCKER_OP_FLAG_REMOVE, vid);
/* rocker_port_vlans_add() - add every VID in the switchdev VLAN object's
 * inclusive [vid_begin, vid_end] range to the port.
 */
4238 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4239 struct switchdev_trans *trans,
4240 const struct switchdev_obj_port_vlan *vlan)
4245 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4246 err = rocker_port_vlan_add(rocker_port, trans,
/* rocker_port_fdb_add() - install a static FDB entry; only meaningful
 * while the port is enslaved to a bridge.  The VID is translated to the
 * port's internal VLAN representation first.
 */
4255 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4256 struct switchdev_trans *trans,
4257 const struct switchdev_obj_port_fdb *fdb)
4259 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4262 if (!rocker_port_is_bridged(rocker_port))
4265 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
/* rocker_port_obj_add() - switchdev object-add dispatcher: port VLANs,
 * IPv4 FIB entries, and FDB entries.
 */
4268 static int rocker_port_obj_add(struct net_device *dev,
4269 const struct switchdev_obj *obj,
4270 struct switchdev_trans *trans)
4272 struct rocker_port *rocker_port = netdev_priv(dev);
4273 const struct switchdev_obj_ipv4_fib *fib4;
4277 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4278 err = rocker_port_vlans_add(rocker_port, trans,
4279 SWITCHDEV_OBJ_PORT_VLAN(obj));
4281 case SWITCHDEV_OBJ_ID_IPV4_FIB:
4282 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4283 err = rocker_port_fib_ipv4(rocker_port, trans,
4284 htonl(fib4->dst), fib4->dst_len,
4285 &fib4->fi, fib4->tb_id, 0);
4287 case SWITCHDEV_OBJ_ID_PORT_FDB:
4288 err = rocker_port_fdb_add(rocker_port, trans,
4289 SWITCHDEV_OBJ_PORT_FDB(obj));
/* rocker_port_vlan_del() - remove the router-MAC termination entry for
 * @vid, then remove the VLAN itself (reverse order of vlan_add).
 * Deletes are non-transactional, hence the NULL trans.
 */
4299 static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4304 err = rocker_port_router_mac(rocker_port, NULL,
4305 ROCKER_OP_FLAG_REMOVE, htons(vid));
4309 return rocker_port_vlan(rocker_port, NULL,
4310 ROCKER_OP_FLAG_REMOVE, vid);
/* rocker_port_vlans_del() - delete every VID in the switchdev VLAN
 * object's inclusive [vid_begin, vid_end] range from the port.
 */
4313 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
4314 const struct switchdev_obj_port_vlan *vlan)
4319 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4320 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
/* rocker_port_fdb_del() - remove an FDB entry; mirror of
 * rocker_port_fdb_add() with ROCKER_OP_FLAG_REMOVE set.  Only valid
 * while the port is bridged.
 */
4328 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4329 struct switchdev_trans *trans,
4330 const struct switchdev_obj_port_fdb *fdb)
4332 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4333 int flags = ROCKER_OP_FLAG_REMOVE;
4335 if (!rocker_port_is_bridged(rocker_port))
4338 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
/* rocker_port_obj_del() - switchdev object-delete dispatcher; deletes
 * are applied directly (NULL trans), unlike adds.
 */
4341 static int rocker_port_obj_del(struct net_device *dev,
4342 const struct switchdev_obj *obj)
4344 struct rocker_port *rocker_port = netdev_priv(dev);
4345 const struct switchdev_obj_ipv4_fib *fib4;
4349 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4350 err = rocker_port_vlans_del(rocker_port,
4351 SWITCHDEV_OBJ_PORT_VLAN(obj));
4353 case SWITCHDEV_OBJ_ID_IPV4_FIB:
4354 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4355 err = rocker_port_fib_ipv4(rocker_port, NULL,
4356 htonl(fib4->dst), fib4->dst_len,
4357 &fib4->fi, fib4->tb_id,
4358 ROCKER_OP_FLAG_REMOVE);
4360 case SWITCHDEV_OBJ_ID_PORT_FDB:
4361 err = rocker_port_fdb_del(rocker_port, NULL,
4362 SWITCHDEV_OBJ_PORT_FDB(obj));
/* rocker_port_fdb_dump() - walk the device-wide FDB hash under its
 * spinlock and emit, via @cb, every learned entry belonging to this
 * port.  Entries are reported as NUD_REACHABLE with the VID translated
 * back from the internal VLAN id.
 */
4372 static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
4373 struct switchdev_obj_port_fdb *fdb,
4374 switchdev_obj_dump_cb_t *cb)
4376 struct rocker *rocker = rocker_port->rocker;
4377 struct rocker_fdb_tbl_entry *found;
4378 struct hlist_node *tmp;
4379 unsigned long lock_flags;
4383 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4384 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
4385 if (found->key.rocker_port != rocker_port)
4387 ether_addr_copy(fdb->addr, found->key.addr);
4388 fdb->ndm_state = NUD_REACHABLE;
4389 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4390 found->key.vlan_id);
4391 err = cb(&fdb->obj);
4395 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
/* rocker_port_vlan_dump() - report each VID set in the port's VLAN
 * bitmap as a single-VID range via @cb; internal VLANs are flagged as
 * PVID.  VID 0 is skipped (iteration starts at 1).
 */
4400 static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4401 struct switchdev_obj_port_vlan *vlan,
4402 switchdev_obj_dump_cb_t *cb)
4407 for (vid = 1; vid < VLAN_N_VID; vid++) {
4408 if (!test_bit(vid, rocker_port->vlan_bitmap))
4411 if (rocker_vlan_id_is_internal(htons(vid)))
4412 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4413 vlan->vid_begin = vid;
4414 vlan->vid_end = vid;
4415 err = cb(&vlan->obj);
/* rocker_port_obj_dump() - switchdev object-dump dispatcher for FDB and
 * VLAN objects.
 */
4423 static int rocker_port_obj_dump(struct net_device *dev,
4424 struct switchdev_obj *obj,
4425 switchdev_obj_dump_cb_t *cb)
4427 const struct rocker_port *rocker_port = netdev_priv(dev);
4431 case SWITCHDEV_OBJ_ID_PORT_FDB:
4432 err = rocker_port_fdb_dump(rocker_port,
4433 SWITCHDEV_OBJ_PORT_FDB(obj), cb);
4435 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4436 err = rocker_port_vlan_dump(rocker_port,
4437 SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
/* Switchdev operations exposed by every rocker port. */
4447 static const struct switchdev_ops rocker_port_switchdev_ops = {
4448 .switchdev_port_attr_get = rocker_port_attr_get,
4449 .switchdev_port_attr_set = rocker_port_attr_set,
4450 .switchdev_port_obj_add = rocker_port_obj_add,
4451 .switchdev_port_obj_del = rocker_port_obj_del,
4452 .switchdev_port_obj_dump = rocker_port_obj_dump,
4455 /********************
4457 ********************/
/* ethtool get_settings: fetch link settings from the device. */
4459 static int rocker_port_get_settings(struct net_device *dev,
4460 struct ethtool_cmd *ecmd)
4462 struct rocker_port *rocker_port = netdev_priv(dev);
4464 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
/* ethtool set_settings: push link settings to the device. */
4467 static int rocker_port_set_settings(struct net_device *dev,
4468 struct ethtool_cmd *ecmd)
4470 struct rocker_port *rocker_port = netdev_priv(dev);
4472 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
/* ethtool get_drvinfo: report driver name and kernel release version. */
4475 static void rocker_port_get_drvinfo(struct net_device *dev,
4476 struct ethtool_drvinfo *drvinfo)
4478 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4479 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
/* ethtool statistics table: maps each stat string to the device TLV type
 * that carries its counter.  Order here defines the ethtool stat order.
 */
4482 static struct rocker_port_stats {
4483 char str[ETH_GSTRING_LEN];
4485 } rocker_port_stats[] = {
4486 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4487 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4488 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4489 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4491 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4492 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4493 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4494 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4497 #define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
/* ethtool get_strings: copy the stat names from rocker_port_stats[]. */
4499 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4505 switch (stringset) {
4507 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4508 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4509 p += ETH_GSTRING_LEN;
/* rocker_cmd_get_port_stats_prep() - build the GET_PORT_STATS command
 * descriptor: cmd type TLV plus a nested CMD_INFO carrying the pport.
 */
4516 rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
4517 struct rocker_desc_info *desc_info,
4520 struct rocker_tlv *cmd_stats;
4522 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4523 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4526 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4530 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4531 rocker_port->pport))
4534 rocker_tlv_nest_end(desc_info, cmd_stats);
/* rocker_cmd_get_port_stats_ethtool_proc() - parse a GET_PORT_STATS
 * reply: verify the pport matches this port, then copy each counter
 * into data[] in rocker_port_stats[] table order.
 */
4540 rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
4541 const struct rocker_desc_info *desc_info,
4544 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4545 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4546 const struct rocker_tlv *pattr;
4551 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4553 if (!attrs[ROCKER_TLV_CMD_INFO])
4556 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4557 attrs[ROCKER_TLV_CMD_INFO]);
4559 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
4562 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4563 if (pport != rocker_port->pport)
4566 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4567 pattr = stats_attrs[rocker_port_stats[i].type];
4571 data[i] = rocker_tlv_get_u64(pattr);
/* rocker_cmd_get_port_stats_ethtool() - run the stats request/response
 * pair (prep + proc callbacks) over the command ring.
 */
4577 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4580 return rocker_cmd_exec(rocker_port, NULL, 0,
4581 rocker_cmd_get_port_stats_prep, NULL,
4582 rocker_cmd_get_port_stats_ethtool_proc,
/* ethtool get_ethtool_stats: query the device; on failure the loop fills
 * data[] with fallback values so ethtool still gets a full array.
 */
4586 static void rocker_port_get_stats(struct net_device *dev,
4587 struct ethtool_stats *stats, u64 *data)
4589 struct rocker_port *rocker_port = netdev_priv(dev);
4591 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4594 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
/* ethtool get_sset_count: number of stats strings/counters. */
4599 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4603 return ROCKER_PORT_STATS_LEN;
/* ethtool operations for a rocker port. */
4609 static const struct ethtool_ops rocker_port_ethtool_ops = {
4610 .get_settings = rocker_port_get_settings,
4611 .set_settings = rocker_port_set_settings,
4612 .get_drvinfo = rocker_port_get_drvinfo,
4613 .get_link = ethtool_op_get_link,
4614 .get_strings = rocker_port_get_strings,
4615 .get_ethtool_stats = rocker_port_get_stats,
4616 .get_sset_count = rocker_port_get_sset_count,
/* Map a TX napi_struct back to its owning rocker_port. */
4623 static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4625 return container_of(napi, struct rocker_port, napi_tx);
/* rocker_port_poll_tx() - TX-completion NAPI poll: reap finished tail
 * descriptors, unmap their fragments, account tx stats (errors if the
 * descriptor reported one), free the skbs, wake the queue if it was
 * stopped, then complete NAPI and return the credits to the ring.
 */
4628 static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4630 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
4631 const struct rocker *rocker = rocker_port->rocker;
4632 const struct rocker_desc_info *desc_info;
4636 /* Cleanup tx descriptors */
4637 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
4638 struct sk_buff *skb;
4640 err = rocker_desc_err(desc_info);
4641 if (err && net_ratelimit())
4642 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4644 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
/* the skb was stashed as the descriptor cookie at xmit time */
4646 skb = rocker_desc_cookie_ptr_get(desc_info);
4648 rocker_port->dev->stats.tx_packets++;
4649 rocker_port->dev->stats.tx_bytes += skb->len;
4651 rocker_port->dev->stats.tx_errors++;
4654 dev_kfree_skb_any(skb);
4658 if (credits && netif_queue_stopped(rocker_port->dev))
4659 netif_wake_queue(rocker_port->dev);
4661 napi_complete(napi);
4662 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
/* rocker_port_rx_proc() - process one received descriptor: parse its
 * TLVs, unmap the RX buffer, size the skb to the fragment length, set
 * the offload-forward mark when the device already forwarded the packet
 * (so the bridge won't duplicate it), account stats, hand the skb to
 * the stack, and refill the descriptor with a fresh buffer.
 */
4667 static int rocker_port_rx_proc(const struct rocker *rocker,
4668 const struct rocker_port *rocker_port,
4669 struct rocker_desc_info *desc_info)
4671 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
4672 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4679 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4680 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4682 if (attrs[ROCKER_TLV_RX_FLAGS])
4683 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
4685 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4687 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4688 skb_put(skb, rx_len);
4689 skb->protocol = eth_type_trans(skb, rocker_port->dev);
4691 if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
4692 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
4694 rocker_port->dev->stats.rx_packets++;
4695 rocker_port->dev->stats.rx_bytes += skb->len;
4697 netif_receive_skb(skb);
4699 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
/* Map an RX napi_struct back to its owning rocker_port. */
4702 static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4704 return container_of(napi, struct rocker_port, napi_rx);
/* rocker_port_poll_rx() - RX NAPI poll: consume up to @budget tail
 * descriptors, process each via rocker_port_rx_proc() (counting
 * descriptor/processing errors as rx_errors), recycle the descriptor to
 * the head, and complete NAPI only when the budget wasn't exhausted.
 */
4707 static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4709 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
4710 const struct rocker *rocker = rocker_port->rocker;
4711 struct rocker_desc_info *desc_info;
4715 /* Process rx descriptors */
4716 while (credits < budget &&
4717 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4718 err = rocker_desc_err(desc_info);
4720 if (net_ratelimit())
4721 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4724 err = rocker_port_rx_proc(rocker, rocker_port,
4726 if (err && net_ratelimit())
4727 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4731 rocker_port->dev->stats.rx_errors++;
4733 rocker_desc_gen_clear(desc_info);
4734 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4738 if (credits < budget)
4739 napi_complete(napi);
4741 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
/* rocker_carrier_init() - seed the netdev carrier state from the
 * device's physical link-status register (one bit per pport).
 */
4750 static void rocker_carrier_init(const struct rocker_port *rocker_port)
4752 const struct rocker *rocker = rocker_port->rocker;
4753 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4756 link_up = link_status & (1 << rocker_port->pport);
4758 netif_carrier_on(rocker_port->dev);
4760 netif_carrier_off(rocker_port->dev);
/* rocker_remove_ports() - tear down every probed port: remove its
 * ingress-port table entry, unregister and free its netdev, then free
 * the ports array itself.
 */
4763 static void rocker_remove_ports(const struct rocker *rocker)
4765 struct rocker_port *rocker_port;
4768 for (i = 0; i < rocker->port_count; i++) {
4769 rocker_port = rocker->ports[i];
4772 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
4773 unregister_netdev(rocker_port->dev);
4774 free_netdev(rocker_port->dev);
4776 kfree(rocker->ports);
/* rocker_port_dev_addr_init() - read the port MAC from the device; fall
 * back to a random hardware address (with a warning) if the query fails.
 */
4779 static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
4781 const struct rocker *rocker = rocker_port->rocker;
4782 const struct pci_dev *pdev = rocker->pdev;
4785 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4786 rocker_port->dev->dev_addr);
4788 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4789 eth_hw_addr_random(rocker_port->dev);
/* rocker_probe_port() - allocate and register the netdev for one switch
 * port (pport is 1-based: port_number + 1), wire up netdev/ethtool/
 * switchdev ops and NAPI, then do the initial hardware programming:
 * learning mode, ingress-port table entry, internal VLAN assignment,
 * and the untagged VLAN (vid 0).  Unwinds via the goto ladder.
 */
4793 static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4795 const struct pci_dev *pdev = rocker->pdev;
4796 struct rocker_port *rocker_port;
4797 struct net_device *dev;
4798 u16 untagged_vid = 0;
4801 dev = alloc_etherdev(sizeof(struct rocker_port));
4804 rocker_port = netdev_priv(dev);
4805 rocker_port->dev = dev;
4806 rocker_port->rocker = rocker;
4807 rocker_port->port_number = port_number;
4808 rocker_port->pport = port_number + 1;
4809 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
4810 rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
4812 rocker_port_dev_addr_init(rocker_port);
4813 dev->netdev_ops = &rocker_port_netdev_ops;
4814 dev->ethtool_ops = &rocker_port_ethtool_ops;
4815 dev->switchdev_ops = &rocker_port_switchdev_ops;
4816 netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
4818 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
4820 rocker_carrier_init(rocker_port);
4822 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
4824 err = register_netdev(dev);
4826 dev_err(&pdev->dev, "register_netdev failed\n");
4827 goto err_register_netdev;
4829 rocker->ports[port_number] = rocker_port;
4831 switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
4833 rocker_port_set_learning(rocker_port, NULL);
4835 err = rocker_port_ig_tbl(rocker_port, NULL, 0);
4837 netdev_err(rocker_port->dev, "install ig port table failed\n");
4838 goto err_port_ig_tbl;
4841 rocker_port->internal_vlan_id =
4842 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
4844 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
4846 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
4847 goto err_untagged_vlan;
/* error unwind: undo ig table entry, ports[] slot, and netdev */
4853 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
4855 rocker->ports[port_number] = NULL;
4856 unregister_netdev(dev);
4857 err_register_netdev:
/* rocker_probe_ports() - allocate the ports array and probe each port;
 * on any failure the already-probed ports are removed.
 */
4862 static int rocker_probe_ports(struct rocker *rocker)
4868 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
4869 rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
4872 for (i = 0; i < rocker->port_count; i++) {
4873 err = rocker_probe_port(rocker, i);
4880 rocker_remove_ports(rocker);
/* rocker_msix_init() - verify the device exposes exactly the expected
 * number of MSI-X vectors for this port count (cmd + event + tx/rx per
 * port), allocate and number the entry table, and enable MSI-X.
 */
4884 static int rocker_msix_init(struct rocker *rocker)
4886 struct pci_dev *pdev = rocker->pdev;
4891 msix_entries = pci_msix_vec_count(pdev);
4892 if (msix_entries < 0)
4893 return msix_entries;
4895 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
4898 rocker->msix_entries = kmalloc_array(msix_entries,
4899 sizeof(struct msix_entry),
4901 if (!rocker->msix_entries)
4904 for (i = 0; i < msix_entries; i++)
4905 rocker->msix_entries[i].entry = i;
4907 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
4909 goto err_enable_msix;
4914 kfree(rocker->msix_entries);
/* rocker_msix_fini() - disable MSI-X and free the entry table. */
4918 static void rocker_msix_fini(const struct rocker *rocker)
4920 pci_disable_msix(rocker->pdev);
4921 kfree(rocker->msix_entries);
/* rocker_probe() - PCI probe: bring up the whole device.  Sequence:
 * enable PCI, claim regions, set a 64-bit DMA mask (falling back to
 * 32-bit), map BAR0, read the port count, init MSI-X, run the basic
 * hardware self-test, reset the device, set up DMA rings, request the
 * cmd and event IRQs, read the switch ID, init software tables, start
 * the FDB cleanup timer, and finally probe the ports.  The long goto
 * ladder at the bottom unwinds each step in reverse on failure.
 */
4924 static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4926 struct rocker *rocker;
4929 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
4933 err = pci_enable_device(pdev);
4935 dev_err(&pdev->dev, "pci_enable_device failed\n");
4936 goto err_pci_enable_device;
4939 err = pci_request_regions(pdev, rocker_driver_name);
4941 dev_err(&pdev->dev, "pci_request_regions failed\n");
4942 goto err_pci_request_regions;
4945 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4947 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4949 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
4950 goto err_pci_set_dma_mask;
4953 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4955 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
4956 goto err_pci_set_dma_mask;
4960 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
4961 dev_err(&pdev->dev, "invalid PCI region size\n");
4963 goto err_pci_resource_len_check;
4966 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
4967 pci_resource_len(pdev, 0));
4968 if (!rocker->hw_addr) {
4969 dev_err(&pdev->dev, "ioremap failed\n");
4973 pci_set_master(pdev);
4975 rocker->pdev = pdev;
4976 pci_set_drvdata(pdev, rocker);
4978 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
4980 err = rocker_msix_init(rocker);
4982 dev_err(&pdev->dev, "MSI-X init failed\n");
4986 err = rocker_basic_hw_test(rocker);
4988 dev_err(&pdev->dev, "basic hw test failed\n");
4989 goto err_basic_hw_test;
4992 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4994 err = rocker_dma_rings_init(rocker);
4996 goto err_dma_rings_init;
4998 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
4999 rocker_cmd_irq_handler, 0,
5000 rocker_driver_name, rocker);
5002 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5003 goto err_request_cmd_irq;
5006 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5007 rocker_event_irq_handler, 0,
5008 rocker_driver_name, rocker);
5010 dev_err(&pdev->dev, "cannot assign event irq\n");
5011 goto err_request_event_irq;
5014 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5016 err = rocker_init_tbls(rocker);
5018 dev_err(&pdev->dev, "cannot init rocker tables\n");
5022 setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
5023 (unsigned long) rocker);
5024 mod_timer(&rocker->fdb_cleanup_timer, jiffies);
5026 err = rocker_probe_ports(rocker);
5028 dev_err(&pdev->dev, "failed to probe ports\n");
5029 goto err_probe_ports;
5032 dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5033 (int)sizeof(rocker->hw.id), &rocker->hw.id);
/* error unwind: reverse of the setup sequence above */
5038 del_timer_sync(&rocker->fdb_cleanup_timer);
5039 rocker_free_tbls(rocker);
5041 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5042 err_request_event_irq:
5043 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5044 err_request_cmd_irq:
5045 rocker_dma_rings_fini(rocker);
5048 rocker_msix_fini(rocker);
5050 iounmap(rocker->hw_addr);
5052 err_pci_resource_len_check:
5053 err_pci_set_dma_mask:
5054 pci_release_regions(pdev);
5055 err_pci_request_regions:
5056 pci_disable_device(pdev);
5057 err_pci_enable_device:
/* rocker_remove() - PCI remove: tear everything down in the reverse
 * order of rocker_probe(), resetting the hardware before removing the
 * ports so no DMA is in flight during teardown.
 */
5062 static void rocker_remove(struct pci_dev *pdev)
5064 struct rocker *rocker = pci_get_drvdata(pdev);
5066 del_timer_sync(&rocker->fdb_cleanup_timer);
5067 rocker_free_tbls(rocker);
5068 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5069 rocker_remove_ports(rocker);
5070 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5071 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5072 rocker_dma_rings_fini(rocker);
5073 rocker_msix_fini(rocker);
5074 iounmap(rocker->hw_addr);
5075 pci_release_regions(rocker->pdev);
5076 pci_disable_device(rocker->pdev);
/* PCI driver glue for the rocker device. */
5080 static struct pci_driver rocker_pci_driver = {
5081 .name = rocker_driver_name,
5082 .id_table = rocker_pci_id_table,
5083 .probe = rocker_probe,
5084 .remove = rocker_remove,
5087 /************************************
5088 * Net device notifier event handler
5089 ************************************/
/* True if @dev is a rocker port (identified by its netdev_ops). */
5091 static bool rocker_port_dev_check(const struct net_device *dev)
5093 return dev->netdev_ops == &rocker_port_netdev_ops;
/* rocker_port_bridge_join() - handle the port being enslaved to a
 * bridge: swap the port's internal VLAN for the bridge's (removing and
 * re-adding the untagged VLAN 0 around the switch), record the bridge
 * device, and update the forward mark.
 */
5096 static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5097 struct net_device *bridge)
5099 u16 untagged_vid = 0;
5102 /* Port is joining bridge, so the internal VLAN for the
5103 * port is going to change to the bridge internal VLAN.
5104 * Let's remove untagged VLAN (vid=0) from port and
5105 * re-add once internal VLAN has changed.
5108 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5112 rocker_port_internal_vlan_id_put(rocker_port,
5113 rocker_port->dev->ifindex);
5114 rocker_port->internal_vlan_id =
5115 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
5117 rocker_port->bridge_dev = bridge;
5118 switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
5120 return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
/* rocker_port_bridge_leave() - inverse of bridge_join: restore the
 * port's own internal VLAN (keyed by its own ifindex), clear the bridge
 * association and forward mark, re-add the untagged VLAN, and re-enable
 * forwarding if the port is up.
 */
5123 static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5125 u16 untagged_vid = 0;
5128 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5132 rocker_port_internal_vlan_id_put(rocker_port,
5133 rocker_port->bridge_dev->ifindex);
5134 rocker_port->internal_vlan_id =
5135 rocker_port_internal_vlan_id_get(rocker_port,
5136 rocker_port->dev->ifindex);
5138 switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
5140 rocker_port->bridge_dev = NULL;
5142 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
5146 if (rocker_port->dev->flags & IFF_UP)
5147 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
/* rocker_port_ovs_changed() - record the new OVS master (or NULL when
 * leaving) and cycle forwarding off/on so hardware picks up the change.
 */
5152 static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5153 struct net_device *master)
5157 rocker_port->bridge_dev = master;
5159 err = rocker_port_fwd_disable(rocker_port, NULL, 0);
5162 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
/* Dispatch a new master link: bridge join vs. OVS attach. */
5167 static int rocker_port_master_linked(struct rocker_port *rocker_port,
5168 struct net_device *master)
5172 if (netif_is_bridge_master(master))
5173 err = rocker_port_bridge_join(rocker_port, master);
5174 else if (netif_is_ovs_master(master))
5175 err = rocker_port_ovs_changed(rocker_port, master);
/* Dispatch a master unlink: bridge leave vs. OVS detach. */
5179 static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5183 if (rocker_port_is_bridged(rocker_port))
5184 err = rocker_port_bridge_leave(rocker_port)
5185 else if (rocker_port_is_ovsed(rocker_port))
5186 err = rocker_port_ovs_changed(rocker_port, NULL);
/* rocker_netdevice_event() - netdevice notifier: react to CHANGEUPPER
 * on rocker ports by reflecting master link/unlink into the hardware;
 * failures are logged but not propagated (notifiers can't veto here).
 */
5190 static int rocker_netdevice_event(struct notifier_block *unused,
5191 unsigned long event, void *ptr)
5193 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5194 struct netdev_notifier_changeupper_info *info;
5195 struct rocker_port *rocker_port;
5198 if (!rocker_port_dev_check(dev))
5202 case NETDEV_CHANGEUPPER:
5206 rocker_port = netdev_priv(dev);
5207 if (info->linking) {
5208 err = rocker_port_master_linked(rocker_port,
5211 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5214 err = rocker_port_master_unlinked(rocker_port);
5216 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
/* Notifier registered in rocker_module_init() for netdevice events. */
5225 static struct notifier_block rocker_netdevice_nb __read_mostly = {
5226 .notifier_call = rocker_netdevice_event,
5229 /************************************
5230 * Net event notifier event handler
5231 ************************************/
/* Mirror a neighbour-table update into the rocker IPv4 neigh table.
 * If the neighbour is no longer NUD_VALID the entry is removed
 * (ROCKER_OP_FLAG_REMOVE); either way the op is issued without waiting
 * for completion (ROCKER_OP_FLAG_NOWAIT) since we are in notifier
 * context.  primary_key holds the IPv4 address, n->ha the MAC.
 * Returns 0 or a negative errno from rocker_port_ipv4_neigh().
 */
5233 static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5235 struct rocker_port *rocker_port = netdev_priv(dev);
5236 int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5237 ROCKER_OP_FLAG_NOWAIT;
5238 __be32 ip_addr = *(__be32 *)n->primary_key;
5240 return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
/* netevent notifier callback: on NETEVENT_NEIGH_UPDATE for the IPv4 ARP
 * table, forward the update to rocker_neigh_update() for rocker ports.
 * Failures are only warned about.
 * NOTE(review): the assignment of dev (presumably n->dev) and the
 * notifier return statements are on truncated lines — confirm against
 * the full source.
 */
5243 static int rocker_netevent_event(struct notifier_block *unused,
5244 unsigned long event, void *ptr)
5246 struct net_device *dev;
5247 struct neighbour *n = ptr;
5251 case NETEVENT_NEIGH_UPDATE:
/* Only IPv4 ARP neighbours are offloaded; skip other tables. */
5252 if (n->tbl != &arp_tbl)
5255 if (!rocker_port_dev_check(dev))
5257 err = rocker_neigh_update(dev, n);
5260 "failed to handle neigh update (err %d)\n",
/* Notifier registered in rocker_module_init() for net events (ARP updates). */
5268 static struct notifier_block rocker_netevent_nb __read_mostly = {
5269 .notifier_call = rocker_netevent_event,
5272 /***********************
5273 * Module init and exit
5274 ***********************/
/* Module entry point: register the netdevice and netevent notifiers
 * first, then the PCI driver.  If PCI registration fails, unwind the
 * notifier registrations in reverse order via the error label.
 * NOTE(review): the success-path return is on a truncated line.
 */
5276 static int __init rocker_module_init(void)
5280 register_netdevice_notifier(&rocker_netdevice_nb);
5281 register_netevent_notifier(&rocker_netevent_nb);
5282 err = pci_register_driver(&rocker_pci_driver);
5284 goto err_pci_register_driver;
/* Unwind in reverse order of registration. */
5287 err_pci_register_driver:
5288 unregister_netevent_notifier(&rocker_netevent_nb);
5289 unregister_netdevice_notifier(&rocker_netdevice_nb);
/* Module exit: tear everything down in reverse order of rocker_module_init
 * (notifiers first, then the PCI driver).
 */
5293 static void __exit rocker_module_exit(void)
5295 unregister_netevent_notifier(&rocker_netevent_nb);
5296 unregister_netdevice_notifier(&rocker_netdevice_nb);
5297 pci_unregister_driver(&rocker_pci_driver);
/* Standard kernel module boilerplate: entry/exit hooks, license, authors,
 * and the PCI id table exported for module autoloading (modprobe via
 * MODULE_DEVICE_TABLE).
 */
5300 module_init(rocker_module_init);
5301 module_exit(rocker_module_exit);
5303 MODULE_LICENSE("GPL v2");
5304 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5305 MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5306 MODULE_DESCRIPTION("Rocker switch device driver");
5307 MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);