/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>

#include <linux/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker_hw.h"
#include "rocker.h"
#include "rocker_tlv.h"
static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};
struct rocker_flow_tbl_key {
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			enum rocker_of_dpa_table_id goto_tbl;
		} vlan;
		struct {
			u8 eth_dst_mask[ETH_ALEN];
			enum rocker_of_dpa_table_id goto_tbl;
		} term_mac;
		struct {
			enum rocker_of_dpa_table_id goto_tbl;
		} ucast_routing;
		struct {
			u8 eth_dst_mask[ETH_ALEN];
			enum rocker_of_dpa_table_id goto_tbl;
		} bridge;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
		} acl;
	};
};
struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	struct rocker_flow_tbl_key key;
	u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 group_id; /* key */
	union {
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	unsigned long touched;
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
};

struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u8 eth_dst[ETH_ALEN];
};
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */
enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};
static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_ovs_master(rocker_port->bridge_dev);
}
#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)
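/* Flags modifying the behaviour of the table-manipulation operations
 * below: REMOVE turns an add into a delete, NOWAIT makes command
 * execution fire-and-forget (and switches allocations to GFP_ATOMIC),
 * LEARNED marks FDB entries that come from hardware learning rather than
 * from the user, and REFRESH re-touches an existing FDB entry (e.g. its
 * last-touched time) instead of creating one.
 */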
static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
				size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction.  If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	return elem ? elem + 1 : NULL;
}

static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
			    size_t size)
{
	return __rocker_mem_alloc(trans, flags, size);
}

static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
			    size_t n, size_t size)
{
	return __rocker_mem_alloc(trans, flags, n * size);
}

static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}
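/* A minimal usage sketch of the transaction-aware helpers above (not part
 * of the original file; "entry" and "flags" are illustrative).  The same
 * code path runs once in the prepare phase and once in the commit phase,
 * so the prepare-phase allocation is the one handed back at commit time:
 *
 *	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
 *	if (!entry)
 *		return -ENOMEM;	(only the prepare phase can fail here)
 *	...
 *	rocker_kfree(trans, entry);	(a no-op during prepare)
 */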
struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(void)
{
	struct rocker_wait *wait;

	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait)
		return NULL;
	return wait;
}

static void rocker_wait_destroy(struct rocker_wait *wait)
{
	kfree(wait);
}
static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	return wait->done;
}
static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}
#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
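/* The accessors token-paste the ROCKER_ prefix onto the register name, so
 * rocker_write32(rocker, TEST_REG, val) expands to
 * writel(val, rocker->hw_addr + ROCKER_TEST_REG).
 */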
/*****************************
 * HW basic testing functions
 *****************************/
static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}
static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
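/* The DMA self-test exercises three device operations against the mapped
 * buffer: FILL (the device writes the 0x96 pattern), CLEAR (the device
 * zeroes it) and INVERT (the device flips every byte of a random buffer).
 * Each result is verified byte-for-byte by rocker_dma_test_one().
 */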
static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}
/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/
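/* Each DMA ring is a power-of-two array of descriptors with producer
 * (head) and consumer (tail) positions.  A generation bit in comp_err
 * marks descriptors the other side has completed, and credits written to
 * the DMA_DESC_CREDITS register hand processed descriptors back to the
 * hardware.
 */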
static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *
rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}
static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
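/* For example, a requested size of 48 is rounded up to the next power of
 * two (64), while values outside [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX]
 * are clamped to those bounds.
 */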
static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->type = type;
	info->size = size;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}
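/* The last descriptor is committed but its head is deliberately not
 * advanced: pushing head over the final slot would make head equal tail,
 * which rocker_desc_head_set() treats as an impossible full-ring state.
 */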
static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}

static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait;

	wait = rocker_wait_create();
	if (!wait)
		return -ENOMEM;
	rocker_desc_cookie_ptr_set(desc_info, wait);
	return 0;
}

static void
rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info);

	rocker_wait_destroy(wait);
}

static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;
	int err;

	for (i = 0; i < cmd_ring->size; i++) {
		err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
	return err;
}

static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;

	for (i = 0; i < cmd_ring->size; i++)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
}
static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_cmd_ring_waits_alloc(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring waits\n");
		goto err_dma_cmd_ring_waits_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}

	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);

	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}
static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_cmd_ring_waits_free(rocker);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
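/* Receive buffers leave room for the Ethernet header, FCS and one VLAN
 * tag on top of the MTU; the skb itself is allocated with
 * netdev_alloc_skb_ip_align() below so the IP header ends up aligned.
 */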
static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
					 const struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}
static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(const struct rocker_port *rocker_port,
				   bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}
/********************************
 * Interrupt handler and helpers
 ********************************/
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}
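/* Command completions are consumed here: waiters blocked in
 * rocker_cmd_exec() are woken, while nowait commands have nobody to wake,
 * so their descriptor generation bit is cleared immediately to recycle
 * the slot.
 */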
static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);
static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
					      __be16 vlan_id);
static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;
	int err;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
	if (err)
		return err;

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}
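/* MAC/VLAN-seen events are the hardware learning path: the address is
 * offered to the world ops first and then mirrored into the local FDB,
 * but only while the port's STP state permits learning.
 */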
static int rocker_event_process(const struct rocker *rocker,
				const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	const struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}
/********************
 * Command processor
 ********************/
typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv);

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
				    const struct rocker_desc_info *desc_info,
				    void *priv);
static int rocker_cmd_exec(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans, int flags,
			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
			   rocker_cmd_proc_cb_t process, void *process_priv)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
	unsigned long lock_flags;
	int err;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return -EAGAIN;
	}

	wait = rocker_desc_cookie_ptr_get(desc_info);
	rocker_wait_init(wait);
	wait->nowait = nowait;

	err = prepare(rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return err;
	}

	if (!switchdev_trans_ph_prepare(trans))
		rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	if (nowait)
		return 0;

	if (!switchdev_trans_ph_prepare(trans))
		if (!rocker_wait_event_timeout(wait, HZ / 10))
			return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
	return err;
}
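/* A typical invocation, mirroring the wrappers that follow: a prepare
 * callback builds the command TLVs, and an optional process callback
 * parses the response out of the same descriptor once the command
 * completes:
 *
 *	err = rocker_cmd_exec(rocker_port, NULL, 0,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_macaddr_proc,
 *			      macaddr);
 */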
static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}

static int
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}

static int
rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	u8 *p_mode = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
	if (!attr)
		return -EIO;

	*p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
	return 0;
}

struct port_name {
	char *buf;
	size_t len;
};

static int
rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
					    const struct rocker_desc_info *desc_info,
					    void *priv)
{
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct port_name *name = priv;
	const struct rocker_tlv *attr;
	size_t i, j, len;
	const char *str;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
	if (!attr)
		return -EIO;

	len = min_t(size_t, rocker_tlv_len(attr), name->len);
	str = rocker_tlv_data(attr);

	/* make sure name only contains alphanumeric characters */
	for (i = j = 0; i < len; ++i) {
		if (isalnum(str[i])) {
			name->buf[j] = str[i];
			j++;
		}
	}

	if (j == 0)
		return -EIO;

	name->buf[j] = '\0';

	return 0;
}
static int
rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
			       ethtool_cmd_speed(ecmd)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
			      ecmd->duplex))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
			      ecmd->autoneg))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	const unsigned char *macaddr = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
			   ETH_ALEN, macaddr))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      void *priv)
{
	int mtu = *(int *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
			       mtu))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	bool learning = *(bool *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
			      learning))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_ethtool_proc,
			       ecmd);
}

static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_macaddr_proc,
			       macaddr);
}

static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
					     u8 *p_mode)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_mode_proc, p_mode);
}

static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_ethtool_prep,
			       ecmd, NULL, NULL);
}

static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_macaddr_prep,
			       macaddr, NULL, NULL);
}

static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
					    int mtu)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_mtu_prep,
			       &mtu, NULL, NULL);
}

static int rocker_port_set_learning(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans,
				    bool learning)
{
	return rocker_cmd_exec(rocker_port, trans, 0,
			       rocker_cmd_set_port_learning_prep,
			       &learning, NULL, NULL);
}
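/* All of the set-side wrappers above reuse the single SET_PORT_SETTINGS
 * command and differ only in which ROCKER_TLV_CMD_PORT_SETTINGS_*
 * attribute they nest inside ROCKER_TLV_CMD_INFO.
 */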
/**********************
 * Worlds manipulation
 **********************/
static struct rocker_world_ops *rocker_world_ops[] = {
	&rocker_ofdpa_ops,
};

#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)
static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
{
	int i;

	for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
		if (rocker_world_ops[i]->mode == mode)
			return rocker_world_ops[i];
	return NULL;
}
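/* A "world" is a hardware personality (port mode) the device reports,
 * OF-DPA being the mode this driver supports; the rocker_world_* wrappers
 * below dispatch into the active world's ops and fall back to a benign
 * default when a hook is not implemented.
 */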
static int rocker_world_init(struct rocker *rocker, u8 mode)
{
	struct rocker_world_ops *wops;
	int err;

	wops = rocker_world_ops_find(mode);
	if (!wops) {
		dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
			mode);
		return -EINVAL;
	}
	rocker->wops = wops;
	rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
	if (!rocker->wpriv)
		return -ENOMEM;
	if (!wops->init)
		return 0;
	err = wops->init(rocker);
	if (err)
		kfree(rocker->wpriv);
	return err;
}

static void rocker_world_fini(struct rocker *rocker)
{
	struct rocker_world_ops *wops = rocker->wops;

	if (!wops || !wops->fini)
		return;
	wops->fini(rocker);
	kfree(rocker->wpriv);
}

static int rocker_world_check_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	u8 mode;
	int err;

	err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
	if (err) {
		dev_err(&rocker->pdev->dev, "failed to get port mode\n");
		return err;
	}
	if (rocker->wops) {
		if (rocker->wops->mode != mode) {
			dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
			return -EINVAL;
		}
		return 0;
	}
	return rocker_world_init(rocker, mode);
}

static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	int err;

	rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
	if (!rocker_port->wpriv)
		return -ENOMEM;
	if (!wops->port_pre_init)
		return 0;
	err = wops->port_pre_init(rocker_port);
	if (err)
		kfree(rocker_port->wpriv);
	return err;
}

static int rocker_world_port_init(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_init)
		return 0;
	return wops->port_init(rocker_port);
}

static void rocker_world_port_fini(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_fini)
		return;
	wops->port_fini(rocker_port);
}

static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_post_fini)
		return;
	wops->port_post_fini(rocker_port);
	kfree(rocker_port->wpriv);
}

static int rocker_world_port_open(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_open)
		return 0;
	return wops->port_open(rocker_port);
}

static void rocker_world_port_stop(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_stop)
		return;
	wops->port_stop(rocker_port);
}

static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
						u8 state,
						struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_stp_state_set)
		return -EOPNOTSUPP;
	return wops->port_attr_stp_state_set(rocker_port, state, trans);
}

static int
rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					unsigned long brport_flags,
					struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_flags_set)
		return -EOPNOTSUPP;
	return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
						trans);
}

static int
rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
					unsigned long *p_brport_flags)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_flags_get)
		return -EOPNOTSUPP;
	return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
}

static int
rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
					      u32 ageing_time,
					      struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_ageing_time_set)
		return -EOPNOTSUPP;
	return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
						      trans);
}
static int
rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
			       const struct switchdev_obj_port_vlan *vlan,
			       struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_vlan_add)
		return -EOPNOTSUPP;
	return wops->port_obj_vlan_add(rocker_port, vlan, trans);
}

static int
rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_vlan_del)
		return -EOPNOTSUPP;
	return wops->port_obj_vlan_del(rocker_port, vlan);
}

static int
rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
				struct switchdev_obj_port_vlan *vlan,
				switchdev_obj_dump_cb_t *cb)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_vlan_dump)
		return -EOPNOTSUPP;
	return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
}

static int
rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
			       const struct switchdev_obj_ipv4_fib *fib4,
			       struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fib4_add)
		return -EOPNOTSUPP;
	return wops->port_obj_fib4_add(rocker_port, fib4, trans);
}

static int
rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
			       const struct switchdev_obj_ipv4_fib *fib4)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fib4_del)
		return -EOPNOTSUPP;
	return wops->port_obj_fib4_del(rocker_port, fib4);
}

static int
rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
			      const struct switchdev_obj_port_fdb *fdb,
			      struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fdb_add)
		return -EOPNOTSUPP;
	return wops->port_obj_fdb_add(rocker_port, fdb, trans);
}

static int
rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
			      const struct switchdev_obj_port_fdb *fdb)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fdb_del)
		return -EOPNOTSUPP;
	return wops->port_obj_fdb_del(rocker_port, fdb);
}

static int
rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
			       struct switchdev_obj_port_fdb *fdb,
			       switchdev_obj_dump_cb_t *cb)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fdb_dump)
		return -EOPNOTSUPP;
	return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
}

static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
					   struct net_device *master)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_master_linked)
		return -EOPNOTSUPP;
	return wops->port_master_linked(rocker_port, master);
}

static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
					     struct net_device *master)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_master_unlinked)
		return -EOPNOTSUPP;
	return wops->port_master_unlinked(rocker_port, master);
}

static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
					  struct neighbour *n)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_neigh_update)
		return -EOPNOTSUPP;
	return wops->port_neigh_update(rocker_port, n);
}

static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
					   struct neighbour *n)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_neigh_destroy)
		return -EOPNOTSUPP;
	return wops->port_neigh_destroy(rocker_port, n);
}

static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
					      __be16 vlan_id)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_ev_mac_vlan_seen)
		return -EOPNOTSUPP;
	return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
}
static int
rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
				const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			     const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				 const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				      const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			       const struct rocker_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			    const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
2239 static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
2240 struct rocker_desc_info *desc_info,
2243 const struct rocker_flow_tbl_entry *entry = priv;
2244 struct rocker_tlv *cmd_info;
2247 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2249 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2252 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2255 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2256 entry->key.priority))
2258 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2260 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2264 switch (entry->key.tbl_id) {
2265 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2266 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2268 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2269 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2271 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2272 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2274 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2275 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2277 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2278 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2280 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2281 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2291 rocker_tlv_nest_end(desc_info, cmd_info);
2296 static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
2297 struct rocker_desc_info *desc_info,
2300 const struct rocker_flow_tbl_entry *entry = priv;
2301 struct rocker_tlv *cmd_info;
2303 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2305 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2308 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2311 rocker_tlv_nest_end(desc_info, cmd_info);
2317 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2318 struct rocker_group_tbl_entry *entry)
2320 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2321 ROCKER_GROUP_PORT_GET(entry->group_id)))
2323 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2324 entry->l2_interface.pop_vlan))
static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}
static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				   const struct rocker_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}
static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	const struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/
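/* The driver shadows each hardware table (flow, group, FDB,
 * internal VLAN and neigh) in a hash table guarded by its own
 * spinlock, so entries can be looked up and updated before the
 * corresponding commands are pushed to the device.
 */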
static int rocker_init_tbls(struct rocker *rocker)
{
	hash_init(rocker->flow_tbl);
	spin_lock_init(&rocker->flow_tbl_lock);

	hash_init(rocker->group_tbl);
	spin_lock_init(&rocker->group_tbl_lock);

	hash_init(rocker->fdb_tbl);
	spin_lock_init(&rocker->fdb_tbl_lock);

	hash_init(rocker->internal_vlan_tbl);
	spin_lock_init(&rocker->internal_vlan_tbl_lock);

	hash_init(rocker->neigh_tbl);
	spin_lock_init(&rocker->neigh_tbl_lock);

	return 0;
}
static void rocker_free_tbls(struct rocker *rocker)
{
	unsigned long flags;
	struct rocker_flow_tbl_entry *flow_entry;
	struct rocker_group_tbl_entry *group_entry;
	struct rocker_fdb_tbl_entry *fdb_entry;
	struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
	struct rocker_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
	hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
	hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

	spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
	hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
}
static struct rocker_flow_tbl_entry *
rocker_flow_tbl_find(const struct rocker *rocker,
		     const struct rocker_flow_tbl_entry *match)
{
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(rocker->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
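/* Add-or-modify semantics: flow entries are keyed by a CRC32 of the
 * flow key.  If a matching entry already exists it is replaced in
 * the shadow table and an OF_DPA_FLOW_MOD command is sent; otherwise
 * a fresh cookie is allocated and an OF_DPA_FLOW_ADD is sent.
 */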
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_flow_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		rocker_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = rocker->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

	return rocker_cmd_exec(rocker_port, trans, flags,
			       rocker_cmd_flow_tbl_add, found, NULL, NULL);
}
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_flow_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

	rocker_kfree(trans, match);

	if (found) {
		err = rocker_cmd_exec(rocker_port, trans, flags,
				      rocker_cmd_flow_tbl_del,
				      found, NULL, NULL);
		rocker_kfree(trans, found);
	}

	return err;
}
static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags,
			      struct rocker_flow_tbl_entry *entry)
{
	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
	else
		return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
}
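/* The rocker_flow_tbl_* helpers below allocate an entry, fill in the
 * key for one specific OF-DPA table, and hand it to
 * rocker_flow_tbl_do(), which picks add vs. delete based on
 * ROCKER_OP_FLAG_REMOVE.
 */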
static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags,
				   u32 in_pport, u32 in_pport_mask,
				   enum rocker_of_dpa_table_id goto_tbl)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				u32 in_pport, __be16 vlan_id,
				__be16 vlan_id_mask,
				enum rocker_of_dpa_table_id goto_tbl,
				bool untagged, __be16 new_vlan_id)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;

	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans,
				    u32 in_pport, u32 in_pport_mask,
				    __be16 eth_type, const u8 *eth_dst,
				    const u8 *eth_dst_mask, __be16 vlan_id,
				    __be16 vlan_id_mask, bool copy_to_cpu,
				    int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  const u8 *eth_dst, const u8 *eth_dst_mask,
				  __be16 vlan_id, u32 tunnel_id,
				  enum rocker_of_dpa_table_id goto_tbl,
				  u32 group_id, bool copy_to_cpu)
{
	struct rocker_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = ROCKER_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
					  struct switchdev_trans *trans,
					  __be16 eth_type, __be32 dst,
					  __be32 dst_mask, u32 priority,
					  enum rocker_of_dpa_table_id goto_tbl,
					  u32 group_id, int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct rocker_flow_tbl_key,
				  ucast_routing.group_id);

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       u32 in_pport, u32 in_pport_mask,
			       const u8 *eth_src, const u8 *eth_src_mask,
			       const u8 *eth_dst, const u8 *eth_dst_mask,
			       __be16 eth_type, __be16 vlan_id,
			       __be16 vlan_id_mask, u8 ip_proto,
			       u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			       u32 group_id)
{
	u32 priority;
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	priority = ROCKER_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = ROCKER_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = ROCKER_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static struct rocker_group_tbl_entry *
rocker_group_tbl_find(const struct rocker *rocker,
		      const struct rocker_group_tbl_entry *match)
{
	struct rocker_group_tbl_entry *found;

	hash_for_each_possible(rocker->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}

static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
					struct rocker_group_tbl_entry *entry)
{
	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		rocker_kfree(trans, entry->group_ids);
		break;
	default:
		break;
	}
	rocker_kfree(trans, entry);
}
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		rocker_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(rocker->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(rocker_port, trans, flags,
			       rocker_cmd_group_tbl_add, found, NULL, NULL);
}
static int rocker_group_tbl_del(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	rocker_group_tbl_entry_free(trans, match);

	if (found) {
		err = rocker_cmd_exec(rocker_port, trans, flags,
				      rocker_cmd_group_tbl_del,
				      found, NULL, NULL);
		rocker_group_tbl_entry_free(trans, found);
	}

	return err;
}

static int rocker_group_tbl_do(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_group_tbl_entry *entry)
{
	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_group_tbl_del(rocker_port, trans, flags, entry);
	else
		return rocker_group_tbl_add(rocker_port, trans, flags, entry);
}
static int rocker_group_l2_interface(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id, u32 out_pport,
				     int pop_vlan)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans,
				   int flags, u8 group_count,
				   const u32 *group_ids, u32 group_id)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = rocker_kcalloc(trans, flags,
					  group_count, sizeof(u32));
	if (!entry->group_ids) {
		rocker_kfree(trans, entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_flood(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 __be16 vlan_id, u8 group_count,
				 const u32 *group_ids, u32 group_id)
{
	return rocker_group_l2_fan_out(rocker_port, trans, flags,
				       group_count, group_ids,
				       group_id);
}
static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags,
				   u32 index, const u8 *src_mac, const u8 *dst_mac,
				   __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}
static struct rocker_neigh_tbl_entry *
rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
{
	struct rocker_neigh_tbl_entry *found;

	hash_for_each_possible(rocker->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}
static void _rocker_neigh_add(struct rocker *rocker,
			      struct switchdev_trans *trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (!switchdev_trans_ph_commit(trans))
		entry->index = rocker->neigh_tbl_next_index++;
	if (switchdev_trans_ph_prepare(trans))
		return;
	entry->ref_count++;
	hash_add(rocker->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}

static void _rocker_neigh_del(struct switchdev_trans *trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (switchdev_trans_ph_prepare(trans))
		return;
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		rocker_kfree(trans, entry);
	}
}

static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
				 struct switchdev_trans *trans,
				 const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else if (!switchdev_trans_ph_prepare(trans)) {
		entry->ref_count++;
	}
}
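/* Install or remove the L3 unicast group and /32 route for an IPv4
 * neighbor.  The shadow neigh entry is ref-counted by the
 * _rocker_neigh_* helpers above, since several routes' nexthops may
 * share one neighbor.
 */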
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans,
				  int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		_rocker_neigh_add(rocker, trans, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = rocker_group_l3_unicast(rocker_port, trans, flags,
				      entry->index,
				      rocker_port->dev->dev_addr,
				      entry->eth_dst,
				      rocker_port->internal_vlan_id,
				      entry->ttl_check,
				      rocker_port->pport);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
						     eth_type, ip_addr,
						     inet_make_mask(32),
						     priority, goto_tbl,
						     group_id, flags);

		if (err)
			netdev_err(rocker_port->dev,
				   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	if (!adding)
		rocker_kfree(trans, entry);

	return err;
}
static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans,
				    __be32 ip_addr)
{
	struct net_device *dev = rocker_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
					     ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}
static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       __be32 ip_addr, u32 *index)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);
	if (found)
		*index = found->index;

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		_rocker_neigh_add(rocker, trans, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (!adding)
		rocker_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);

	return err;
}
static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					int flags, __be16 vlan_id)
{
	struct rocker_port *p;
	const struct rocker *rocker = rocker_port->rocker;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = rocker_kcalloc(trans, flags,
				   rocker->port_count, sizeof(u32));
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (!p)
			continue;
		if (!rocker_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
				    group_count, group_ids, group_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	rocker_kfree(trans, group_ids);
	return err;
}
static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      __be16 vlan_id, bool pop_vlan)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_port *p;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = rocker_port->pport;
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = rocker_group_l2_interface(rocker_port, trans, flags,
					vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
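/* Table of control-traffic policies.  Each entry describes one class
 * of packets that must be trapped or copied to the CPU, and flags
 * which table (ACL, bridging or termination MAC) implements it.
 */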
static struct rocker_ctrl {
	const u8 *eth_dst;
	const u8 *eth_dst_mask;
	__be16 eth_type;
	bool acl;
	bool bridge;
	bool term;
	bool copy_to_cpu;
} rocker_ctrls[] = {
	[ROCKER_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[ROCKER_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[ROCKER_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = rocker_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = rocker_flow_tbl_acl(rocker_port, trans, flags,
				  in_pport, in_pport_mask,
				  eth_src, eth_src_mask,
				  ctrl->eth_dst, ctrl->eth_dst_mask,
				  ctrl->eth_type,
				  vlan_id, vlan_id_mask,
				  ip_proto, ip_proto_mask,
				  ip_tos, ip_tos_mask,
				  group_id);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}
static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					int flags,
					const struct rocker_ctrl *ctrl,
					__be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
				     ctrl->eth_dst, ctrl->eth_dst_mask,
				     vlan_id, tunnel_id,
				     goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       ctrl->eth_type, ctrl->eth_dst,
				       ctrl->eth_dst_mask, vlan_id,
				       vlan_id_mask, ctrl->copy_to_cpu,
				       flags);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}
static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
						 ctrl, vlan_id);
	if (ctrl->bridge)
		return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
						    ctrl, vlan_id);

	if (ctrl->term)
		return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
						  ctrl, vlan_id);

	return -EOPNOTSUPP;
}

static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id)
{
	int err = 0;
	int i;

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (rocker_port->ctrls[i]) {
			err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
						    &rocker_ctrls[i], vlan_id);
			if (err)
				return err;
		}
	}

	return err;
}

static int rocker_port_ctrl(struct rocker_port *rocker_port,
			    struct switchdev_trans *trans, int flags,
			    const struct rocker_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
					    ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}
static int rocker_port_vlan(struct rocker_port *rocker_port,
			    struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = rocker_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);

	if (adding && test_bit(ntohs(internal_vlan_id),
			       rocker_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding && !test_bit(ntohs(internal_vlan_id),
				      rocker_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	if (adding) {
		err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
						internal_vlan_id);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
					 internal_vlan_id, untagged);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
					   internal_vlan_id);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
				   in_pport, vlan_id, vlan_id_mask,
				   goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN table\n", err);

err_out:
	if (switchdev_trans_ph_prepare(trans))
		change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	return err;
}
static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
				      in_pport, in_pport_mask,
				      goto_tbl);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) ingress port table entry\n", err);

	return err;
}
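/* FDB learn events are reported to the bridge via switchdev
 * notifiers, which may sleep, so they are deferred to a work item
 * rather than raised directly from event-processing context.
 */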
struct rocker_fdb_learn_work {
	struct work_struct work;
	struct rocker_port *rocker_port;
	struct switchdev_trans *trans;
	int flags;
	u8 addr[ETH_ALEN];
	u16 vid;
};

static void rocker_port_fdb_learn_work(struct work_struct *work)
{
	const struct rocker_fdb_learn_work *lw =
		container_of(work, struct rocker_fdb_learn_work, work);
	bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
	bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->rocker_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->rocker_port->dev, &info.info);
	rtnl_unlock();

	rocker_kfree(lw->trans, work);
}
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 const u8 *addr, __be16 vlan_id)
{
	struct rocker_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = rocker_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	if (rocker_port_is_bridged(rocker_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
		err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
					     NULL, vlan_id, tunnel_id, goto_tbl,
					     group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	lw = rocker_kzalloc(trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);

	lw->rocker_port = rocker_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);

	if (switchdev_trans_ph_prepare(trans))
		rocker_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}
static struct rocker_fdb_tbl_entry *
rocker_fdb_tbl_find(const struct rocker *rocker,
		    const struct rocker_fdb_tbl_entry *match)
{
	struct rocker_fdb_tbl_entry *found;

	hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
			return found;

	return NULL;
}
static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *fdb;
	struct rocker_fdb_tbl_entry *found;
	bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.rocker_port = rocker_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	found = rocker_fdb_tbl_find(rocker, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			rocker_kfree(trans, fdb);
			if (!switchdev_trans_ph_prepare(trans))
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_add(rocker->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		rocker_kfree(trans, fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= ROCKER_OP_FLAG_REFRESH;
	}

	return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
}
static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		if (!found->learned)
			continue;
		err = rocker_port_fdb_learn(rocker_port, trans, flags,
					    found->key.addr,
					    found->key.vlan_id);
		if (err)
			goto err_out;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}
static void rocker_fdb_cleanup(unsigned long data)
{
	struct rocker *rocker = (struct rocker *)data;
	struct rocker_port *rocker_port;
	struct rocker_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
		    ROCKER_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		rocker_port = entry->key.rocker_port;
		expires = entry->touched + rocker_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			rocker_port_fdb_learn(rocker_port, NULL,
					      flags, entry->key.addr,
					      entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
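/* Termination-MAC entries for the port's own MAC, one per routed
 * ethtype (IPv4 and IPv6), so frames addressed to the router are
 * passed on to the unicast routing table.
 */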
static int rocker_port_router_mac(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);

	return err;
}
static int rocker_port_fwding(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		flags |= ROCKER_OP_FLAG_REMOVE;

	out_pport = rocker_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		pop_vlan = rocker_vlan_id_is_internal(vlan_id);
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}
static int rocker_port_stp_update(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  u8 state)
{
	bool want[ROCKER_CTRL_MAX] = { 0, };
	bool prev_ctrls[ROCKER_CTRL_MAX];
	u8 uninitialized_var(prev_state);
	int err;
	int i;

	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
		prev_state = rocker_port->stp_state;
	}

	if (rocker_port->stp_state == state)
		return 0;

	rocker_port->stp_state = state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		want[ROCKER_CTRL_IPV4_MCAST] = true;
		want[ROCKER_CTRL_IPV6_MCAST] = true;
		if (rocker_port_is_bridged(rocker_port))
			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
		else if (rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_DFLT_OVS] = true;
		else
			want[ROCKER_CTRL_LOCAL_ARP] = true;
		break;
	}

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (want[i] != rocker_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
			err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
					       &rocker_ctrls[i]);
			if (err)
				goto err_out;
			rocker_port->ctrls[i] = want[i];
		}
	}

	err = rocker_port_fdb_flush(rocker_port, trans, flags);
	if (err)
		goto err_out;

	err = rocker_port_fwding(rocker_port, trans, flags);

err_out:
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		rocker_port->stp_state = prev_state;
	}

	return err;
}
static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will enable port */
		return 0;

	/* port is not bridged, so simulate going to FORWARDING state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_FORWARDING);
}

static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will disable port */
		return 0;

	/* port is not bridged, so simulate going to DISABLED state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_DISABLED);
}
static struct rocker_internal_vlan_tbl_entry *
rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
{
	struct rocker_internal_vlan_tbl_entry *found;

	hash_for_each_possible(rocker->internal_vlan_tbl, found,
			       entry, ifindex) {
		if (found->ifindex == ifindex)
			return found;
	}

	return NULL;
}

static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
					       int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *entry;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);

	for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
static void
rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
				 int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (!found) {
		netdev_err(rocker_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, rocker->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}
static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, __be32 dst,
				int dst_len, const struct fib_info *fi,
				u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == rocker_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = rocker_port_ipv4_nh(rocker_port, trans, flags,
					  nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
					     dst_mask, priority, goto_tbl,
					     group_id, flags);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}

/*****************
 * Net device ops
 *****************/
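/* Bring-up order for a port: DMA rings first, then the TX/RX MSI-X
 * vectors, then the world-specific open hook, and finally forwarding
 * state, NAPI and the egress queue.
 */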
static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_world_port_open(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot open port in world\n");
		goto err_world_port_open;
	}

	err = rocker_port_fwd_enable(rocker_port, NULL, 0);
	if (err)
		goto err_fwd_enable;

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_fwd_enable:
err_world_port_open:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}
static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_world_port_stop(rocker_port);
	rocker_port_fwd_disable(rocker_port, NULL,
				ROCKER_OP_FLAG_NOWAIT);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}
static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
	struct rocker_tlv *attr;
	int rem;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_TX_FRAGS])
		return;
	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
		const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
		dma_addr_t dma_handle;
		size_t len;

		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
			continue;
		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
					attr);
		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
			continue;
		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
		pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
	}
}
static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       char *buf, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	struct rocker_tlv *frag;

	dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
		if (net_ratelimit())
			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
	if (!frag)
		goto unmap_frag;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
			       dma_handle))
		goto nest_cancel;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
			       buf_len))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, frag);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
	pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
	return -EMSGSIZE;
}
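/* The TX path describes each skb to the device as a nested
 * ROCKER_TLV_TX_FRAGS attribute, one DMA-mapped ADDR/LEN pair per
 * fragment; skbs with too many fragments are linearized first.
 */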
static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_tlv *frags;
	int i;
	int err;

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (unlikely(!desc_info)) {
		if (net_ratelimit())
			netdev_err(dev, "tx ring full when queue awake\n");
		return NETDEV_TX_BUSY;
	}

	rocker_desc_cookie_ptr_set(desc_info, skb);

	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;
	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		err = skb_linearize(skb);
		if (err)
			goto unmap_frags;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
						  skb_frag_address(frag),
						  skb_frag_size(frag));
		if (err)
			goto unmap_frags;
	}
	rocker_tlv_nest_end(desc_info, frags);

	rocker_desc_gen_clear(desc_info);
	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (!desc_info)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

unmap_frags:
	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frags);
out:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
static int rocker_port_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int running = netif_running(dev);
	int err;

#define ROCKER_PORT_MIN_MTU	68
#define ROCKER_PORT_MAX_MTU	9000

	if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
		return -EINVAL;

	if (running)
		rocker_port_stop(dev);

	netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
	if (err)
		return err;

	if (running)
		err = rocker_port_open(dev);

	return err;
}
static int rocker_port_get_phys_port_name(struct net_device *dev,
					  char *buf, size_t len)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct port_name name = { .buf = buf, .len = len };
	int err;

	err = rocker_cmd_exec(rocker_port, NULL, 0,
			      rocker_cmd_get_port_settings_prep, NULL,
			      rocker_cmd_get_port_settings_phys_name_proc,
			      &name);

	return err ? -EOPNOTSUPP : 0;
}

static int rocker_port_change_proto_down(struct net_device *dev,
					 bool proto_down)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_port->dev->flags & IFF_UP)
		rocker_port_set_enable(rocker_port, !proto_down);
	rocker_port->dev->proto_down = proto_down;
	return 0;
}
static void rocker_port_neigh_destroy(struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(n->dev);
	int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;
	int err;

	rocker_port_ipv4_neigh(rocker_port, NULL,
			       flags, ip_addr, n->ha);
	err = rocker_world_port_neigh_destroy(rocker_port, n);
	if (err)
		netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
			    err);
}
static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open			= rocker_port_open,
	.ndo_stop			= rocker_port_stop,
	.ndo_start_xmit			= rocker_port_xmit,
	.ndo_set_mac_address		= rocker_port_set_mac_address,
	.ndo_change_mtu			= rocker_port_change_mtu,
	.ndo_bridge_getlink		= switchdev_port_bridge_getlink,
	.ndo_bridge_setlink		= switchdev_port_bridge_setlink,
	.ndo_bridge_dellink		= switchdev_port_bridge_dellink,
	.ndo_fdb_add			= switchdev_port_fdb_add,
	.ndo_fdb_del			= switchdev_port_fdb_del,
	.ndo_fdb_dump			= switchdev_port_fdb_dump,
	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
	.ndo_change_proto_down		= rocker_port_change_proto_down,
	.ndo_neigh_destroy		= rocker_port_neigh_destroy,
};

/********************
 * swdev interface
 ********************/
static int rocker_port_attr_get(struct net_device *dev,
				struct switchdev_attr *attr)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	const struct rocker *rocker = rocker_port->rocker;
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(rocker->hw.id);
		memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags = rocker_port->brport_flags;
		err = rocker_world_port_attr_bridge_flags_get(rocker_port,
							      &attr->u.brport_flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}
static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					unsigned long brport_flags)
{
	unsigned long orig_flags;
	int err = 0;

	orig_flags = rocker_port->brport_flags;
	rocker_port->brport_flags = brport_flags;
	if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
		err = rocker_port_set_learning(rocker_port, trans,
					       !!(rocker_port->brport_flags & BR_LEARNING));

	if (switchdev_trans_ph_prepare(trans))
		rocker_port->brport_flags = orig_flags;

	return err;
}

static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
					  struct switchdev_trans *trans,
					  u32 ageing_time)
{
	if (!switchdev_trans_ph_prepare(trans)) {
		rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
		mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
	}

	return 0;
}
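/* The switchdev attr/obj handlers run the OF-DPA code first and then
 * the world ops, so both state tracks stay in sync during the
 * transition to the world infrastructure.
 */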
static int rocker_port_attr_set(struct net_device *dev,
				const struct switchdev_attr *attr,
				struct switchdev_trans *trans)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = rocker_port_stp_update(rocker_port, trans, 0,
					     attr->u.stp_state);
		if (err)
			break;
		err = rocker_world_port_attr_stp_state_set(rocker_port,
							   attr->u.stp_state,
							   trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = rocker_port_brport_flags_set(rocker_port, trans,
						   attr->u.brport_flags);
		if (err)
			break;
		err = rocker_world_port_attr_bridge_flags_set(rocker_port,
							      attr->u.brport_flags,
							      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = rocker_port_bridge_ageing_time(rocker_port, trans,
						     attr->u.ageing_time);
		if (err)
			break;
		err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
								    attr->u.ageing_time,
								    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int rocker_port_vlan_add(struct rocker_port *rocker_port,
				struct switchdev_trans *trans,
				u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = rocker_port_vlan(rocker_port, trans, 0, vid);
	if (err)
		return err;

	err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
	if (err)
		rocker_port_vlan(rocker_port, trans,
				 ROCKER_OP_FLAG_REMOVE, vid);

	return err;
}

static int rocker_port_vlans_add(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans,
				 const struct switchdev_obj_port_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_add(rocker_port, trans,
					   vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int rocker_port_fdb_add(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans,
			       const struct switchdev_obj_port_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = 0;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}
static int rocker_port_obj_add(struct net_device *dev,
			       const struct switchdev_obj *obj,
			       struct switchdev_trans *trans)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlans_add(rocker_port, trans,
					    SWITCHDEV_OBJ_PORT_VLAN(obj));
		if (err)
			break;
		err = rocker_world_port_obj_vlan_add(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj),
						     trans);
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
		err = rocker_port_fib_ipv4(rocker_port, trans,
					   htonl(fib4->dst), fib4->dst_len,
					   &fib4->fi, fib4->tb_id, 0);
		if (err)
			break;
		err = rocker_world_port_obj_fib4_add(rocker_port,
						     SWITCHDEV_OBJ_IPV4_FIB(obj),
						     trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_add(rocker_port, trans,
					  SWITCHDEV_OBJ_PORT_FDB(obj));
		if (err)
			break;
		err = rocker_world_port_obj_fdb_add(rocker_port,
						    SWITCHDEV_OBJ_PORT_FDB(obj),
						    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int rocker_port_vlan_del(struct rocker_port *rocker_port,
				u16 vid, u16 flags)
{
	int err;

	err = rocker_port_router_mac(rocker_port, NULL,
				     ROCKER_OP_FLAG_REMOVE, htons(vid));
	if (err)
		return err;

	return rocker_port_vlan(rocker_port, NULL,
				ROCKER_OP_FLAG_REMOVE, vid);
}
static int rocker_port_vlans_del(struct rocker_port *rocker_port,
				 const struct switchdev_obj_port_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}
static int rocker_port_fdb_del(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans,
			       const struct switchdev_obj_port_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = ROCKER_OP_FLAG_REMOVE;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}
static int rocker_port_obj_del(struct net_device *dev,
			       const struct switchdev_obj *obj)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlans_del(rocker_port,
					    SWITCHDEV_OBJ_PORT_VLAN(obj));
		if (err)
			break;
		err = rocker_world_port_obj_vlan_del(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
		err = rocker_port_fib_ipv4(rocker_port, NULL,
					   htonl(fib4->dst), fib4->dst_len,
					   &fib4->fi, fib4->tb_id,
					   ROCKER_OP_FLAG_REMOVE);
		if (err)
			break;
		err = rocker_world_port_obj_fib4_del(rocker_port,
						     SWITCHDEV_OBJ_IPV4_FIB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_del(rocker_port, NULL,
					  SWITCHDEV_OBJ_PORT_FDB(obj));
		if (err)
			break;
		err = rocker_world_port_obj_fdb_del(rocker_port,
						    SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
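/* Dump helpers walk the driver's software shadow tables and feed each
 * matching entry back to switchdev through the supplied callback. The
 * FDB walk below runs under fdb_tbl_lock with IRQs disabled, so the
 * callback must not sleep.
 */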
static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
				struct switchdev_obj_port_fdb *fdb,
				switchdev_obj_dump_cb_t *cb)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		ether_addr_copy(fdb->addr, found->key.addr);
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
						   found->key.vlan_id);
		err = cb(&fdb->obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}
static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
				 struct switchdev_obj_port_vlan *vlan,
				 switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan->flags = 0;
		if (rocker_vlan_id_is_internal(htons(vid)))
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}
static int rocker_port_obj_dump(struct net_device *dev,
				struct switchdev_obj *obj,
				switchdev_obj_dump_cb_t *cb)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_dump(rocker_port,
					   SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		if (err)
			break;
		err = rocker_world_port_obj_fdb_dump(rocker_port,
						     SWITCHDEV_OBJ_PORT_FDB(obj),
						     cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlan_dump(rocker_port,
					    SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		if (err)
			break;
		err = rocker_world_port_obj_vlan_dump(rocker_port,
						      SWITCHDEV_OBJ_PORT_VLAN(obj),
						      cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static const struct switchdev_ops rocker_port_switchdev_ops = {
	.switchdev_port_attr_get	= rocker_port_attr_get,
	.switchdev_port_attr_set	= rocker_port_attr_set,
	.switchdev_port_obj_add		= rocker_port_obj_add,
	.switchdev_port_obj_del		= rocker_port_obj_del,
	.switchdev_port_obj_dump	= rocker_port_obj_dump,
};
/********************
 * ethtool interface
 ********************/
static int rocker_port_get_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}

static int rocker_port_set_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}

static void rocker_port_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;
} rocker_port_stats[] = {
	{ "rx_packets",	ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
	{ "rx_bytes",	ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
	{ "rx_dropped",	ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors",	ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },

	{ "tx_packets",	ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
	{ "tx_bytes",	ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
	{ "tx_dropped",	ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors",	ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
};

#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
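/* These counters are read from the device on demand and surface in
 * userspace through "ethtool -S <portdev>"; the port netdev name is
 * whatever the system assigned (e.g. a udev-renamed swXpY name).
 */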
static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
				    u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}
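/* Port stats are fetched with a command descriptor round trip: the
 * _prep callback below encodes a GET_PORT_STATS request as nested TLVs
 * and the _proc callback parses the TLV response, copying each u64
 * counter into the ethtool data array at the index matching
 * rocker_port_stats[].
 */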
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}
static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	const struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv);
}
static void rocker_port_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
		int i;

		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
			data[i] = 0;
	}
}
static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ROCKER_PORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_settings		= rocker_port_get_settings,
	.set_settings		= rocker_port_set_settings,
	.get_drvinfo		= rocker_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= rocker_port_get_strings,
	.get_ethtool_stats	= rocker_port_get_stats,
	.get_sset_count		= rocker_port_get_sset_count,
};

/*****************
 * NAPI interface
 *****************/
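/* TX polling only reclaims descriptors the device has completed (the
 * actual transmit was queued in ndo_start_xmit), so it always reports
 * zero work to NAPI. RX polling is budgeted, and the credits returned
 * to the device tell it how many descriptors were recycled back into
 * the ring.
 */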
static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	const struct rocker_desc_info *desc_info;
	int credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}
static int rocker_port_rx_proc(const struct rocker *rocker,
			       const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;
	u16 rx_flags = 0;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;
	if (attrs[ROCKER_TLV_RX_FLAGS])
		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
		skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}
static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	int credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete(napi);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}

/*****************
 * PCI driver ops
 *****************/
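/* The initial carrier state is taken from the PORT_PHYS_LINK_STATUS
 * register, which carries one link bit per pport; pports are 1-based,
 * hence the shift by pport rather than port_number below.
 */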
static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
	bool link_up;

	link_up = link_status & (1 << rocker_port->pport);
	if (link_up)
		netif_carrier_on(rocker_port->dev);
	else
		netif_carrier_off(rocker_port->dev);
}
static void rocker_remove_ports(struct rocker *rocker)
{
	struct rocker_port *rocker_port;
	int i;

	for (i = 0; i < rocker->port_count; i++) {
		rocker_port = rocker->ports[i];
		if (!rocker_port)
			continue;
		rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
		rocker_world_port_fini(rocker_port);
		unregister_netdev(rocker_port->dev);
		rocker_world_port_post_fini(rocker_port);
		free_netdev(rocker_port->dev);
	}
	rocker_world_fini(rocker);
	kfree(rocker->ports);
}
static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
						   rocker_port->dev->dev_addr);
	if (err) {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}
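/* Port bring-up order matters: the netdev is registered before the
 * world-specific port init runs, and the ingress port table entry and
 * default untagged VLAN are installed last. The error unwind below
 * mirrors that order in reverse.
 */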
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u16 untagged_vid = 0;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;

	err = rocker_world_check_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "world init failed\n");
		goto err_world_check_init;
	}

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	dev->switchdev_ops = &rocker_port_switchdev_ops;
	netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
			  NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	err = rocker_world_port_pre_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world pre-init failed\n");
		goto err_world_port_pre_init;
	}
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	err = rocker_world_port_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world init failed\n");
		goto err_world_port_init;
	}

	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);

	rocker_port_set_learning(rocker_port, NULL,
				 !!(rocker_port->brport_flags & BR_LEARNING));

	err = rocker_port_ig_tbl(rocker_port, NULL, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}

	return 0;

err_untagged_vlan:
	rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
	rocker_world_port_fini(rocker_port);
err_world_port_init:
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	rocker_world_port_post_fini(rocker_port);
err_world_port_pre_init:
err_world_check_init:
	free_netdev(dev);
	return err;
}
static int rocker_probe_ports(struct rocker *rocker)
{
	int i;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}
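/* The device is expected to expose exactly
 * ROCKER_MSIX_VEC_COUNT(port_count) MSI-X vectors: dedicated command
 * and event vectors plus per-port tx/rx vectors (see the vector layout
 * in rocker_hw.h). Any other count is treated as a probe error.
 */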
static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}
static void rocker_msix_fini(const struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	err = rocker_init_tbls(rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot init rocker tables\n");
		goto err_init_tbls;
	}

	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
		    (unsigned long) rocker);
	mod_timer(&rocker->fdb_cleanup_timer, jiffies);

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
		 (int)sizeof(rocker->hw.id), &rocker->hw.id);

	return 0;

err_probe_ports:
	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
err_init_tbls:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}
static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);

	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	rocker_remove_ports(rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}
static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};
/************************************
 * Net device notifier event handler
 ************************************/
static bool rocker_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}
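/* Called when a rocker port is enslaved to a bridge, e.g. via
 * "ip link set dev <portdev> master br0". The port's internal VLAN is
 * re-keyed from the port's own ifindex to the bridge's ifindex so that
 * all ports in the same bridge share one internal VLAN.
 */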
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	u16 untagged_vid = 0;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

	rocker_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

	return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
				    false);
	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}
static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
				   struct net_device *master)
{
	int err;

	rocker_port->bridge_dev = master;

	err = rocker_port_fwd_disable(rocker_port, NULL, 0);
	if (err)
		return err;
	err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}
static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	int err = 0;

	if (netif_is_bridge_master(master))
		err = rocker_port_bridge_join(rocker_port, master);
	else if (netif_is_ovs_master(master))
		err = rocker_port_ovs_changed(rocker_port, master);
	return err;
}
static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
{
	int err = 0;

	if (rocker_port_is_bridged(rocker_port))
		err = rocker_port_bridge_leave(rocker_port);
	else if (rocker_port_is_ovsed(rocker_port))
		err = rocker_port_ovs_changed(rocker_port, NULL);
	return err;
}
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_world_port_master_linked(rocker_port,
							      info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
			err = rocker_port_master_linked(rocker_port,
							info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
		} else {
			err = rocker_world_port_master_unlinked(rocker_port,
								info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
			err = rocker_port_master_unlinked(rocker_port);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
		}
		break;
	}
out:
	return NOTIFY_DONE;
}
static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};
/************************************
 * Net event notifier event handler
 ************************************/
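/* ARP neighbour updates drive the hardware host entries: a neighbour
 * in a valid NUD state (re)installs the IPv4 neigh entry with the
 * resolved MAC, anything else removes it. NOWAIT is used because
 * netevent notifiers can run in atomic context.
 */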
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
		    ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}
static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct rocker_port *rocker_port;
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		rocker_port = netdev_priv(dev);
		err = rocker_world_port_neigh_update(rocker_port, n);
		if (err)
			netdev_warn(dev, "failed to handle neigh update (err %d)\n",
				    err);
		err = rocker_neigh_update(dev, n);
		if (err)
			netdev_warn(dev,
				    "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};
/***********************
 * Module init and exit
 ***********************/
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}
static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}
module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);