/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker_hw.h"
#include "rocker.h"
#include "rocker_tlv.h"
static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};
struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};
struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};
struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};
struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};
static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_ovs_master(rocker_port->bridge_dev);
}
#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)
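
/* These op flags may be OR'ed together; e.g. the FDB learning event
 * path below uses ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED so
 * the resulting command does not sleep waiting for completion.
 */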
static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
				size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction.  If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	return elem ? elem + 1 : NULL;
}
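
/* Hypothetical caller sketch (illustration only): the same code path
 * runs once in the prepare phase and once in the commit phase, so the
 * object kzalloc'ed during prepare is dequeued again during commit:
 *
 *	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
 *	if (!entry)
 *		return -ENOMEM;
 *	...
 *	rocker_kfree(trans, entry);	(ignored in the prepare phase)
 */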
static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
			    size_t size)
{
	return __rocker_mem_alloc(trans, flags, size);
}

static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
			    size_t n, size_t size)
{
	return __rocker_mem_alloc(trans, flags, n * size);
}

static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}

struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};
static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(void)
{
	struct rocker_wait *wait;

	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait)
		return NULL;

	return wait;
}

static void rocker_wait_destroy(struct rocker_wait *wait)
{
	kfree(wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}
static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}
#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
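
/* Example expansion: rocker_write32(rocker, TEST_REG, val) becomes
 * writel(val, rocker->hw_addr + ROCKER_TEST_REG), so callers name
 * registers by their ROCKER_-prefixed offsets from rocker_hw.h.
 */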
/*****************************
 * HW basic testing functions
 *****************************/
static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}
static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}
static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}
static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}
/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}
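
/* Example: with limit == 512, __pos_inc(510, 512) == 511 and
 * __pos_inc(511, 512) == 0, i.e. ring positions wrap to the start.
 */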
static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *
rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}
static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}
static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
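
/* Example: a requested size of 1000 is rounded up to the next power
 * of two (1024) and then clamped into the
 * [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX] range.
 */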
static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->type = type;
	info->size = size;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}
static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}
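
/* Note the last descriptor is only committed, not passed through
 * rocker_desc_head_set(): advancing head onto tail would trip the
 * BUG_ON() there, since a completely full ring would look identical
 * to an empty one (head == tail).
 */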
static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		void *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}

static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait;

	wait = rocker_wait_create();
	if (!wait)
		return -ENOMEM;
	rocker_desc_cookie_ptr_set(desc_info, wait);
	return 0;
}

static void
rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info);

	rocker_wait_destroy(wait);
}

static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;
	int err;

	for (i = 0; i < cmd_ring->size; i++) {
		err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
	return err;
}

static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;

	for (i = 0; i < cmd_ring->size; i++)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
}
static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_cmd_ring_waits_alloc(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring waits\n");
		goto err_dma_cmd_ring_waits_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}
static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_cmd_ring_waits_free(rocker);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
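
/* Example: with the default MTU of 1500 this reserves
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
 * bytes per rx buffer.
 */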
static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
					 const struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}
static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
static void rocker_port_set_enable(const struct rocker_port *rocker_port,
				   bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}
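
/* Every descriptor consumed above returns one credit to the hardware;
 * the device does not reuse command ring descriptors until the driver
 * hands the credits back via rocker_dma_ring_credits_set().
 */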
static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);
static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
					      __be16 vlan_id);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;
	int err;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
	if (err)
		return err;

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}
static int rocker_event_process(const struct rocker *rocker,
				const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	const struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}
/********************
 * Command interface
 ********************/

typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv);

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
				    const struct rocker_desc_info *desc_info,
				    void *priv);
static int rocker_cmd_exec(struct rocker_port *rocker_port, int flags,
			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
			   rocker_cmd_proc_cb_t process, void *process_priv)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
	unsigned long lock_flags;
	int err;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return -EAGAIN;
	}

	wait = rocker_desc_cookie_ptr_get(desc_info);
	rocker_wait_init(wait);
	wait->nowait = nowait;

	err = prepare(rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return err;
	}

	rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	if (nowait)
		return 0;

	if (!rocker_wait_event_timeout(wait, HZ / 10))
		return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
	return err;
}
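
/* Callers pair a prepare callback with an optional process callback;
 * e.g. rocker_cmd_get_port_settings_ethtool() below sends the request
 * built by rocker_cmd_get_port_settings_prep() and parses the reply in
 * rocker_cmd_get_port_settings_ethtool_proc().
 */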
static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}
static int
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}

static int
rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	u8 *p_mode = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
	if (!attr)
		return -EIO;

	*p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
	return 0;
}
struct port_name {
	char *buf;
	size_t len;
};

static int
rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
					    const struct rocker_desc_info *desc_info,
					    void *priv)
{
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct port_name *name = priv;
	const struct rocker_tlv *attr;
	size_t i, j, len;
	const char *str;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
	if (!attr)
		return -EIO;

	len = min_t(size_t, rocker_tlv_len(attr), name->len);
	str = rocker_tlv_data(attr);

	/* make sure name only contains alphanumeric characters */
	for (i = j = 0; i < len; ++i) {
		if (isalnum(str[i])) {
			name->buf[j] = str[i];
			j++;
		} else {
			break;
		}
	}

	if (j == 0)
		return -EIO;

	name->buf[j] = '\0';

	return 0;
}
static int
rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
			       ethtool_cmd_speed(ecmd)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
			      ecmd->duplex))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
			      ecmd->autoneg))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	const unsigned char *macaddr = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
			   ETH_ALEN, macaddr))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      void *priv)
{
	int mtu = *(int *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
			       mtu))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	bool learning = *(bool *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
			      learning))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_ethtool_proc,
			       ecmd);
}

static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_macaddr_proc,
			       macaddr);
}

static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
					     u8 *p_mode)
{
	return rocker_cmd_exec(rocker_port, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_mode_proc, p_mode);
}

static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, 0,
			       rocker_cmd_set_port_settings_ethtool_prep,
			       ecmd, NULL, NULL);
}

static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, 0,
			       rocker_cmd_set_port_settings_macaddr_prep,
			       macaddr, NULL, NULL);
}

static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
					    int mtu)
{
	return rocker_cmd_exec(rocker_port, 0,
			       rocker_cmd_set_port_settings_mtu_prep,
			       &mtu, NULL, NULL);
}

static int rocker_port_set_learning(struct rocker_port *rocker_port,
				    bool learning)
{
	return rocker_cmd_exec(rocker_port, 0,
			       rocker_cmd_set_port_learning_prep,
			       &learning, NULL, NULL);
}
/**********************
 * Worlds manipulation
 **********************/

static struct rocker_world_ops *rocker_world_ops[] = {
	&rocker_ofdpa_ops,
};

#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)

static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
{
	int i;

	for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
		if (rocker_world_ops[i]->mode == mode)
			return rocker_world_ops[i];
	return NULL;
}
static int rocker_world_init(struct rocker *rocker, u8 mode)
{
	struct rocker_world_ops *wops;
	int err;

	wops = rocker_world_ops_find(mode);
	if (!wops) {
		dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
			mode);
		return -EINVAL;
	}
	rocker->wops = wops;
	rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
	if (!rocker->wpriv)
		return -ENOMEM;
	if (!wops->init)
		return 0;
	err = wops->init(rocker);
	if (err)
		kfree(rocker->wpriv);
	return err;
}

static void rocker_world_fini(struct rocker *rocker)
{
	struct rocker_world_ops *wops = rocker->wops;

	if (!wops || !wops->fini)
		return;
	wops->fini(rocker);
	kfree(rocker->wpriv);
}

static int rocker_world_check_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	u8 mode;
	int err;

	err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
	if (err) {
		dev_err(&rocker->pdev->dev, "failed to get port mode\n");
		return err;
	}
	if (rocker->wops) {
		if (rocker->wops->mode != mode) {
			dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
			return -EINVAL;
		}
		return 0;
	}
	return rocker_world_init(rocker, mode);
}
static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	int err;

	rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
	if (!rocker_port->wpriv)
		return -ENOMEM;
	if (!wops->port_pre_init)
		return 0;
	err = wops->port_pre_init(rocker_port);
	if (err)
		kfree(rocker_port->wpriv);
	return err;
}

static int rocker_world_port_init(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_init)
		return 0;
	return wops->port_init(rocker_port);
}

static void rocker_world_port_fini(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_fini)
		return;
	wops->port_fini(rocker_port);
}

static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_post_fini)
		return;
	wops->port_post_fini(rocker_port);
	kfree(rocker_port->wpriv);
}

static int rocker_world_port_open(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_open)
		return 0;
	return wops->port_open(rocker_port);
}

static void rocker_world_port_stop(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_stop)
		return;
	wops->port_stop(rocker_port);
}
static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
						u8 state,
						struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_stp_state_set)
		return -EOPNOTSUPP;
	return wops->port_attr_stp_state_set(rocker_port, state, trans);
}

static int
rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					unsigned long brport_flags,
					struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_flags_set)
		return -EOPNOTSUPP;
	return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
						trans);
}

static int
rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
					unsigned long *p_brport_flags)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_flags_get)
		return -EOPNOTSUPP;
	return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
}

static int
rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
					      u32 ageing_time,
					      struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_ageing_time_set)
		return -EOPNOTSUPP;
	return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
						      trans);
}

static int
rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
			       const struct switchdev_obj_port_vlan *vlan,
			       struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_vlan_add)
		return -EOPNOTSUPP;
	return wops->port_obj_vlan_add(rocker_port, vlan, trans);
}

static int
rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_vlan_del)
		return -EOPNOTSUPP;
	return wops->port_obj_vlan_del(rocker_port, vlan);
}

static int
rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
				struct switchdev_obj_port_vlan *vlan,
				switchdev_obj_dump_cb_t *cb)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_vlan_dump)
		return -EOPNOTSUPP;
	return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
}

static int
rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
			       const struct switchdev_obj_ipv4_fib *fib4,
			       struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fib4_add)
		return -EOPNOTSUPP;
	return wops->port_obj_fib4_add(rocker_port, fib4, trans);
}

static int
rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
			       const struct switchdev_obj_ipv4_fib *fib4)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fib4_del)
		return -EOPNOTSUPP;
	return wops->port_obj_fib4_del(rocker_port, fib4);
}

static int
rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
			      const struct switchdev_obj_port_fdb *fdb,
			      struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fdb_add)
		return -EOPNOTSUPP;
	return wops->port_obj_fdb_add(rocker_port, fdb, trans);
}

static int
rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
			      const struct switchdev_obj_port_fdb *fdb)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fdb_del)
		return -EOPNOTSUPP;
	return wops->port_obj_fdb_del(rocker_port, fdb);
}

static int
rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
			       struct switchdev_obj_port_fdb *fdb,
			       switchdev_obj_dump_cb_t *cb)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fdb_dump)
		return -EOPNOTSUPP;
	return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
}
static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
					   struct net_device *master)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_master_linked)
		return -EOPNOTSUPP;
	return wops->port_master_linked(rocker_port, master);
}

static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
					     struct net_device *master)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_master_unlinked)
		return -EOPNOTSUPP;
	return wops->port_master_unlinked(rocker_port, master);
}

static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
					  struct neighbour *n)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_neigh_update)
		return -EOPNOTSUPP;
	return wops->port_neigh_update(rocker_port, n);
}

static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
					   struct neighbour *n)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_neigh_destroy)
		return -EOPNOTSUPP;
	return wops->port_neigh_destroy(rocker_port, n);
}

static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
					      __be16 vlan_id)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_ev_mac_vlan_seen)
		return -EOPNOTSUPP;
	return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
}
static int
rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
				const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			     const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				 const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				      const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			       const struct rocker_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
static int
rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			    const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;
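
	/* For IPv4/IPv6 the single ip_tos byte carries two fields as
	 * used below: bits 0-5 (ip_tos & 0x3f) are sent as the DSCP TLV
	 * and bits 6-7 ((ip_tos & 0xc0) >> 6) as the ECN TLV; e.g.
	 * ip_tos 0x7a yields DSCP 0x3a and ECN 1.
	 */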
	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
2313 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2314 struct rocker_group_tbl_entry *entry)
2316 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2317 ROCKER_GROUP_PORT_GET(entry->group_id)))
2319 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2320 entry->l2_interface.pop_vlan))
2327 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2328 const struct rocker_group_tbl_entry *entry)
2330 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2331 entry->l2_rewrite.group_id))
2333 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2334 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2335 ETH_ALEN, entry->l2_rewrite.eth_src))
2337 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2338 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2339 ETH_ALEN, entry->l2_rewrite.eth_dst))
2341 if (entry->l2_rewrite.vlan_id &&
2342 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2343 entry->l2_rewrite.vlan_id))
static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				   const struct rocker_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}

static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}

static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	const struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/

static int rocker_init_tbls(struct rocker *rocker)
{
	hash_init(rocker->flow_tbl);
	spin_lock_init(&rocker->flow_tbl_lock);

	hash_init(rocker->group_tbl);
	spin_lock_init(&rocker->group_tbl_lock);

	hash_init(rocker->fdb_tbl);
	spin_lock_init(&rocker->fdb_tbl_lock);

	hash_init(rocker->internal_vlan_tbl);
	spin_lock_init(&rocker->internal_vlan_tbl_lock);

	hash_init(rocker->neigh_tbl);
	spin_lock_init(&rocker->neigh_tbl_lock);

	return 0;
}

static void rocker_free_tbls(struct rocker *rocker)
{
	unsigned long flags;
	struct rocker_flow_tbl_entry *flow_entry;
	struct rocker_group_tbl_entry *group_entry;
	struct rocker_fdb_tbl_entry *fdb_entry;
	struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
	struct rocker_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
	hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
	hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

	spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
	hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
}

static struct rocker_flow_tbl_entry *
rocker_flow_tbl_find(const struct rocker *rocker,
		     const struct rocker_flow_tbl_entry *match)
{
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(rocker->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}

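/* Add or modify a flow table entry.  If a matching entry already exists
 * in the software hash table, it is replaced and a FLOW_MOD command is
 * sent, reusing the existing cookie; otherwise a fresh cookie is
 * allocated and FLOW_ADD is sent.  During the switchdev prepare phase
 * only the allocations are exercised; the table and hardware are left
 * untouched.
 */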
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_flow_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		rocker_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = rocker->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(rocker_port, flags,
				       rocker_cmd_flow_tbl_add,
				       found, NULL, NULL);
	return 0;
}

static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_flow_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

	rocker_kfree(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(rocker_port, flags,
					      rocker_cmd_flow_tbl_del,
					      found, NULL, NULL);
		rocker_kfree(trans, found);
	}

	return err;
}

static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags,
			      struct rocker_flow_tbl_entry *entry)
{
	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
	else
		return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags,
				   u32 in_pport, u32 in_pport_mask,
				   enum rocker_of_dpa_table_id goto_tbl)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				u32 in_pport, __be16 vlan_id,
				__be16 vlan_id_mask,
				enum rocker_of_dpa_table_id goto_tbl,
				bool untagged, __be16 new_vlan_id)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;

	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans,
				    u32 in_pport, u32 in_pport_mask,
				    __be16 eth_type, const u8 *eth_dst,
				    const u8 *eth_dst_mask, __be16 vlan_id,
				    __be16 vlan_id_mask, bool copy_to_cpu,
				    int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  const u8 *eth_dst, const u8 *eth_dst_mask,
				  __be16 vlan_id, u32 tunnel_id,
				  enum rocker_of_dpa_table_id goto_tbl,
				  u32 group_id, bool copy_to_cpu)
{
	struct rocker_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = ROCKER_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
					  struct switchdev_trans *trans,
					  __be16 eth_type, __be32 dst,
					  __be32 dst_mask, u32 priority,
					  enum rocker_of_dpa_table_id goto_tbl,
					  u32 group_id, int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct rocker_flow_tbl_key,
				  ucast_routing.group_id);

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       u32 in_pport, u32 in_pport_mask,
			       const u8 *eth_src, const u8 *eth_src_mask,
			       const u8 *eth_dst, const u8 *eth_dst_mask,
			       __be16 eth_type, __be16 vlan_id,
			       __be16 vlan_id_mask, u8 ip_proto,
			       u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			       u32 group_id)
{
	u32 priority;
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	priority = ROCKER_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = ROCKER_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = ROCKER_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static struct rocker_group_tbl_entry *
rocker_group_tbl_find(const struct rocker *rocker,
		      const struct rocker_group_tbl_entry *match)
{
	struct rocker_group_tbl_entry *found;

	hash_for_each_possible(rocker->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}

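/* L2 flood and L2 mcast group entries own a separately-allocated array
 * of member group IDs, so entry teardown must free that array before
 * freeing the entry itself.
 */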
static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
					struct rocker_group_tbl_entry *entry)
{
	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		rocker_kfree(trans, entry->group_ids);
		break;
	default:
		break;
	}
	rocker_kfree(trans, entry);
}

static int rocker_group_tbl_add(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		rocker_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(rocker->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(rocker_port, flags,
				       rocker_cmd_group_tbl_add,
				       found, NULL, NULL);
	return 0;
}

static int rocker_group_tbl_del(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	rocker_group_tbl_entry_free(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(rocker_port, flags,
					      rocker_cmd_group_tbl_del,
					      found, NULL, NULL);
		rocker_group_tbl_entry_free(trans, found);
	}

	return err;
}

static int rocker_group_tbl_do(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_group_tbl_entry *entry)
{
	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_group_tbl_del(rocker_port, trans, flags, entry);
	else
		return rocker_group_tbl_add(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_interface(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id, u32 out_pport,
				     bool pop_vlan)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans,
				   int flags, u8 group_count,
				   const u32 *group_ids, u32 group_id)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = rocker_kcalloc(trans, flags,
					  group_count, sizeof(u32));
	if (!entry->group_ids) {
		rocker_kfree(trans, entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_flood(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 __be16 vlan_id, u8 group_count,
				 const u32 *group_ids, u32 group_id)
{
	return rocker_group_l2_fan_out(rocker_port, trans, flags,
				       group_count, group_ids,
				       group_id);
}

static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags,
				   u32 index, const u8 *src_mac, const u8 *dst_mac,
				   __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static struct rocker_neigh_tbl_entry *
rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
{
	struct rocker_neigh_tbl_entry *found;

	hash_for_each_possible(rocker->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}

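/* The neigh table helpers below keep a per-entry ref count: the /32
 * neighbor route and any nexthops resolving to the same IP share one
 * entry, which is only unhashed and freed when the last reference is
 * dropped.
 */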
static void _rocker_neigh_add(struct rocker *rocker,
			      struct switchdev_trans *trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (!switchdev_trans_ph_commit(trans))
		entry->index = rocker->neigh_tbl_next_index++;
	if (switchdev_trans_ph_prepare(trans))
		return;
	entry->ref_count++;
	hash_add(rocker->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}

static void _rocker_neigh_del(struct switchdev_trans *trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (switchdev_trans_ph_prepare(trans))
		return;
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		rocker_kfree(trans, entry);
	}
}

static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
				 struct switchdev_trans *trans,
				 const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else if (!switchdev_trans_ph_prepare(trans)) {
		entry->ref_count++;
	}
}

static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans,
				  int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		_rocker_neigh_add(rocker, trans, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = rocker_group_l3_unicast(rocker_port, trans, flags,
				      entry->index,
				      rocker_port->dev->dev_addr,
				      entry->eth_dst,
				      rocker_port->internal_vlan_id,
				      entry->ttl_check,
				      rocker_port->pport);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
						     eth_type, ip_addr,
						     inet_make_mask(32),
						     priority, goto_tbl,
						     group_id, flags);

		if (err)
			netdev_err(rocker_port->dev,
				   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	if (!adding)
		rocker_kfree(trans, entry);

	return err;
}

static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans,
				    __be32 ip_addr)
{
	struct net_device *dev = rocker_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
					     ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}

static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       __be32 ip_addr, u32 *index)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);
	if (found)
		*index = found->index;

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		_rocker_neigh_add(rocker, trans, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (!adding)
		rocker_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);

	return err;
}

static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					int flags, __be16 vlan_id)
{
	struct rocker_port *p;
	const struct rocker *rocker = rocker_port->rocker;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = rocker_kcalloc(trans, flags,
				   rocker->port_count, sizeof(u32));
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (!p)
			continue;
		if (!rocker_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
				    group_count, group_ids, group_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	rocker_kfree(trans, group_ids);
	return err;
}

static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      __be16 vlan_id, bool pop_vlan)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_port *p;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = rocker_port->pport;
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = rocker_group_l2_interface(rocker_port, trans, flags,
					vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}

static struct rocker_ctrl {
	const u8 *eth_dst;
	const u8 *eth_dst_mask;
	__be16 eth_type;
	bool acl;
	bool bridge;
	bool term;
	bool copy_to_cpu;
} rocker_ctrls[] = {
	[ROCKER_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[ROCKER_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[ROCKER_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};

static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = rocker_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = rocker_flow_tbl_acl(rocker_port, trans, flags,
				  in_pport, in_pport_mask,
				  eth_src, eth_src_mask,
				  ctrl->eth_dst, ctrl->eth_dst_mask,
				  ctrl->eth_type,
				  vlan_id, vlan_id_mask,
				  ip_proto, ip_proto_mask,
				  ip_tos, ip_tos_mask,
				  group_id);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					int flags,
					const struct rocker_ctrl *ctrl,
					__be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
				     ctrl->eth_dst, ctrl->eth_dst_mask,
				     vlan_id, tunnel_id,
				     goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       ctrl->eth_type, ctrl->eth_dst,
				       ctrl->eth_dst_mask, vlan_id,
				       vlan_id_mask, ctrl->copy_to_cpu,
				       flags);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}

static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
						 ctrl, vlan_id);
	if (ctrl->bridge)
		return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
						    ctrl, vlan_id);

	if (ctrl->term)
		return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
						  ctrl, vlan_id);

	return -EOPNOTSUPP;
}

static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id)
{
	int err = 0;
	int i;

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (rocker_port->ctrls[i]) {
			err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
						    &rocker_ctrls[i], vlan_id);
			if (err)
				return err;
		}
	}

	return err;
}

static int rocker_port_ctrl(struct rocker_port *rocker_port,
			    struct switchdev_trans *trans, int flags,
			    const struct rocker_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
					    ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}

static int rocker_port_vlan(struct rocker_port *rocker_port,
			    struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = rocker_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);

	if (adding && test_bit(ntohs(internal_vlan_id),
			       rocker_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding && !test_bit(ntohs(internal_vlan_id),
				      rocker_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	if (adding) {
		err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
						internal_vlan_id);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
					 internal_vlan_id, untagged);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
					   internal_vlan_id);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
				   in_pport, vlan_id, vlan_id_mask,
				   goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN table\n", err);

err_out:
	if (switchdev_trans_ph_prepare(trans))
		change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	return err;
}

static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
				      in_pport, in_pport_mask,
				      goto_tbl);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) ingress port table entry\n", err);

	return err;
}

struct rocker_fdb_learn_work {
	struct work_struct work;
	struct rocker_port *rocker_port;
	struct switchdev_trans *trans;
	int flags;
	u8 addr[ETH_ALEN];
	u16 vid;
};

static void rocker_port_fdb_learn_work(struct work_struct *work)
{
	const struct rocker_fdb_learn_work *lw =
		container_of(work, struct rocker_fdb_learn_work, work);
	bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
	bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->rocker_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->rocker_port->dev, &info.info);
	rtnl_unlock();

	rocker_kfree(lw->trans, work);
}

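/* Program the bridging table for a learned or forgotten MAC and, when
 * learning sync (BR_LEARNING_SYNC) is enabled on a bridged port, defer
 * a switchdev FDB ADD/DEL notification to process context via the work
 * item above.
 */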
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 const u8 *addr, __be16 vlan_id)
{
	struct rocker_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = rocker_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	if (rocker_port_is_bridged(rocker_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
		err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
					     NULL, vlan_id, tunnel_id, goto_tbl,
					     group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	lw = rocker_kzalloc(trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);

	lw->rocker_port = rocker_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);

	if (switchdev_trans_ph_prepare(trans))
		rocker_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}

static struct rocker_fdb_tbl_entry *
rocker_fdb_tbl_find(const struct rocker *rocker,
		    const struct rocker_fdb_tbl_entry *match)
{
	struct rocker_fdb_tbl_entry *found;

	hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
			return found;

	return NULL;
}

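/* Add, refresh or remove a software FDB entry keyed by the CRC32 of
 * {port, MAC, vlan}, then push the change down via
 * rocker_port_fdb_learn().
 */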
static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *fdb;
	struct rocker_fdb_tbl_entry *found;
	bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.rocker_port = rocker_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	found = rocker_fdb_tbl_find(rocker, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			rocker_kfree(trans, fdb);
			if (!switchdev_trans_ph_prepare(trans))
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_add(rocker->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		rocker_kfree(trans, fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= ROCKER_OP_FLAG_REFRESH;
	}

	return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
}

static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		if (!found->learned)
			continue;
		err = rocker_port_fdb_learn(rocker_port, trans, flags,
					    found->key.addr,
					    found->key.vlan_id);
		if (err)
			goto err_out;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}

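/* Timer callback that ages out learned FDB entries.  It re-arms itself
 * for the earliest remaining expiry, but no sooner than
 * BR_MIN_AGEING_TIME from now.
 */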
static void rocker_fdb_cleanup(unsigned long data)
{
	struct rocker *rocker = (struct rocker *)data;
	struct rocker_port *rocker_port;
	struct rocker_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
		    ROCKER_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		rocker_port = entry->key.rocker_port;
		expires = entry->touched + rocker_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			rocker_port_fdb_learn(rocker_port, NULL,
					      flags, entry->key.addr,
					      entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
}

static int rocker_port_router_mac(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);

	return err;
}

static int rocker_port_fwding(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		flags |= ROCKER_OP_FLAG_REMOVE;

	out_pport = rocker_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		pop_vlan = rocker_vlan_id_is_internal(vlan_id);
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}

static int rocker_port_stp_update(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  u8 state)
{
	bool want[ROCKER_CTRL_MAX] = { 0, };
	bool prev_ctrls[ROCKER_CTRL_MAX];
	u8 uninitialized_var(prev_state);
	int err;
	int i;

	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
		prev_state = rocker_port->stp_state;
	}

	if (rocker_port->stp_state == state)
		return 0;

	rocker_port->stp_state = state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		want[ROCKER_CTRL_IPV4_MCAST] = true;
		want[ROCKER_CTRL_IPV6_MCAST] = true;
		if (rocker_port_is_bridged(rocker_port))
			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
		else if (rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_DFLT_OVS] = true;
		else
			want[ROCKER_CTRL_LOCAL_ARP] = true;
		break;
	}

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (want[i] != rocker_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
			err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
					       &rocker_ctrls[i]);
			if (err)
				goto err_out;
			rocker_port->ctrls[i] = want[i];
		}
	}

	err = rocker_port_fdb_flush(rocker_port, trans, flags);
	if (err)
		goto err_out;

	err = rocker_port_fwding(rocker_port, trans, flags);

err_out:
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		rocker_port->stp_state = prev_state;
	}

	return err;
}

static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will enable port */
		return 0;

	/* port is not bridged, so simulate going to FORWARDING state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_FORWARDING);
}

static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will disable port */
		return 0;

	/* port is not bridged, so simulate going to DISABLED state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_DISABLED);
}

static struct rocker_internal_vlan_tbl_entry *
rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
{
	struct rocker_internal_vlan_tbl_entry *found;

	hash_for_each_possible(rocker->internal_vlan_tbl, found,
			       entry, ifindex) {
		if (found->ifindex == ifindex)
			return found;
	}

	return NULL;
}

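/* Allocate, or take another reference on, the internal VLAN ID used
 * for untagged traffic on the given ifindex.  IDs come from a bitmap
 * of ROCKER_N_INTERNAL_VLANS starting at ROCKER_INTERNAL_VLAN_ID_BASE.
 */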
static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
					       int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *entry;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);

	for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}

static void
rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
				 int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (!found) {
		netdev_err(rocker_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, rocker->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}

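/* Install or remove an IPv4 route.  A route whose gateway is reachable
 * through this port resolves to an L3 unicast group via its nexthop;
 * anything else is pointed at the CPU port for software forwarding.
 */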
static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, __be32 dst,
				int dst_len, const struct fib_info *fi,
				u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == rocker_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = rocker_port_ipv4_nh(rocker_port, trans, flags,
					  nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
					     dst_mask, priority, goto_tbl,
					     group_id, flags);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}

static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_world_port_open(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot open port in world\n");
		goto err_world_port_open;
	}

	err = rocker_port_fwd_enable(rocker_port, NULL, 0);
	if (err)
		goto err_fwd_enable;

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_fwd_enable:
err_world_port_open:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}

static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_world_port_stop(rocker_port);
	rocker_port_fwd_disable(rocker_port, NULL,
				ROCKER_OP_FLAG_NOWAIT);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}

static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
	struct rocker_tlv *attr;
	int rem;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_TX_FRAGS])
		return;
	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
		const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
		dma_addr_t dma_handle;
		size_t len;

		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
			continue;
		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
					attr);
		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
			continue;
		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
		pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
	}
}

static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       char *buf, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	struct rocker_tlv *frag;

	dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
		if (net_ratelimit())
			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
	if (!frag)
		goto unmap_frag;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
			       dma_handle))
		goto nest_cancel;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
			       buf_len))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, frag);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
	pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
	return -EMSGSIZE;
}

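/* Transmit path: each skb is described to the device as a nested
 * ROCKER_TLV_TX_FRAGS attribute holding one DMA-mapped {addr, len}
 * frag per buffer; skbs with more than ROCKER_TX_FRAGS_MAX frags are
 * linearized first.
 */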
static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_tlv *frags;
	int i;
	int err;

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (unlikely(!desc_info)) {
		if (net_ratelimit())
			netdev_err(dev, "tx ring full when queue awake\n");
		return NETDEV_TX_BUSY;
	}

	rocker_desc_cookie_ptr_set(desc_info, skb);

	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;
	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		err = skb_linearize(skb);
		if (err)
			goto unmap_frags;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
						  skb_frag_address(frag),
						  skb_frag_size(frag));
		if (err)
			goto unmap_frags;
	}
	rocker_tlv_nest_end(desc_info, frags);

	rocker_desc_gen_clear(desc_info);
	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (!desc_info)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

unmap_frags:
	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frags);
out:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static int rocker_port_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int running = netif_running(dev);
	int err;

#define ROCKER_PORT_MIN_MTU	68
#define ROCKER_PORT_MAX_MTU	9000

	if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
		return -EINVAL;

	if (running)
		rocker_port_stop(dev);

	netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
	if (err)
		return err;

	if (running)
		err = rocker_port_open(dev);

	return err;
}

static int rocker_port_get_phys_port_name(struct net_device *dev,
					  char *buf, size_t len)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct port_name name = { .buf = buf, .len = len };
	int err;

	err = rocker_cmd_exec(rocker_port, 0,
			      rocker_cmd_get_port_settings_prep, NULL,
			      rocker_cmd_get_port_settings_phys_name_proc,
			      &name);

	return err ? -EOPNOTSUPP : 0;
}

static int rocker_port_change_proto_down(struct net_device *dev,
					 bool proto_down)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_port->dev->flags & IFF_UP)
		rocker_port_set_enable(rocker_port, !proto_down);
	rocker_port->dev->proto_down = proto_down;
	return 0;
}

static void rocker_port_neigh_destroy(struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(n->dev);
	int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;
	int err;

	rocker_port_ipv4_neigh(rocker_port, NULL,
			       flags, ip_addr, n->ha);
	err = rocker_world_port_neigh_destroy(rocker_port, n);
	if (err)
		netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
			    err);
}

static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open			= rocker_port_open,
	.ndo_stop			= rocker_port_stop,
	.ndo_start_xmit			= rocker_port_xmit,
	.ndo_set_mac_address		= rocker_port_set_mac_address,
	.ndo_change_mtu			= rocker_port_change_mtu,
	.ndo_bridge_getlink		= switchdev_port_bridge_getlink,
	.ndo_bridge_setlink		= switchdev_port_bridge_setlink,
	.ndo_bridge_dellink		= switchdev_port_bridge_dellink,
	.ndo_fdb_add			= switchdev_port_fdb_add,
	.ndo_fdb_del			= switchdev_port_fdb_del,
	.ndo_fdb_dump			= switchdev_port_fdb_dump,
	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
	.ndo_change_proto_down		= rocker_port_change_proto_down,
	.ndo_neigh_destroy		= rocker_port_neigh_destroy,
};

/********************
 * swdev ops
 ********************/

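/* The switchdev attr/obj handlers below run the legacy OF-DPA path
 * first and then mirror the operation into the port's world driver,
 * bailing out if either step fails.
 */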
static int rocker_port_attr_get(struct net_device *dev,
				struct switchdev_attr *attr)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	const struct rocker *rocker = rocker_port->rocker;
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(rocker->hw.id);
		memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags = rocker_port->brport_flags;
		err = rocker_world_port_attr_bridge_flags_get(rocker_port,
							      &attr->u.brport_flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					unsigned long brport_flags)
{
	unsigned long orig_flags;
	int err = 0;

	orig_flags = rocker_port->brport_flags;
	rocker_port->brport_flags = brport_flags;
	if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(rocker_port,
					       !!(rocker_port->brport_flags & BR_LEARNING));

	if (switchdev_trans_ph_prepare(trans))
		rocker_port->brport_flags = orig_flags;

	return err;
}

static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
					  struct switchdev_trans *trans,
					  u32 ageing_time)
{
	if (!switchdev_trans_ph_prepare(trans)) {
		rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
		mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
	}

	return 0;
}

static int rocker_port_attr_set(struct net_device *dev,
				const struct switchdev_attr *attr,
				struct switchdev_trans *trans)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = rocker_port_stp_update(rocker_port, trans, 0,
					     attr->u.stp_state);
		if (err)
			break;
		err = rocker_world_port_attr_stp_state_set(rocker_port,
							   attr->u.stp_state,
							   trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = rocker_port_brport_flags_set(rocker_port, trans,
						   attr->u.brport_flags);
		if (err)
			break;
		err = rocker_world_port_attr_bridge_flags_set(rocker_port,
							      attr->u.brport_flags,
							      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = rocker_port_bridge_ageing_time(rocker_port, trans,
						     attr->u.ageing_time);
		if (err)
			break;
		err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
								    attr->u.ageing_time,
								    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int rocker_port_vlan_add(struct rocker_port *rocker_port,
				struct switchdev_trans *trans,
				u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = rocker_port_vlan(rocker_port, trans, 0, vid);
	if (err)
		return err;

	err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
	if (err)
		rocker_port_vlan(rocker_port, trans,
				 ROCKER_OP_FLAG_REMOVE, vid);

	return err;
}

static int rocker_port_vlans_add(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans,
				 const struct switchdev_obj_port_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_add(rocker_port, trans,
					   vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int rocker_port_fdb_add(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans,
			       const struct switchdev_obj_port_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = 0;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}

static int rocker_port_obj_add(struct net_device *dev,
			       const struct switchdev_obj *obj,
			       struct switchdev_trans *trans)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlans_add(rocker_port, trans,
					    SWITCHDEV_OBJ_PORT_VLAN(obj));
		if (err)
			break;
		err = rocker_world_port_obj_vlan_add(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj),
						     trans);
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
		err = rocker_port_fib_ipv4(rocker_port, trans,
					   htonl(fib4->dst), fib4->dst_len,
					   &fib4->fi, fib4->tb_id, 0);
		if (err)
			break;
		err = rocker_world_port_obj_fib4_add(rocker_port,
						     SWITCHDEV_OBJ_IPV4_FIB(obj),
						     trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_add(rocker_port, trans,
					  SWITCHDEV_OBJ_PORT_FDB(obj));
		if (err)
			break;
		err = rocker_world_port_obj_fdb_add(rocker_port,
						    SWITCHDEV_OBJ_PORT_FDB(obj),
						    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int rocker_port_vlan_del(struct rocker_port *rocker_port,
				u16 vid, u16 flags)
{
	int err;

	err = rocker_port_router_mac(rocker_port, NULL,
				     ROCKER_OP_FLAG_REMOVE, htons(vid));
	if (err)
		return err;

	return rocker_port_vlan(rocker_port, NULL,
				ROCKER_OP_FLAG_REMOVE, vid);
}
static int rocker_port_vlans_del(struct rocker_port *rocker_port,
				 const struct switchdev_obj_port_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}
static int rocker_port_fdb_del(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans,
			       const struct switchdev_obj_port_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = ROCKER_OP_FLAG_REMOVE;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}
static int rocker_port_obj_del(struct net_device *dev,
			       const struct switchdev_obj *obj)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlans_del(rocker_port,
					    SWITCHDEV_OBJ_PORT_VLAN(obj));
		if (err)
			break;
		err = rocker_world_port_obj_vlan_del(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
		err = rocker_port_fib_ipv4(rocker_port, NULL,
					   htonl(fib4->dst), fib4->dst_len,
					   &fib4->fi, fib4->tb_id,
					   ROCKER_OP_FLAG_REMOVE);
		if (err)
			break;
		err = rocker_world_port_obj_fib4_del(rocker_port,
						     SWITCHDEV_OBJ_IPV4_FIB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_del(rocker_port, NULL,
					  SWITCHDEV_OBJ_PORT_FDB(obj));
		if (err)
			break;
		err = rocker_world_port_obj_fdb_del(rocker_port,
						    SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
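/* Walk the software FDB shadow table and report this port's entries.
 * The walk runs under fdb_tbl_lock (a spinlock), so the dump callback
 * must not sleep.
 */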
static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
				struct switchdev_obj_port_fdb *fdb,
				switchdev_obj_dump_cb_t *cb)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		ether_addr_copy(fdb->addr, found->key.addr);
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
						   found->key.vlan_id);
		err = cb(&fdb->obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}
static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
				 struct switchdev_obj_port_vlan *vlan,
				 switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan->flags = 0;
		if (rocker_vlan_id_is_internal(htons(vid)))
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}
static int rocker_port_obj_dump(struct net_device *dev,
				struct switchdev_obj *obj,
				switchdev_obj_dump_cb_t *cb)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_dump(rocker_port,
					   SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		if (err)
			break;
		err = rocker_world_port_obj_fdb_dump(rocker_port,
						     SWITCHDEV_OBJ_PORT_FDB(obj),
						     cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlan_dump(rocker_port,
					    SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		if (err)
			break;
		err = rocker_world_port_obj_vlan_dump(rocker_port,
						      SWITCHDEV_OBJ_PORT_VLAN(obj),
						      cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static const struct switchdev_ops rocker_port_switchdev_ops = {
	.switchdev_port_attr_get	= rocker_port_attr_get,
	.switchdev_port_attr_set	= rocker_port_attr_set,
	.switchdev_port_obj_add		= rocker_port_obj_add,
	.switchdev_port_obj_del		= rocker_port_obj_del,
	.switchdev_port_obj_dump	= rocker_port_obj_dump,
};
/********************
 * ethtool interface
 ********************/
static int rocker_port_get_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}
static int rocker_port_set_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}
static void rocker_port_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;
} rocker_port_stats[] = {
	{ "rx_packets",	ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
	{ "rx_bytes",	ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
	{ "rx_dropped",	ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors",	ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },

	{ "tx_packets",	ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
	{ "tx_bytes",	ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
	{ "tx_dropped",	ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors",	ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
};

#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
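/* The string order here must match the counter order produced by
 * rocker_cmd_get_port_stats_ethtool_proc(), since both index the same
 * rocker_port_stats[] array.
 */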
static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
				    u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}
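/* Port stats are fetched over the command ring as a TLV request and
 * response pair: the "prep" callback below builds the GET_PORT_STATS
 * request for this pport, and the "proc" callback parses the nested
 * response attributes into the u64 array that ethtool passed in.
 */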
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}
static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	const struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;
		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port, 0,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv);
}
static void rocker_port_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
		int i;

		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
			data[i] = 0;
	}
}
static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ROCKER_PORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_settings		= rocker_port_get_settings,
	.set_settings		= rocker_port_set_settings,
	.get_drvinfo		= rocker_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= rocker_port_get_strings,
	.get_ethtool_stats	= rocker_port_get_stats,
	.get_sset_count		= rocker_port_get_sset_count,
};
/*****************
 * NAPI interface
 *****************/

static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}
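/* TX completion: reclaim descriptors the hardware has finished with,
 * unmap their DMA fragments, account stats, free the skbs, and hand
 * the freed slots back to the ring as credits. The queue is woken if
 * it was stopped while waiting for free descriptors.
 */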
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	const struct rocker_desc_info *desc_info;
	int credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}
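/* Per-descriptor RX processing: parse the TLVs describing the received
 * fragment, unmap the DMA buffer, push the skb up the stack, and attach
 * a freshly allocated skb to the descriptor for reuse.
 */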
static int rocker_port_rx_proc(const struct rocker *rocker,
			       const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;
	u16 rx_flags = 0;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;
	if (attrs[ROCKER_TLV_RX_FLAGS])
		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
		skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}
static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	int credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete(napi);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}
/*****************
 * PCI driver ops
 *****************/

static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
	bool link_up;

	link_up = link_status & (1 << rocker_port->pport);
	if (link_up)
		netif_carrier_on(rocker_port->dev);
	else
		netif_carrier_off(rocker_port->dev);
}
static void rocker_remove_ports(struct rocker *rocker)
{
	struct rocker_port *rocker_port;
	int i;

	for (i = 0; i < rocker->port_count; i++) {
		rocker_port = rocker->ports[i];
		if (!rocker_port)
			continue;
		rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
		rocker_world_port_fini(rocker_port);
		unregister_netdev(rocker_port->dev);
		rocker_world_port_post_fini(rocker_port);
		free_netdev(rocker_port->dev);
	}
	rocker_world_fini(rocker);
	kfree(rocker->ports);
}
static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
						   rocker_port->dev->dev_addr);
	if (err) {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}
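/* Bring up a single front-panel port: allocate the netdev, wire up the
 * ops and NAPI contexts, register it, then seed the hardware with the
 * learning mode, the ingress port table entry, and the untagged (vid 0)
 * VLAN. Failures unwind in reverse order through the labels below.
 */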
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u16 untagged_vid = 0;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;

	err = rocker_world_check_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "world init failed\n");
		goto err_world_check_init;
	}

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	dev->switchdev_ops = &rocker_port_switchdev_ops;
	netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
			  NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	err = rocker_world_port_pre_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world pre-init failed\n");
		goto err_world_port_pre_init;
	}
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	err = rocker_world_port_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world init failed\n");
		goto err_world_port_init;
	}

	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);

	rocker_port_set_learning(rocker_port,
				 !!(rocker_port->brport_flags & BR_LEARNING));

	err = rocker_port_ig_tbl(rocker_port, NULL, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}

	return 0;

err_untagged_vlan:
	rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
	rocker_world_port_fini(rocker_port);
err_world_port_init:
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	rocker_world_port_post_fini(rocker_port);
err_world_port_pre_init:
err_world_check_init:
	free_netdev(dev);
	return err;
}
static int rocker_probe_ports(struct rocker *rocker)
{
	size_t alloc_size;
	int i, err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}
static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;
	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;
	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}
static void rocker_msix_fini(const struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}
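/* Main PCI probe: map BAR0, set up MSI-X and the DMA rings, reset the
 * device, take the cmd/event IRQs, initialize the flow tables and FDB
 * cleanup timer, and finally create a netdev per physical port.
 */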
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	err = rocker_init_tbls(rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot init rocker tables\n");
		goto err_init_tbls;
	}

	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
		    (unsigned long) rocker);
	mod_timer(&rocker->fdb_cleanup_timer, jiffies);

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
		 (int)sizeof(rocker->hw.id), &rocker->hw.id);

	return 0;

err_probe_ports:
	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
err_init_tbls:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}
static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);

	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	rocker_remove_ports(rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}
static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};
/************************************
 * Net device notifier event handler
 ************************************/
static bool rocker_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	u16 untagged_vid = 0;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

	rocker_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

	return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
				    false);
	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}
static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
				   struct net_device *master)
{
	int err;

	rocker_port->bridge_dev = master;

	err = rocker_port_fwd_disable(rocker_port, NULL, 0);
	if (err)
		return err;
	err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}
static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	int err = 0;

	if (netif_is_bridge_master(master))
		err = rocker_port_bridge_join(rocker_port, master);
	else if (netif_is_ovs_master(master))
		err = rocker_port_ovs_changed(rocker_port, master);
	return err;
}
static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
{
	int err = 0;

	if (rocker_port_is_bridged(rocker_port))
		err = rocker_port_bridge_leave(rocker_port);
	else if (rocker_port_is_ovsed(rocker_port))
		err = rocker_port_ovs_changed(rocker_port, NULL);
	return err;
}
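/* NETDEV_CHANGEUPPER tells us a rocker port was linked to or unlinked
 * from a master device (bridge or OVS master). Errors are only logged;
 * NOTIFY_DONE is returned regardless, since the core has already made
 * the change.
 */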
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_world_port_master_linked(rocker_port,
							      info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n", err);
			err = rocker_port_master_linked(rocker_port,
							info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n", err);
		} else {
			err = rocker_world_port_master_unlinked(rocker_port,
								info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", err);
			err = rocker_port_master_unlinked(rocker_port);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", err);
		}
		break;
	}
out:
	return NOTIFY_DONE;
}
static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};
/************************************
 * Net event notifier event handler
 ************************************/
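/* Reflect ARP neighbour updates into the hardware: a valid neighbour
 * installs or refreshes the L3 entry for its IP, an invalid one removes
 * it. ROCKER_OP_FLAG_NOWAIT issues the command without waiting for
 * completion, since this path may be called in atomic context.
 */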
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
		    ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}
static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct rocker_port *rocker_port;
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		rocker_port = netdev_priv(dev);
		err = rocker_world_port_neigh_update(rocker_port, n);
		if (err)
			netdev_warn(dev, "failed to handle neigh update (err %d)\n", err);
		err = rocker_neigh_update(dev, n);
		if (err)
			netdev_warn(dev, "failed to handle neigh update (err %d)\n", err);
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};
/***********************
 * Module init and exit
 ***********************/
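/* The netdevice and netevent notifiers are registered before the PCI
 * driver; if driver registration fails, both are unregistered again in
 * reverse order.
 */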
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}
static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}
module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);