rocker: push tlv processing into separate files
[cascardo/linux.git] / drivers / net / ethernet / rocker / rocker_main.c
1 /*
2  * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3  * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
4  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <linux/ctype.h>
34 #include <net/switchdev.h>
35 #include <net/rtnetlink.h>
36 #include <net/ip_fib.h>
37 #include <net/netevent.h>
38 #include <net/arp.h>
39 #include <linux/io-64-nonatomic-lo-hi.h>
40 #include <generated/utsrelease.h>
41
42 #include "rocker_hw.h"
43 #include "rocker.h"
44 #include "rocker_tlv.h"
45
46 static const char rocker_driver_name[] = "rocker";
47
48 static const struct pci_device_id rocker_pci_id_table[] = {
49         {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
50         {0, }
51 };
52
53 struct rocker_flow_tbl_key {
54         u32 priority;
55         enum rocker_of_dpa_table_id tbl_id;
56         union {
57                 struct {
58                         u32 in_pport;
59                         u32 in_pport_mask;
60                         enum rocker_of_dpa_table_id goto_tbl;
61                 } ig_port;
62                 struct {
63                         u32 in_pport;
64                         __be16 vlan_id;
65                         __be16 vlan_id_mask;
66                         enum rocker_of_dpa_table_id goto_tbl;
67                         bool untagged;
68                         __be16 new_vlan_id;
69                 } vlan;
70                 struct {
71                         u32 in_pport;
72                         u32 in_pport_mask;
73                         __be16 eth_type;
74                         u8 eth_dst[ETH_ALEN];
75                         u8 eth_dst_mask[ETH_ALEN];
76                         __be16 vlan_id;
77                         __be16 vlan_id_mask;
78                         enum rocker_of_dpa_table_id goto_tbl;
79                         bool copy_to_cpu;
80                 } term_mac;
81                 struct {
82                         __be16 eth_type;
83                         __be32 dst4;
84                         __be32 dst4_mask;
85                         enum rocker_of_dpa_table_id goto_tbl;
86                         u32 group_id;
87                 } ucast_routing;
88                 struct {
89                         u8 eth_dst[ETH_ALEN];
90                         u8 eth_dst_mask[ETH_ALEN];
91                         int has_eth_dst;
92                         int has_eth_dst_mask;
93                         __be16 vlan_id;
94                         u32 tunnel_id;
95                         enum rocker_of_dpa_table_id goto_tbl;
96                         u32 group_id;
97                         bool copy_to_cpu;
98                 } bridge;
99                 struct {
100                         u32 in_pport;
101                         u32 in_pport_mask;
102                         u8 eth_src[ETH_ALEN];
103                         u8 eth_src_mask[ETH_ALEN];
104                         u8 eth_dst[ETH_ALEN];
105                         u8 eth_dst_mask[ETH_ALEN];
106                         __be16 eth_type;
107                         __be16 vlan_id;
108                         __be16 vlan_id_mask;
109                         u8 ip_proto;
110                         u8 ip_proto_mask;
111                         u8 ip_tos;
112                         u8 ip_tos_mask;
113                         u32 group_id;
114                 } acl;
115         };
116 };
117
118 struct rocker_flow_tbl_entry {
119         struct hlist_node entry;
120         u32 cmd;
121         u64 cookie;
122         struct rocker_flow_tbl_key key;
123         size_t key_len;
124         u32 key_crc32; /* key */
125 };
126
127 struct rocker_group_tbl_entry {
128         struct hlist_node entry;
129         u32 cmd;
130         u32 group_id; /* key */
131         u16 group_count;
132         u32 *group_ids;
133         union {
134                 struct {
135                         u8 pop_vlan;
136                 } l2_interface;
137                 struct {
138                         u8 eth_src[ETH_ALEN];
139                         u8 eth_dst[ETH_ALEN];
140                         __be16 vlan_id;
141                         u32 group_id;
142                 } l2_rewrite;
143                 struct {
144                         u8 eth_src[ETH_ALEN];
145                         u8 eth_dst[ETH_ALEN];
146                         __be16 vlan_id;
147                         bool ttl_check;
148                         u32 group_id;
149                 } l3_unicast;
150         };
151 };
152
153 struct rocker_fdb_tbl_entry {
154         struct hlist_node entry;
155         u32 key_crc32; /* key */
156         bool learned;
157         unsigned long touched;
158         struct rocker_fdb_tbl_key {
159                 struct rocker_port *rocker_port;
160                 u8 addr[ETH_ALEN];
161                 __be16 vlan_id;
162         } key;
163 };
164
165 struct rocker_internal_vlan_tbl_entry {
166         struct hlist_node entry;
167         int ifindex; /* key */
168         u32 ref_count;
169         __be16 vlan_id;
170 };
171
172 struct rocker_neigh_tbl_entry {
173         struct hlist_node entry;
174         __be32 ip_addr; /* key */
175         struct net_device *dev;
176         u32 ref_count;
177         u32 index;
178         u8 eth_dst[ETH_ALEN];
179         bool ttl_check;
180 };
181
182 struct rocker_dma_ring_info {
183         size_t size;
184         u32 head;
185         u32 tail;
186         struct rocker_desc *desc; /* mapped */
187         dma_addr_t mapaddr;
188         struct rocker_desc_info *desc_info;
189         unsigned int type;
190 };
191
192 struct rocker;
193
194 enum {
195         ROCKER_CTRL_LINK_LOCAL_MCAST,
196         ROCKER_CTRL_LOCAL_ARP,
197         ROCKER_CTRL_IPV4_MCAST,
198         ROCKER_CTRL_IPV6_MCAST,
199         ROCKER_CTRL_DFLT_BRIDGING,
200         ROCKER_CTRL_DFLT_OVS,
201         ROCKER_CTRL_MAX,
202 };
203
204 #define ROCKER_INTERNAL_VLAN_ID_BASE    0x0f00
205 #define ROCKER_N_INTERNAL_VLANS         255
206 #define ROCKER_VLAN_BITMAP_LEN          BITS_TO_LONGS(VLAN_N_VID)
207 #define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
208
209 struct rocker_port {
210         struct net_device *dev;
211         struct net_device *bridge_dev;
212         struct rocker *rocker;
213         unsigned int port_number;
214         u32 pport;
215         __be16 internal_vlan_id;
216         int stp_state;
217         u32 brport_flags;
218         unsigned long ageing_time;
219         bool ctrls[ROCKER_CTRL_MAX];
220         unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
221         struct napi_struct napi_tx;
222         struct napi_struct napi_rx;
223         struct rocker_dma_ring_info tx_ring;
224         struct rocker_dma_ring_info rx_ring;
225 };
226
227 struct rocker {
228         struct pci_dev *pdev;
229         u8 __iomem *hw_addr;
230         struct msix_entry *msix_entries;
231         unsigned int port_count;
232         struct rocker_port **ports;
233         struct {
234                 u64 id;
235         } hw;
236         spinlock_t cmd_ring_lock;               /* for cmd ring accesses */
237         struct rocker_dma_ring_info cmd_ring;
238         struct rocker_dma_ring_info event_ring;
239         DECLARE_HASHTABLE(flow_tbl, 16);
240         spinlock_t flow_tbl_lock;               /* for flow tbl accesses */
241         u64 flow_tbl_next_cookie;
242         DECLARE_HASHTABLE(group_tbl, 16);
243         spinlock_t group_tbl_lock;              /* for group tbl accesses */
244         struct timer_list fdb_cleanup_timer;
245         DECLARE_HASHTABLE(fdb_tbl, 16);
246         spinlock_t fdb_tbl_lock;                /* for fdb tbl accesses */
247         unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
248         DECLARE_HASHTABLE(internal_vlan_tbl, 8);
249         spinlock_t internal_vlan_tbl_lock;      /* for vlan tbl accesses */
250         DECLARE_HASHTABLE(neigh_tbl, 16);
251         spinlock_t neigh_tbl_lock;              /* for neigh tbl accesses */
252         u32 neigh_tbl_next_index;
253 };
254
255 static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
256 static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
257 static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
258 static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
259 static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
260 static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
261 static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
262 static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
263 static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
264
265 /* Rocker priority levels for flow table entries.  Higher
266  * priority match takes precedence over lower priority match.
267  */
268
269 enum {
270         ROCKER_PRIORITY_UNKNOWN = 0,
271         ROCKER_PRIORITY_IG_PORT = 1,
272         ROCKER_PRIORITY_VLAN = 1,
273         ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
274         ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
275         ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
276         ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
277         ROCKER_PRIORITY_BRIDGING_VLAN = 3,
278         ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
279         ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
280         ROCKER_PRIORITY_BRIDGING_TENANT = 3,
281         ROCKER_PRIORITY_ACL_CTRL = 3,
282         ROCKER_PRIORITY_ACL_NORMAL = 2,
283         ROCKER_PRIORITY_ACL_DFLT = 1,
284 };
285
286 static bool rocker_vlan_id_is_internal(__be16 vlan_id)
287 {
288         u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
289         u16 end = 0xffe;
290         u16 _vlan_id = ntohs(vlan_id);
291
292         return (_vlan_id >= start && _vlan_id <= end);
293 }
294
295 static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
296                                       u16 vid, bool *pop_vlan)
297 {
298         __be16 vlan_id;
299
300         if (pop_vlan)
301                 *pop_vlan = false;
302         vlan_id = htons(vid);
303         if (!vlan_id) {
304                 vlan_id = rocker_port->internal_vlan_id;
305                 if (pop_vlan)
306                         *pop_vlan = true;
307         }
308
309         return vlan_id;
310 }
311
312 static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
313                                    __be16 vlan_id)
314 {
315         if (rocker_vlan_id_is_internal(vlan_id))
316                 return 0;
317
318         return ntohs(vlan_id);
319 }
320
321 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
322 {
323         return rocker_port->bridge_dev &&
324                netif_is_bridge_master(rocker_port->bridge_dev);
325 }
326
327 static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
328 {
329         return rocker_port->bridge_dev &&
330                netif_is_ovs_master(rocker_port->bridge_dev);
331 }
332
333 #define ROCKER_OP_FLAG_REMOVE           BIT(0)
334 #define ROCKER_OP_FLAG_NOWAIT           BIT(1)
335 #define ROCKER_OP_FLAG_LEARNED          BIT(2)
336 #define ROCKER_OP_FLAG_REFRESH          BIT(3)
337
338 static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
339                                 size_t size)
340 {
341         struct switchdev_trans_item *elem = NULL;
342         gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
343                           GFP_ATOMIC : GFP_KERNEL;
344
345         /* If in transaction prepare phase, allocate the memory
346          * and enqueue it on a transaction.  If in transaction
347          * commit phase, dequeue the memory from the transaction
348          * rather than re-allocating the memory.  The idea is the
349          * driver code paths for prepare and commit are identical
350          * so the memory allocated in the prepare phase is the
351          * memory used in the commit phase.
352          */
353
354         if (!trans) {
355                 elem = kzalloc(size + sizeof(*elem), gfp_flags);
356         } else if (switchdev_trans_ph_prepare(trans)) {
357                 elem = kzalloc(size + sizeof(*elem), gfp_flags);
358                 if (!elem)
359                         return NULL;
360                 switchdev_trans_item_enqueue(trans, elem, kfree, elem);
361         } else {
362                 elem = switchdev_trans_item_dequeue(trans);
363         }
364
365         return elem ? elem + 1 : NULL;
366 }
367
368 static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
369                             size_t size)
370 {
371         return __rocker_mem_alloc(trans, flags, size);
372 }
373
374 static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
375                             size_t n, size_t size)
376 {
377         return __rocker_mem_alloc(trans, flags, n * size);
378 }
379
380 static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
381 {
382         struct switchdev_trans_item *elem;
383
384         /* Frees are ignored if in transaction prepare phase.  The
385          * memory remains on the per-port list until freed in the
386          * commit phase.
387          */
388
389         if (switchdev_trans_ph_prepare(trans))
390                 return;
391
392         elem = (struct switchdev_trans_item *) mem - 1;
393         kfree(elem);
394 }
395
396 struct rocker_wait {
397         wait_queue_head_t wait;
398         bool done;
399         bool nowait;
400 };
401
402 static void rocker_wait_reset(struct rocker_wait *wait)
403 {
404         wait->done = false;
405         wait->nowait = false;
406 }
407
408 static void rocker_wait_init(struct rocker_wait *wait)
409 {
410         init_waitqueue_head(&wait->wait);
411         rocker_wait_reset(wait);
412 }
413
414 static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
415                                               struct switchdev_trans *trans,
416                                               int flags)
417 {
418         struct rocker_wait *wait;
419
420         wait = rocker_kzalloc(trans, flags, sizeof(*wait));
421         if (!wait)
422                 return NULL;
423         rocker_wait_init(wait);
424         return wait;
425 }
426
427 static void rocker_wait_destroy(struct switchdev_trans *trans,
428                                 struct rocker_wait *wait)
429 {
430         rocker_kfree(trans, wait);
431 }
432
433 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
434                                       unsigned long timeout)
435 {
436         wait_event_timeout(wait->wait, wait->done, HZ / 10);
437         if (!wait->done)
438                 return false;
439         return true;
440 }
441
442 static void rocker_wait_wake_up(struct rocker_wait *wait)
443 {
444         wait->done = true;
445         wake_up(&wait->wait);
446 }
447
448 static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
449 {
450         return rocker->msix_entries[vector].vector;
451 }
452
453 static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
454 {
455         return rocker_msix_vector(rocker_port->rocker,
456                                   ROCKER_MSIX_VEC_TX(rocker_port->port_number));
457 }
458
459 static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
460 {
461         return rocker_msix_vector(rocker_port->rocker,
462                                   ROCKER_MSIX_VEC_RX(rocker_port->port_number));
463 }
464
465 #define rocker_write32(rocker, reg, val)        \
466         writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
467 #define rocker_read32(rocker, reg)      \
468         readl((rocker)->hw_addr + (ROCKER_ ## reg))
469 #define rocker_write64(rocker, reg, val)        \
470         writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
471 #define rocker_read64(rocker, reg)      \
472         readq((rocker)->hw_addr + (ROCKER_ ## reg))
473
474 /*****************************
475  * HW basic testing functions
476  *****************************/
477
478 static int rocker_reg_test(const struct rocker *rocker)
479 {
480         const struct pci_dev *pdev = rocker->pdev;
481         u64 test_reg;
482         u64 rnd;
483
484         rnd = prandom_u32();
485         rnd >>= 1;
486         rocker_write32(rocker, TEST_REG, rnd);
487         test_reg = rocker_read32(rocker, TEST_REG);
488         if (test_reg != rnd * 2) {
489                 dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
490                         test_reg, rnd * 2);
491                 return -EIO;
492         }
493
494         rnd = prandom_u32();
495         rnd <<= 31;
496         rnd |= prandom_u32();
497         rocker_write64(rocker, TEST_REG64, rnd);
498         test_reg = rocker_read64(rocker, TEST_REG64);
499         if (test_reg != rnd * 2) {
500                 dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
501                         test_reg, rnd * 2);
502                 return -EIO;
503         }
504
505         return 0;
506 }
507
508 static int rocker_dma_test_one(const struct rocker *rocker,
509                                struct rocker_wait *wait, u32 test_type,
510                                dma_addr_t dma_handle, const unsigned char *buf,
511                                const unsigned char *expect, size_t size)
512 {
513         const struct pci_dev *pdev = rocker->pdev;
514         int i;
515
516         rocker_wait_reset(wait);
517         rocker_write32(rocker, TEST_DMA_CTRL, test_type);
518
519         if (!rocker_wait_event_timeout(wait, HZ / 10)) {
520                 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
521                 return -EIO;
522         }
523
524         for (i = 0; i < size; i++) {
525                 if (buf[i] != expect[i]) {
526                         dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
527                                 buf[i], i, expect[i]);
528                         return -EIO;
529                 }
530         }
531         return 0;
532 }
533
534 #define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
535 #define ROCKER_TEST_DMA_FILL_PATTERN 0x96
536
537 static int rocker_dma_test_offset(const struct rocker *rocker,
538                                   struct rocker_wait *wait, int offset)
539 {
540         struct pci_dev *pdev = rocker->pdev;
541         unsigned char *alloc;
542         unsigned char *buf;
543         unsigned char *expect;
544         dma_addr_t dma_handle;
545         int i;
546         int err;
547
548         alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
549                         GFP_KERNEL | GFP_DMA);
550         if (!alloc)
551                 return -ENOMEM;
552         buf = alloc + offset;
553         expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
554
555         dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
556                                     PCI_DMA_BIDIRECTIONAL);
557         if (pci_dma_mapping_error(pdev, dma_handle)) {
558                 err = -EIO;
559                 goto free_alloc;
560         }
561
562         rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
563         rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
564
565         memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
566         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
567                                   dma_handle, buf, expect,
568                                   ROCKER_TEST_DMA_BUF_SIZE);
569         if (err)
570                 goto unmap;
571
572         memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
573         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
574                                   dma_handle, buf, expect,
575                                   ROCKER_TEST_DMA_BUF_SIZE);
576         if (err)
577                 goto unmap;
578
579         prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
580         for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
581                 expect[i] = ~buf[i];
582         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
583                                   dma_handle, buf, expect,
584                                   ROCKER_TEST_DMA_BUF_SIZE);
585         if (err)
586                 goto unmap;
587
588 unmap:
589         pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
590                          PCI_DMA_BIDIRECTIONAL);
591 free_alloc:
592         kfree(alloc);
593
594         return err;
595 }
596
597 static int rocker_dma_test(const struct rocker *rocker,
598                            struct rocker_wait *wait)
599 {
600         int i;
601         int err;
602
603         for (i = 0; i < 8; i++) {
604                 err = rocker_dma_test_offset(rocker, wait, i);
605                 if (err)
606                         return err;
607         }
608         return 0;
609 }
610
611 static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
612 {
613         struct rocker_wait *wait = dev_id;
614
615         rocker_wait_wake_up(wait);
616
617         return IRQ_HANDLED;
618 }
619
620 static int rocker_basic_hw_test(const struct rocker *rocker)
621 {
622         const struct pci_dev *pdev = rocker->pdev;
623         struct rocker_wait wait;
624         int err;
625
626         err = rocker_reg_test(rocker);
627         if (err) {
628                 dev_err(&pdev->dev, "reg test failed\n");
629                 return err;
630         }
631
632         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
633                           rocker_test_irq_handler, 0,
634                           rocker_driver_name, &wait);
635         if (err) {
636                 dev_err(&pdev->dev, "cannot assign test irq\n");
637                 return err;
638         }
639
640         rocker_wait_init(&wait);
641         rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
642
643         if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
644                 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
645                 err = -EIO;
646                 goto free_irq;
647         }
648
649         err = rocker_dma_test(rocker, &wait);
650         if (err)
651                 dev_err(&pdev->dev, "dma test failed\n");
652
653 free_irq:
654         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
655         return err;
656 }
657
658 /******************************************
659  * DMA rings and descriptors manipulations
660  ******************************************/
661
662 static u32 __pos_inc(u32 pos, size_t limit)
663 {
664         return ++pos == limit ? 0 : pos;
665 }
666
667 static int rocker_desc_err(const struct rocker_desc_info *desc_info)
668 {
669         int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
670
671         switch (err) {
672         case ROCKER_OK:
673                 return 0;
674         case -ROCKER_ENOENT:
675                 return -ENOENT;
676         case -ROCKER_ENXIO:
677                 return -ENXIO;
678         case -ROCKER_ENOMEM:
679                 return -ENOMEM;
680         case -ROCKER_EEXIST:
681                 return -EEXIST;
682         case -ROCKER_EINVAL:
683                 return -EINVAL;
684         case -ROCKER_EMSGSIZE:
685                 return -EMSGSIZE;
686         case -ROCKER_ENOTSUP:
687                 return -EOPNOTSUPP;
688         case -ROCKER_ENOBUFS:
689                 return -ENOBUFS;
690         }
691
692         return -EINVAL;
693 }
694
695 static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
696 {
697         desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
698 }
699
700 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
701 {
702         u32 comp_err = desc_info->desc->comp_err;
703
704         return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
705 }
706
707 static void *
708 rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
709 {
710         return (void *)(uintptr_t)desc_info->desc->cookie;
711 }
712
713 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
714                                        void *ptr)
715 {
716         desc_info->desc->cookie = (uintptr_t) ptr;
717 }
718
719 static struct rocker_desc_info *
720 rocker_desc_head_get(const struct rocker_dma_ring_info *info)
721 {
722         static struct rocker_desc_info *desc_info;
723         u32 head = __pos_inc(info->head, info->size);
724
725         desc_info = &info->desc_info[info->head];
726         if (head == info->tail)
727                 return NULL; /* ring full */
728         desc_info->tlv_size = 0;
729         return desc_info;
730 }
731
732 static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
733 {
734         desc_info->desc->buf_size = desc_info->data_size;
735         desc_info->desc->tlv_size = desc_info->tlv_size;
736 }
737
738 static void rocker_desc_head_set(const struct rocker *rocker,
739                                  struct rocker_dma_ring_info *info,
740                                  const struct rocker_desc_info *desc_info)
741 {
742         u32 head = __pos_inc(info->head, info->size);
743
744         BUG_ON(head == info->tail);
745         rocker_desc_commit(desc_info);
746         info->head = head;
747         rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
748 }
749
750 static struct rocker_desc_info *
751 rocker_desc_tail_get(struct rocker_dma_ring_info *info)
752 {
753         static struct rocker_desc_info *desc_info;
754
755         if (info->tail == info->head)
756                 return NULL; /* nothing to be done between head and tail */
757         desc_info = &info->desc_info[info->tail];
758         if (!rocker_desc_gen(desc_info))
759                 return NULL; /* gen bit not set, desc is not ready yet */
760         info->tail = __pos_inc(info->tail, info->size);
761         desc_info->tlv_size = desc_info->desc->tlv_size;
762         return desc_info;
763 }
764
765 static void rocker_dma_ring_credits_set(const struct rocker *rocker,
766                                         const struct rocker_dma_ring_info *info,
767                                         u32 credits)
768 {
769         if (credits)
770                 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
771 }
772
773 static unsigned long rocker_dma_ring_size_fix(size_t size)
774 {
775         return max(ROCKER_DMA_SIZE_MIN,
776                    min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
777 }
778
779 static int rocker_dma_ring_create(const struct rocker *rocker,
780                                   unsigned int type,
781                                   size_t size,
782                                   struct rocker_dma_ring_info *info)
783 {
784         int i;
785
786         BUG_ON(size != rocker_dma_ring_size_fix(size));
787         info->size = size;
788         info->type = type;
789         info->head = 0;
790         info->tail = 0;
791         info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
792                                   GFP_KERNEL);
793         if (!info->desc_info)
794                 return -ENOMEM;
795
796         info->desc = pci_alloc_consistent(rocker->pdev,
797                                           info->size * sizeof(*info->desc),
798                                           &info->mapaddr);
799         if (!info->desc) {
800                 kfree(info->desc_info);
801                 return -ENOMEM;
802         }
803
804         for (i = 0; i < info->size; i++)
805                 info->desc_info[i].desc = &info->desc[i];
806
807         rocker_write32(rocker, DMA_DESC_CTRL(info->type),
808                        ROCKER_DMA_DESC_CTRL_RESET);
809         rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
810         rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
811
812         return 0;
813 }
814
815 static void rocker_dma_ring_destroy(const struct rocker *rocker,
816                                     const struct rocker_dma_ring_info *info)
817 {
818         rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
819
820         pci_free_consistent(rocker->pdev,
821                             info->size * sizeof(struct rocker_desc),
822                             info->desc, info->mapaddr);
823         kfree(info->desc_info);
824 }
825
826 static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
827                                              struct rocker_dma_ring_info *info)
828 {
829         int i;
830
831         BUG_ON(info->head || info->tail);
832
833         /* When ring is consumer, we need to advance head for each desc.
834          * That tells hw that the desc is ready to be used by it.
835          */
836         for (i = 0; i < info->size - 1; i++)
837                 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
838         rocker_desc_commit(&info->desc_info[i]);
839 }
840
841 static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
842                                       const struct rocker_dma_ring_info *info,
843                                       int direction, size_t buf_size)
844 {
845         struct pci_dev *pdev = rocker->pdev;
846         int i;
847         int err;
848
849         for (i = 0; i < info->size; i++) {
850                 struct rocker_desc_info *desc_info = &info->desc_info[i];
851                 struct rocker_desc *desc = &info->desc[i];
852                 dma_addr_t dma_handle;
853                 char *buf;
854
855                 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
856                 if (!buf) {
857                         err = -ENOMEM;
858                         goto rollback;
859                 }
860
861                 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
862                 if (pci_dma_mapping_error(pdev, dma_handle)) {
863                         kfree(buf);
864                         err = -EIO;
865                         goto rollback;
866                 }
867
868                 desc_info->data = buf;
869                 desc_info->data_size = buf_size;
870                 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
871
872                 desc->buf_addr = dma_handle;
873                 desc->buf_size = buf_size;
874         }
875         return 0;
876
877 rollback:
878         for (i--; i >= 0; i--) {
879                 const struct rocker_desc_info *desc_info = &info->desc_info[i];
880
881                 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
882                                  desc_info->data_size, direction);
883                 kfree(desc_info->data);
884         }
885         return err;
886 }
887
888 static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
889                                       const struct rocker_dma_ring_info *info,
890                                       int direction)
891 {
892         struct pci_dev *pdev = rocker->pdev;
893         int i;
894
895         for (i = 0; i < info->size; i++) {
896                 const struct rocker_desc_info *desc_info = &info->desc_info[i];
897                 struct rocker_desc *desc = &info->desc[i];
898
899                 desc->buf_addr = 0;
900                 desc->buf_size = 0;
901                 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
902                                  desc_info->data_size, direction);
903                 kfree(desc_info->data);
904         }
905 }
906
907 static int rocker_dma_rings_init(struct rocker *rocker)
908 {
909         const struct pci_dev *pdev = rocker->pdev;
910         int err;
911
912         err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
913                                      ROCKER_DMA_CMD_DEFAULT_SIZE,
914                                      &rocker->cmd_ring);
915         if (err) {
916                 dev_err(&pdev->dev, "failed to create command dma ring\n");
917                 return err;
918         }
919
920         spin_lock_init(&rocker->cmd_ring_lock);
921
922         err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
923                                          PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
924         if (err) {
925                 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
926                 goto err_dma_cmd_ring_bufs_alloc;
927         }
928
929         err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
930                                      ROCKER_DMA_EVENT_DEFAULT_SIZE,
931                                      &rocker->event_ring);
932         if (err) {
933                 dev_err(&pdev->dev, "failed to create event dma ring\n");
934                 goto err_dma_event_ring_create;
935         }
936
937         err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
938                                          PCI_DMA_FROMDEVICE, PAGE_SIZE);
939         if (err) {
940                 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
941                 goto err_dma_event_ring_bufs_alloc;
942         }
943         rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
944         return 0;
945
946 err_dma_event_ring_bufs_alloc:
947         rocker_dma_ring_destroy(rocker, &rocker->event_ring);
948 err_dma_event_ring_create:
949         rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
950                                   PCI_DMA_BIDIRECTIONAL);
951 err_dma_cmd_ring_bufs_alloc:
952         rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
953         return err;
954 }
955
956 static void rocker_dma_rings_fini(struct rocker *rocker)
957 {
958         rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
959                                   PCI_DMA_BIDIRECTIONAL);
960         rocker_dma_ring_destroy(rocker, &rocker->event_ring);
961         rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
962                                   PCI_DMA_BIDIRECTIONAL);
963         rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
964 }
965
966 static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
967                                       struct rocker_desc_info *desc_info,
968                                       struct sk_buff *skb, size_t buf_len)
969 {
970         const struct rocker *rocker = rocker_port->rocker;
971         struct pci_dev *pdev = rocker->pdev;
972         dma_addr_t dma_handle;
973
974         dma_handle = pci_map_single(pdev, skb->data, buf_len,
975                                     PCI_DMA_FROMDEVICE);
976         if (pci_dma_mapping_error(pdev, dma_handle))
977                 return -EIO;
978         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
979                 goto tlv_put_failure;
980         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
981                 goto tlv_put_failure;
982         return 0;
983
984 tlv_put_failure:
985         pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
986         desc_info->tlv_size = 0;
987         return -EMSGSIZE;
988 }
989
990 static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
991 {
992         return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
993 }
994
995 static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
996                                         struct rocker_desc_info *desc_info)
997 {
998         struct net_device *dev = rocker_port->dev;
999         struct sk_buff *skb;
1000         size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1001         int err;
1002
1003         /* Ensure that hw will see tlv_size zero in case of an error.
1004          * That tells hw to use another descriptor.
1005          */
1006         rocker_desc_cookie_ptr_set(desc_info, NULL);
1007         desc_info->tlv_size = 0;
1008
1009         skb = netdev_alloc_skb_ip_align(dev, buf_len);
1010         if (!skb)
1011                 return -ENOMEM;
1012         err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
1013         if (err) {
1014                 dev_kfree_skb_any(skb);
1015                 return err;
1016         }
1017         rocker_desc_cookie_ptr_set(desc_info, skb);
1018         return 0;
1019 }
1020
1021 static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
1022                                          const struct rocker_tlv **attrs)
1023 {
1024         struct pci_dev *pdev = rocker->pdev;
1025         dma_addr_t dma_handle;
1026         size_t len;
1027
1028         if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1029             !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1030                 return;
1031         dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1032         len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1033         pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1034 }
1035
1036 static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
1037                                         const struct rocker_desc_info *desc_info)
1038 {
1039         const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
1040         struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1041
1042         if (!skb)
1043                 return;
1044         rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1045         rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1046         dev_kfree_skb_any(skb);
1047 }
1048
1049 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
1050 {
1051         const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1052         const struct rocker *rocker = rocker_port->rocker;
1053         int i;
1054         int err;
1055
1056         for (i = 0; i < rx_ring->size; i++) {
1057                 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
1058                                                    &rx_ring->desc_info[i]);
1059                 if (err)
1060                         goto rollback;
1061         }
1062         return 0;
1063
1064 rollback:
1065         for (i--; i >= 0; i--)
1066                 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1067         return err;
1068 }
1069
1070 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
1071 {
1072         const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1073         const struct rocker *rocker = rocker_port->rocker;
1074         int i;
1075
1076         for (i = 0; i < rx_ring->size; i++)
1077                 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1078 }
1079
1080 static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1081 {
1082         struct rocker *rocker = rocker_port->rocker;
1083         int err;
1084
1085         err = rocker_dma_ring_create(rocker,
1086                                      ROCKER_DMA_TX(rocker_port->port_number),
1087                                      ROCKER_DMA_TX_DEFAULT_SIZE,
1088                                      &rocker_port->tx_ring);
1089         if (err) {
1090                 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1091                 return err;
1092         }
1093
1094         err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1095                                          PCI_DMA_TODEVICE,
1096                                          ROCKER_DMA_TX_DESC_SIZE);
1097         if (err) {
1098                 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1099                 goto err_dma_tx_ring_bufs_alloc;
1100         }
1101
1102         err = rocker_dma_ring_create(rocker,
1103                                      ROCKER_DMA_RX(rocker_port->port_number),
1104                                      ROCKER_DMA_RX_DEFAULT_SIZE,
1105                                      &rocker_port->rx_ring);
1106         if (err) {
1107                 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1108                 goto err_dma_rx_ring_create;
1109         }
1110
1111         err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1112                                          PCI_DMA_BIDIRECTIONAL,
1113                                          ROCKER_DMA_RX_DESC_SIZE);
1114         if (err) {
1115                 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1116                 goto err_dma_rx_ring_bufs_alloc;
1117         }
1118
1119         err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
1120         if (err) {
1121                 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1122                 goto err_dma_rx_ring_skbs_alloc;
1123         }
1124         rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1125
1126         return 0;
1127
1128 err_dma_rx_ring_skbs_alloc:
1129         rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1130                                   PCI_DMA_BIDIRECTIONAL);
1131 err_dma_rx_ring_bufs_alloc:
1132         rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1133 err_dma_rx_ring_create:
1134         rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1135                                   PCI_DMA_TODEVICE);
1136 err_dma_tx_ring_bufs_alloc:
1137         rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1138         return err;
1139 }
1140
1141 static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1142 {
1143         struct rocker *rocker = rocker_port->rocker;
1144
1145         rocker_dma_rx_ring_skbs_free(rocker_port);
1146         rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1147                                   PCI_DMA_BIDIRECTIONAL);
1148         rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1149         rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1150                                   PCI_DMA_TODEVICE);
1151         rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1152 }
1153
1154 static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1155                                    bool enable)
1156 {
1157         u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1158
1159         if (enable)
1160                 val |= 1ULL << rocker_port->pport;
1161         else
1162                 val &= ~(1ULL << rocker_port->pport);
1163         rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1164 }
1165
1166 /********************************
1167  * Interrupt handler and helpers
1168  ********************************/
1169
1170 static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1171 {
1172         struct rocker *rocker = dev_id;
1173         const struct rocker_desc_info *desc_info;
1174         struct rocker_wait *wait;
1175         u32 credits = 0;
1176
1177         spin_lock(&rocker->cmd_ring_lock);
1178         while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1179                 wait = rocker_desc_cookie_ptr_get(desc_info);
1180                 if (wait->nowait) {
1181                         rocker_desc_gen_clear(desc_info);
1182                         rocker_wait_destroy(NULL, wait);
1183                 } else {
1184                         rocker_wait_wake_up(wait);
1185                 }
1186                 credits++;
1187         }
1188         spin_unlock(&rocker->cmd_ring_lock);
1189         rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1190
1191         return IRQ_HANDLED;
1192 }
1193
1194 static void rocker_port_link_up(const struct rocker_port *rocker_port)
1195 {
1196         netif_carrier_on(rocker_port->dev);
1197         netdev_info(rocker_port->dev, "Link is up\n");
1198 }
1199
1200 static void rocker_port_link_down(const struct rocker_port *rocker_port)
1201 {
1202         netif_carrier_off(rocker_port->dev);
1203         netdev_info(rocker_port->dev, "Link is down\n");
1204 }
1205
1206 static int rocker_event_link_change(const struct rocker *rocker,
1207                                     const struct rocker_tlv *info)
1208 {
1209         const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
1210         unsigned int port_number;
1211         bool link_up;
1212         struct rocker_port *rocker_port;
1213
1214         rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
1215         if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
1216             !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1217                 return -EIO;
1218         port_number =
1219                 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
1220         link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1221
1222         if (port_number >= rocker->port_count)
1223                 return -EINVAL;
1224
1225         rocker_port = rocker->ports[port_number];
1226         if (netif_carrier_ok(rocker_port->dev) != link_up) {
1227                 if (link_up)
1228                         rocker_port_link_up(rocker_port);
1229                 else
1230                         rocker_port_link_down(rocker_port);
1231         }
1232
1233         return 0;
1234 }
1235
1236 static int rocker_port_fdb(struct rocker_port *rocker_port,
1237                            struct switchdev_trans *trans,
1238                            const unsigned char *addr,
1239                            __be16 vlan_id, int flags);
1240
1241 static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
1242                                       const struct rocker_tlv *info)
1243 {
1244         const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
1245         unsigned int port_number;
1246         struct rocker_port *rocker_port;
1247         const unsigned char *addr;
1248         int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
1249         __be16 vlan_id;
1250
1251         rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
1252         if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
1253             !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1254             !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1255                 return -EIO;
1256         port_number =
1257                 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
1258         addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
1259         vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
1260
1261         if (port_number >= rocker->port_count)
1262                 return -EINVAL;
1263
1264         rocker_port = rocker->ports[port_number];
1265
1266         if (rocker_port->stp_state != BR_STATE_LEARNING &&
1267             rocker_port->stp_state != BR_STATE_FORWARDING)
1268                 return 0;
1269
1270         return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
1271 }
1272
1273 static int rocker_event_process(const struct rocker *rocker,
1274                                 const struct rocker_desc_info *desc_info)
1275 {
1276         const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1277         const struct rocker_tlv *info;
1278         u16 type;
1279
1280         rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1281         if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1282             !attrs[ROCKER_TLV_EVENT_INFO])
1283                 return -EIO;
1284
1285         type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1286         info = attrs[ROCKER_TLV_EVENT_INFO];
1287
1288         switch (type) {
1289         case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1290                 return rocker_event_link_change(rocker, info);
1291         case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1292                 return rocker_event_mac_vlan_seen(rocker, info);
1293         }
1294
1295         return -EOPNOTSUPP;
1296 }
1297
1298 static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1299 {
1300         struct rocker *rocker = dev_id;
1301         const struct pci_dev *pdev = rocker->pdev;
1302         const struct rocker_desc_info *desc_info;
1303         u32 credits = 0;
1304         int err;
1305
1306         while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1307                 err = rocker_desc_err(desc_info);
1308                 if (err) {
1309                         dev_err(&pdev->dev, "event desc received with err %d\n",
1310                                 err);
1311                 } else {
1312                         err = rocker_event_process(rocker, desc_info);
1313                         if (err)
1314                                 dev_err(&pdev->dev, "event processing failed with err %d\n",
1315                                         err);
1316                 }
1317                 rocker_desc_gen_clear(desc_info);
1318                 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1319                 credits++;
1320         }
1321         rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1322
1323         return IRQ_HANDLED;
1324 }
1325
1326 static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1327 {
1328         struct rocker_port *rocker_port = dev_id;
1329
1330         napi_schedule(&rocker_port->napi_tx);
1331         return IRQ_HANDLED;
1332 }
1333
1334 static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1335 {
1336         struct rocker_port *rocker_port = dev_id;
1337
1338         napi_schedule(&rocker_port->napi_rx);
1339         return IRQ_HANDLED;
1340 }
1341
1342 /********************
1343  * Command interface
1344  ********************/
1345
1346 typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
1347                                     struct rocker_desc_info *desc_info,
1348                                     void *priv);
1349
1350 typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
1351                                     const struct rocker_desc_info *desc_info,
1352                                     void *priv);
1353
1354 static int rocker_cmd_exec(struct rocker_port *rocker_port,
1355                            struct switchdev_trans *trans, int flags,
1356                            rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1357                            rocker_cmd_proc_cb_t process, void *process_priv)
1358 {
1359         struct rocker *rocker = rocker_port->rocker;
1360         struct rocker_desc_info *desc_info;
1361         struct rocker_wait *wait;
1362         bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
1363         unsigned long lock_flags;
1364         int err;
1365
1366         wait = rocker_wait_create(rocker_port, trans, flags);
1367         if (!wait)
1368                 return -ENOMEM;
1369         wait->nowait = nowait;
1370
1371         spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
1372
1373         desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1374         if (!desc_info) {
1375                 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1376                 err = -EAGAIN;
1377                 goto out;
1378         }
1379
1380         err = prepare(rocker_port, desc_info, prepare_priv);
1381         if (err) {
1382                 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1383                 goto out;
1384         }
1385
1386         rocker_desc_cookie_ptr_set(desc_info, wait);
1387
1388         if (!switchdev_trans_ph_prepare(trans))
1389                 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1390
1391         spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1392
1393         if (nowait)
1394                 return 0;
1395
1396         if (!switchdev_trans_ph_prepare(trans))
1397                 if (!rocker_wait_event_timeout(wait, HZ / 10))
1398                         return -EIO;
1399
1400         err = rocker_desc_err(desc_info);
1401         if (err)
1402                 return err;
1403
1404         if (process)
1405                 err = process(rocker_port, desc_info, process_priv);
1406
1407         rocker_desc_gen_clear(desc_info);
1408 out:
1409         rocker_wait_destroy(trans, wait);
1410         return err;
1411 }
1412
1413 static int
1414 rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
1415                                   struct rocker_desc_info *desc_info,
1416                                   void *priv)
1417 {
1418         struct rocker_tlv *cmd_info;
1419
1420         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1421                                ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1422                 return -EMSGSIZE;
1423         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1424         if (!cmd_info)
1425                 return -EMSGSIZE;
1426         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1427                                rocker_port->pport))
1428                 return -EMSGSIZE;
1429         rocker_tlv_nest_end(desc_info, cmd_info);
1430         return 0;
1431 }
1432
1433 static int
1434 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
1435                                           const struct rocker_desc_info *desc_info,
1436                                           void *priv)
1437 {
1438         struct ethtool_cmd *ecmd = priv;
1439         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1440         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1441         u32 speed;
1442         u8 duplex;
1443         u8 autoneg;
1444
1445         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1446         if (!attrs[ROCKER_TLV_CMD_INFO])
1447                 return -EIO;
1448
1449         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1450                                 attrs[ROCKER_TLV_CMD_INFO]);
1451         if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1452             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1453             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1454                 return -EIO;
1455
1456         speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1457         duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1458         autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1459
1460         ecmd->transceiver = XCVR_INTERNAL;
1461         ecmd->supported = SUPPORTED_TP;
1462         ecmd->phy_address = 0xff;
1463         ecmd->port = PORT_TP;
1464         ethtool_cmd_speed_set(ecmd, speed);
1465         ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1466         ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1467
1468         return 0;
1469 }
1470
1471 static int
1472 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
1473                                           const struct rocker_desc_info *desc_info,
1474                                           void *priv)
1475 {
1476         unsigned char *macaddr = priv;
1477         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1478         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1479         const struct rocker_tlv *attr;
1480
1481         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1482         if (!attrs[ROCKER_TLV_CMD_INFO])
1483                 return -EIO;
1484
1485         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1486                                 attrs[ROCKER_TLV_CMD_INFO]);
1487         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1488         if (!attr)
1489                 return -EIO;
1490
1491         if (rocker_tlv_len(attr) != ETH_ALEN)
1492                 return -EINVAL;
1493
1494         ether_addr_copy(macaddr, rocker_tlv_data(attr));
1495         return 0;
1496 }
1497
1498 struct port_name {
1499         char *buf;
1500         size_t len;
1501 };
1502
1503 static int
1504 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
1505                                             const struct rocker_desc_info *desc_info,
1506                                             void *priv)
1507 {
1508         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1509         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1510         struct port_name *name = priv;
1511         const struct rocker_tlv *attr;
1512         size_t i, j, len;
1513         const char *str;
1514
1515         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1516         if (!attrs[ROCKER_TLV_CMD_INFO])
1517                 return -EIO;
1518
1519         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1520                                 attrs[ROCKER_TLV_CMD_INFO]);
1521         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1522         if (!attr)
1523                 return -EIO;
1524
1525         len = min_t(size_t, rocker_tlv_len(attr), name->len);
1526         str = rocker_tlv_data(attr);
1527
1528         /* make sure name only contains alphanumeric characters */
1529         for (i = j = 0; i < len; ++i) {
1530                 if (isalnum(str[i])) {
1531                         name->buf[j] = str[i];
1532                         j++;
1533                 }
1534         }
1535
1536         if (j == 0)
1537                 return -EIO;
1538
1539         name->buf[j] = '\0';
1540
1541         return 0;
1542 }
1543
1544 static int
1545 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1546                                           struct rocker_desc_info *desc_info,
1547                                           void *priv)
1548 {
1549         struct ethtool_cmd *ecmd = priv;
1550         struct rocker_tlv *cmd_info;
1551
1552         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1553                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1554                 return -EMSGSIZE;
1555         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1556         if (!cmd_info)
1557                 return -EMSGSIZE;
1558         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1559                                rocker_port->pport))
1560                 return -EMSGSIZE;
1561         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1562                                ethtool_cmd_speed(ecmd)))
1563                 return -EMSGSIZE;
1564         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1565                               ecmd->duplex))
1566                 return -EMSGSIZE;
1567         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1568                               ecmd->autoneg))
1569                 return -EMSGSIZE;
1570         rocker_tlv_nest_end(desc_info, cmd_info);
1571         return 0;
1572 }
1573
1574 static int
1575 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
1576                                           struct rocker_desc_info *desc_info,
1577                                           void *priv)
1578 {
1579         const unsigned char *macaddr = priv;
1580         struct rocker_tlv *cmd_info;
1581
1582         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1583                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1584                 return -EMSGSIZE;
1585         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1586         if (!cmd_info)
1587                 return -EMSGSIZE;
1588         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1589                                rocker_port->pport))
1590                 return -EMSGSIZE;
1591         if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1592                            ETH_ALEN, macaddr))
1593                 return -EMSGSIZE;
1594         rocker_tlv_nest_end(desc_info, cmd_info);
1595         return 0;
1596 }
1597
1598 static int
1599 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1600                                       struct rocker_desc_info *desc_info,
1601                                       void *priv)
1602 {
1603         int mtu = *(int *)priv;
1604         struct rocker_tlv *cmd_info;
1605
1606         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1607                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1608                 return -EMSGSIZE;
1609         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1610         if (!cmd_info)
1611                 return -EMSGSIZE;
1612         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1613                                rocker_port->pport))
1614                 return -EMSGSIZE;
1615         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1616                                mtu))
1617                 return -EMSGSIZE;
1618         rocker_tlv_nest_end(desc_info, cmd_info);
1619         return 0;
1620 }
1621
1622 static int
1623 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
1624                                   struct rocker_desc_info *desc_info,
1625                                   void *priv)
1626 {
1627         struct rocker_tlv *cmd_info;
1628
1629         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1630                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1631                 return -EMSGSIZE;
1632         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1633         if (!cmd_info)
1634                 return -EMSGSIZE;
1635         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1636                                rocker_port->pport))
1637                 return -EMSGSIZE;
1638         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1639                               !!(rocker_port->brport_flags & BR_LEARNING)))
1640                 return -EMSGSIZE;
1641         rocker_tlv_nest_end(desc_info, cmd_info);
1642         return 0;
1643 }
1644
1645 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1646                                                 struct ethtool_cmd *ecmd)
1647 {
1648         return rocker_cmd_exec(rocker_port, NULL, 0,
1649                                rocker_cmd_get_port_settings_prep, NULL,
1650                                rocker_cmd_get_port_settings_ethtool_proc,
1651                                ecmd);
1652 }
1653
1654 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1655                                                 unsigned char *macaddr)
1656 {
1657         return rocker_cmd_exec(rocker_port, NULL, 0,
1658                                rocker_cmd_get_port_settings_prep, NULL,
1659                                rocker_cmd_get_port_settings_macaddr_proc,
1660                                macaddr);
1661 }
1662
1663 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1664                                                 struct ethtool_cmd *ecmd)
1665 {
1666         return rocker_cmd_exec(rocker_port, NULL, 0,
1667                                rocker_cmd_set_port_settings_ethtool_prep,
1668                                ecmd, NULL, NULL);
1669 }
1670
1671 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1672                                                 unsigned char *macaddr)
1673 {
1674         return rocker_cmd_exec(rocker_port, NULL, 0,
1675                                rocker_cmd_set_port_settings_macaddr_prep,
1676                                macaddr, NULL, NULL);
1677 }
1678
1679 static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1680                                             int mtu)
1681 {
1682         return rocker_cmd_exec(rocker_port, NULL, 0,
1683                                rocker_cmd_set_port_settings_mtu_prep,
1684                                &mtu, NULL, NULL);
1685 }
1686
1687 static int rocker_port_set_learning(struct rocker_port *rocker_port,
1688                                     struct switchdev_trans *trans)
1689 {
1690         return rocker_cmd_exec(rocker_port, trans, 0,
1691                                rocker_cmd_set_port_learning_prep,
1692                                NULL, NULL, NULL);
1693 }
1694
1695 static int
1696 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1697                                 const struct rocker_flow_tbl_entry *entry)
1698 {
1699         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1700                                entry->key.ig_port.in_pport))
1701                 return -EMSGSIZE;
1702         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1703                                entry->key.ig_port.in_pport_mask))
1704                 return -EMSGSIZE;
1705         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1706                                entry->key.ig_port.goto_tbl))
1707                 return -EMSGSIZE;
1708
1709         return 0;
1710 }
1711
1712 static int
1713 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1714                              const struct rocker_flow_tbl_entry *entry)
1715 {
1716         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1717                                entry->key.vlan.in_pport))
1718                 return -EMSGSIZE;
1719         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1720                                 entry->key.vlan.vlan_id))
1721                 return -EMSGSIZE;
1722         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1723                                 entry->key.vlan.vlan_id_mask))
1724                 return -EMSGSIZE;
1725         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1726                                entry->key.vlan.goto_tbl))
1727                 return -EMSGSIZE;
1728         if (entry->key.vlan.untagged &&
1729             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1730                                 entry->key.vlan.new_vlan_id))
1731                 return -EMSGSIZE;
1732
1733         return 0;
1734 }
1735
1736 static int
1737 rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1738                                  const struct rocker_flow_tbl_entry *entry)
1739 {
1740         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1741                                entry->key.term_mac.in_pport))
1742                 return -EMSGSIZE;
1743         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1744                                entry->key.term_mac.in_pport_mask))
1745                 return -EMSGSIZE;
1746         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1747                                 entry->key.term_mac.eth_type))
1748                 return -EMSGSIZE;
1749         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1750                            ETH_ALEN, entry->key.term_mac.eth_dst))
1751                 return -EMSGSIZE;
1752         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1753                            ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1754                 return -EMSGSIZE;
1755         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1756                                 entry->key.term_mac.vlan_id))
1757                 return -EMSGSIZE;
1758         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1759                                 entry->key.term_mac.vlan_id_mask))
1760                 return -EMSGSIZE;
1761         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1762                                entry->key.term_mac.goto_tbl))
1763                 return -EMSGSIZE;
1764         if (entry->key.term_mac.copy_to_cpu &&
1765             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1766                               entry->key.term_mac.copy_to_cpu))
1767                 return -EMSGSIZE;
1768
1769         return 0;
1770 }
1771
1772 static int
1773 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
1774                                       const struct rocker_flow_tbl_entry *entry)
1775 {
1776         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1777                                 entry->key.ucast_routing.eth_type))
1778                 return -EMSGSIZE;
1779         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
1780                                 entry->key.ucast_routing.dst4))
1781                 return -EMSGSIZE;
1782         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
1783                                 entry->key.ucast_routing.dst4_mask))
1784                 return -EMSGSIZE;
1785         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1786                                entry->key.ucast_routing.goto_tbl))
1787                 return -EMSGSIZE;
1788         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1789                                entry->key.ucast_routing.group_id))
1790                 return -EMSGSIZE;
1791
1792         return 0;
1793 }
1794
1795 static int
1796 rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
1797                                const struct rocker_flow_tbl_entry *entry)
1798 {
1799         if (entry->key.bridge.has_eth_dst &&
1800             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1801                            ETH_ALEN, entry->key.bridge.eth_dst))
1802                 return -EMSGSIZE;
1803         if (entry->key.bridge.has_eth_dst_mask &&
1804             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1805                            ETH_ALEN, entry->key.bridge.eth_dst_mask))
1806                 return -EMSGSIZE;
1807         if (entry->key.bridge.vlan_id &&
1808             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1809                                 entry->key.bridge.vlan_id))
1810                 return -EMSGSIZE;
1811         if (entry->key.bridge.tunnel_id &&
1812             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
1813                                entry->key.bridge.tunnel_id))
1814                 return -EMSGSIZE;
1815         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1816                                entry->key.bridge.goto_tbl))
1817                 return -EMSGSIZE;
1818         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1819                                entry->key.bridge.group_id))
1820                 return -EMSGSIZE;
1821         if (entry->key.bridge.copy_to_cpu &&
1822             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1823                               entry->key.bridge.copy_to_cpu))
1824                 return -EMSGSIZE;
1825
1826         return 0;
1827 }
1828
1829 static int
1830 rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
1831                             const struct rocker_flow_tbl_entry *entry)
1832 {
1833         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1834                                entry->key.acl.in_pport))
1835                 return -EMSGSIZE;
1836         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1837                                entry->key.acl.in_pport_mask))
1838                 return -EMSGSIZE;
1839         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1840                            ETH_ALEN, entry->key.acl.eth_src))
1841                 return -EMSGSIZE;
1842         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
1843                            ETH_ALEN, entry->key.acl.eth_src_mask))
1844                 return -EMSGSIZE;
1845         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1846                            ETH_ALEN, entry->key.acl.eth_dst))
1847                 return -EMSGSIZE;
1848         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1849                            ETH_ALEN, entry->key.acl.eth_dst_mask))
1850                 return -EMSGSIZE;
1851         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1852                                 entry->key.acl.eth_type))
1853                 return -EMSGSIZE;
1854         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1855                                 entry->key.acl.vlan_id))
1856                 return -EMSGSIZE;
1857         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1858                                 entry->key.acl.vlan_id_mask))
1859                 return -EMSGSIZE;
1860
1861         switch (ntohs(entry->key.acl.eth_type)) {
1862         case ETH_P_IP:
1863         case ETH_P_IPV6:
1864                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
1865                                       entry->key.acl.ip_proto))
1866                         return -EMSGSIZE;
1867                 if (rocker_tlv_put_u8(desc_info,
1868                                       ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
1869                                       entry->key.acl.ip_proto_mask))
1870                         return -EMSGSIZE;
1871                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
1872                                       entry->key.acl.ip_tos & 0x3f))
1873                         return -EMSGSIZE;
1874                 if (rocker_tlv_put_u8(desc_info,
1875                                       ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
1876                                       entry->key.acl.ip_tos_mask & 0x3f))
1877                         return -EMSGSIZE;
1878                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
1879                                       (entry->key.acl.ip_tos & 0xc0) >> 6))
1880                         return -EMSGSIZE;
1881                 if (rocker_tlv_put_u8(desc_info,
1882                                       ROCKER_TLV_OF_DPA_IP_ECN_MASK,
1883                                       (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
1884                         return -EMSGSIZE;
1885                 break;
1886         }
1887
1888         if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
1889             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1890                                entry->key.acl.group_id))
1891                 return -EMSGSIZE;
1892
1893         return 0;
1894 }
1895
1896 static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
1897                                    struct rocker_desc_info *desc_info,
1898                                    void *priv)
1899 {
1900         const struct rocker_flow_tbl_entry *entry = priv;
1901         struct rocker_tlv *cmd_info;
1902         int err = 0;
1903
1904         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
1905                 return -EMSGSIZE;
1906         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1907         if (!cmd_info)
1908                 return -EMSGSIZE;
1909         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
1910                                entry->key.tbl_id))
1911                 return -EMSGSIZE;
1912         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
1913                                entry->key.priority))
1914                 return -EMSGSIZE;
1915         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
1916                 return -EMSGSIZE;
1917         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
1918                                entry->cookie))
1919                 return -EMSGSIZE;
1920
1921         switch (entry->key.tbl_id) {
1922         case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1923                 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
1924                 break;
1925         case ROCKER_OF_DPA_TABLE_ID_VLAN:
1926                 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
1927                 break;
1928         case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1929                 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
1930                 break;
1931         case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1932                 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
1933                 break;
1934         case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1935                 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
1936                 break;
1937         case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1938                 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
1939                 break;
1940         default:
1941                 err = -ENOTSUPP;
1942                 break;
1943         }
1944
1945         if (err)
1946                 return err;
1947
1948         rocker_tlv_nest_end(desc_info, cmd_info);
1949
1950         return 0;
1951 }
1952
1953 static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
1954                                    struct rocker_desc_info *desc_info,
1955                                    void *priv)
1956 {
1957         const struct rocker_flow_tbl_entry *entry = priv;
1958         struct rocker_tlv *cmd_info;
1959
1960         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
1961                 return -EMSGSIZE;
1962         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1963         if (!cmd_info)
1964                 return -EMSGSIZE;
1965         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
1966                                entry->cookie))
1967                 return -EMSGSIZE;
1968         rocker_tlv_nest_end(desc_info, cmd_info);
1969
1970         return 0;
1971 }
1972
1973 static int
1974 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
1975                                       struct rocker_group_tbl_entry *entry)
1976 {
1977         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
1978                                ROCKER_GROUP_PORT_GET(entry->group_id)))
1979                 return -EMSGSIZE;
1980         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
1981                               entry->l2_interface.pop_vlan))
1982                 return -EMSGSIZE;
1983
1984         return 0;
1985 }
1986
1987 static int
1988 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
1989                                     const struct rocker_group_tbl_entry *entry)
1990 {
1991         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
1992                                entry->l2_rewrite.group_id))
1993                 return -EMSGSIZE;
1994         if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
1995             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1996                            ETH_ALEN, entry->l2_rewrite.eth_src))
1997                 return -EMSGSIZE;
1998         if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
1999             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2000                            ETH_ALEN, entry->l2_rewrite.eth_dst))
2001                 return -EMSGSIZE;
2002         if (entry->l2_rewrite.vlan_id &&
2003             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2004                                 entry->l2_rewrite.vlan_id))
2005                 return -EMSGSIZE;
2006
2007         return 0;
2008 }
2009
2010 static int
2011 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2012                                    const struct rocker_group_tbl_entry *entry)
2013 {
2014         int i;
2015         struct rocker_tlv *group_ids;
2016
2017         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2018                                entry->group_count))
2019                 return -EMSGSIZE;
2020
2021         group_ids = rocker_tlv_nest_start(desc_info,
2022                                           ROCKER_TLV_OF_DPA_GROUP_IDS);
2023         if (!group_ids)
2024                 return -EMSGSIZE;
2025
2026         for (i = 0; i < entry->group_count; i++)
2027                 /* Note TLV array is 1-based */
2028                 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2029                         return -EMSGSIZE;
2030
2031         rocker_tlv_nest_end(desc_info, group_ids);
2032
2033         return 0;
2034 }
2035
2036 static int
2037 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2038                                     const struct rocker_group_tbl_entry *entry)
2039 {
2040         if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2041             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2042                            ETH_ALEN, entry->l3_unicast.eth_src))
2043                 return -EMSGSIZE;
2044         if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2045             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2046                            ETH_ALEN, entry->l3_unicast.eth_dst))
2047                 return -EMSGSIZE;
2048         if (entry->l3_unicast.vlan_id &&
2049             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2050                                 entry->l3_unicast.vlan_id))
2051                 return -EMSGSIZE;
2052         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2053                               entry->l3_unicast.ttl_check))
2054                 return -EMSGSIZE;
2055         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2056                                entry->l3_unicast.group_id))
2057                 return -EMSGSIZE;
2058
2059         return 0;
2060 }
2061
2062 static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
2063                                     struct rocker_desc_info *desc_info,
2064                                     void *priv)
2065 {
2066         struct rocker_group_tbl_entry *entry = priv;
2067         struct rocker_tlv *cmd_info;
2068         int err = 0;
2069
2070         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2071                 return -EMSGSIZE;
2072         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2073         if (!cmd_info)
2074                 return -EMSGSIZE;
2075
2076         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2077                                entry->group_id))
2078                 return -EMSGSIZE;
2079
2080         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2081         case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2082                 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2083                 break;
2084         case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2085                 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2086                 break;
2087         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2088         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2089                 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2090                 break;
2091         case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2092                 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2093                 break;
2094         default:
2095                 err = -ENOTSUPP;
2096                 break;
2097         }
2098
2099         if (err)
2100                 return err;
2101
2102         rocker_tlv_nest_end(desc_info, cmd_info);
2103
2104         return 0;
2105 }
2106
2107 static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
2108                                     struct rocker_desc_info *desc_info,
2109                                     void *priv)
2110 {
2111         const struct rocker_group_tbl_entry *entry = priv;
2112         struct rocker_tlv *cmd_info;
2113
2114         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2115                 return -EMSGSIZE;
2116         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2117         if (!cmd_info)
2118                 return -EMSGSIZE;
2119         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2120                                entry->group_id))
2121                 return -EMSGSIZE;
2122         rocker_tlv_nest_end(desc_info, cmd_info);
2123
2124         return 0;
2125 }
2126
2127 /***************************************************
2128  * Flow, group, FDB, internal VLAN and neigh tables
2129  ***************************************************/
2130
2131 static int rocker_init_tbls(struct rocker *rocker)
2132 {
2133         hash_init(rocker->flow_tbl);
2134         spin_lock_init(&rocker->flow_tbl_lock);
2135
2136         hash_init(rocker->group_tbl);
2137         spin_lock_init(&rocker->group_tbl_lock);
2138
2139         hash_init(rocker->fdb_tbl);
2140         spin_lock_init(&rocker->fdb_tbl_lock);
2141
2142         hash_init(rocker->internal_vlan_tbl);
2143         spin_lock_init(&rocker->internal_vlan_tbl_lock);
2144
2145         hash_init(rocker->neigh_tbl);
2146         spin_lock_init(&rocker->neigh_tbl_lock);
2147
2148         return 0;
2149 }
2150
2151 static void rocker_free_tbls(struct rocker *rocker)
2152 {
2153         unsigned long flags;
2154         struct rocker_flow_tbl_entry *flow_entry;
2155         struct rocker_group_tbl_entry *group_entry;
2156         struct rocker_fdb_tbl_entry *fdb_entry;
2157         struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2158         struct rocker_neigh_tbl_entry *neigh_entry;
2159         struct hlist_node *tmp;
2160         int bkt;
2161
2162         spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2163         hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2164                 hash_del(&flow_entry->entry);
2165         spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2166
2167         spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2168         hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2169                 hash_del(&group_entry->entry);
2170         spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2171
2172         spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2173         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2174                 hash_del(&fdb_entry->entry);
2175         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2176
2177         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2178         hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2179                            tmp, internal_vlan_entry, entry)
2180                 hash_del(&internal_vlan_entry->entry);
2181         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2182
2183         spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2184         hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2185                 hash_del(&neigh_entry->entry);
2186         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
2187 }
2188
2189 static struct rocker_flow_tbl_entry *
2190 rocker_flow_tbl_find(const struct rocker *rocker,
2191                      const struct rocker_flow_tbl_entry *match)
2192 {
2193         struct rocker_flow_tbl_entry *found;
2194         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2195
2196         hash_for_each_possible(rocker->flow_tbl, found,
2197                                entry, match->key_crc32) {
2198                 if (memcmp(&found->key, &match->key, key_len) == 0)
2199                         return found;
2200         }
2201
2202         return NULL;
2203 }
2204
2205 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2206                                struct switchdev_trans *trans, int flags,
2207                                struct rocker_flow_tbl_entry *match)
2208 {
2209         struct rocker *rocker = rocker_port->rocker;
2210         struct rocker_flow_tbl_entry *found;
2211         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2212         unsigned long lock_flags;
2213
2214         match->key_crc32 = crc32(~0, &match->key, key_len);
2215
2216         spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2217
2218         found = rocker_flow_tbl_find(rocker, match);
2219
2220         if (found) {
2221                 match->cookie = found->cookie;
2222                 if (!switchdev_trans_ph_prepare(trans))
2223                         hash_del(&found->entry);
2224                 rocker_kfree(trans, found);
2225                 found = match;
2226                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
2227         } else {
2228                 found = match;
2229                 found->cookie = rocker->flow_tbl_next_cookie++;
2230                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
2231         }
2232
2233         if (!switchdev_trans_ph_prepare(trans))
2234                 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2235
2236         spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
2237
2238         return rocker_cmd_exec(rocker_port, trans, flags,
2239                                rocker_cmd_flow_tbl_add, found, NULL, NULL);
2240 }
2241
2242 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2243                                struct switchdev_trans *trans, int flags,
2244                                struct rocker_flow_tbl_entry *match)
2245 {
2246         struct rocker *rocker = rocker_port->rocker;
2247         struct rocker_flow_tbl_entry *found;
2248         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2249         unsigned long lock_flags;
2250         int err = 0;
2251
2252         match->key_crc32 = crc32(~0, &match->key, key_len);
2253
2254         spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2255
2256         found = rocker_flow_tbl_find(rocker, match);
2257
2258         if (found) {
2259                 if (!switchdev_trans_ph_prepare(trans))
2260                         hash_del(&found->entry);
2261                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
2262         }
2263
2264         spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
2265
2266         rocker_kfree(trans, match);
2267
2268         if (found) {
2269                 err = rocker_cmd_exec(rocker_port, trans, flags,
2270                                       rocker_cmd_flow_tbl_del,
2271                                       found, NULL, NULL);
2272                 rocker_kfree(trans, found);
2273         }
2274
2275         return err;
2276 }
2277
2278 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2279                               struct switchdev_trans *trans, int flags,
2280                               struct rocker_flow_tbl_entry *entry)
2281 {
2282         if (flags & ROCKER_OP_FLAG_REMOVE)
2283                 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
2284         else
2285                 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
2286 }
2287
2288 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2289                                    struct switchdev_trans *trans, int flags,
2290                                    u32 in_pport, u32 in_pport_mask,
2291                                    enum rocker_of_dpa_table_id goto_tbl)
2292 {
2293         struct rocker_flow_tbl_entry *entry;
2294
2295         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2296         if (!entry)
2297                 return -ENOMEM;
2298
2299         entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2300         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2301         entry->key.ig_port.in_pport = in_pport;
2302         entry->key.ig_port.in_pport_mask = in_pport_mask;
2303         entry->key.ig_port.goto_tbl = goto_tbl;
2304
2305         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2306 }
2307
2308 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2309                                 struct switchdev_trans *trans, int flags,
2310                                 u32 in_pport, __be16 vlan_id,
2311                                 __be16 vlan_id_mask,
2312                                 enum rocker_of_dpa_table_id goto_tbl,
2313                                 bool untagged, __be16 new_vlan_id)
2314 {
2315         struct rocker_flow_tbl_entry *entry;
2316
2317         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2318         if (!entry)
2319                 return -ENOMEM;
2320
2321         entry->key.priority = ROCKER_PRIORITY_VLAN;
2322         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2323         entry->key.vlan.in_pport = in_pport;
2324         entry->key.vlan.vlan_id = vlan_id;
2325         entry->key.vlan.vlan_id_mask = vlan_id_mask;
2326         entry->key.vlan.goto_tbl = goto_tbl;
2327
2328         entry->key.vlan.untagged = untagged;
2329         entry->key.vlan.new_vlan_id = new_vlan_id;
2330
2331         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2332 }
2333
2334 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2335                                     struct switchdev_trans *trans,
2336                                     u32 in_pport, u32 in_pport_mask,
2337                                     __be16 eth_type, const u8 *eth_dst,
2338                                     const u8 *eth_dst_mask, __be16 vlan_id,
2339                                     __be16 vlan_id_mask, bool copy_to_cpu,
2340                                     int flags)
2341 {
2342         struct rocker_flow_tbl_entry *entry;
2343
2344         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2345         if (!entry)
2346                 return -ENOMEM;
2347
2348         if (is_multicast_ether_addr(eth_dst)) {
2349                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2350                 entry->key.term_mac.goto_tbl =
2351                          ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2352         } else {
2353                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2354                 entry->key.term_mac.goto_tbl =
2355                          ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2356         }
2357
2358         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2359         entry->key.term_mac.in_pport = in_pport;
2360         entry->key.term_mac.in_pport_mask = in_pport_mask;
2361         entry->key.term_mac.eth_type = eth_type;
2362         ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2363         ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2364         entry->key.term_mac.vlan_id = vlan_id;
2365         entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2366         entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2367
2368         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2369 }
2370
2371 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2372                                   struct switchdev_trans *trans, int flags,
2373                                   const u8 *eth_dst, const u8 *eth_dst_mask,
2374                                   __be16 vlan_id, u32 tunnel_id,
2375                                   enum rocker_of_dpa_table_id goto_tbl,
2376                                   u32 group_id, bool copy_to_cpu)
2377 {
2378         struct rocker_flow_tbl_entry *entry;
2379         u32 priority;
2380         bool vlan_bridging = !!vlan_id;
2381         bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2382         bool wild = false;
2383
2384         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2385         if (!entry)
2386                 return -ENOMEM;
2387
2388         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2389
2390         if (eth_dst) {
2391                 entry->key.bridge.has_eth_dst = 1;
2392                 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2393         }
2394         if (eth_dst_mask) {
2395                 entry->key.bridge.has_eth_dst_mask = 1;
2396                 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2397                 if (!ether_addr_equal(eth_dst_mask, ff_mac))
2398                         wild = true;
2399         }
2400
2401         priority = ROCKER_PRIORITY_UNKNOWN;
2402         if (vlan_bridging && dflt && wild)
2403                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2404         else if (vlan_bridging && dflt && !wild)
2405                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2406         else if (vlan_bridging && !dflt)
2407                 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2408         else if (!vlan_bridging && dflt && wild)
2409                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2410         else if (!vlan_bridging && dflt && !wild)
2411                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2412         else if (!vlan_bridging && !dflt)
2413                 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2414
2415         entry->key.priority = priority;
2416         entry->key.bridge.vlan_id = vlan_id;
2417         entry->key.bridge.tunnel_id = tunnel_id;
2418         entry->key.bridge.goto_tbl = goto_tbl;
2419         entry->key.bridge.group_id = group_id;
2420         entry->key.bridge.copy_to_cpu = copy_to_cpu;
2421
2422         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2423 }
2424
2425 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2426                                           struct switchdev_trans *trans,
2427                                           __be16 eth_type, __be32 dst,
2428                                           __be32 dst_mask, u32 priority,
2429                                           enum rocker_of_dpa_table_id goto_tbl,
2430                                           u32 group_id, int flags)
2431 {
2432         struct rocker_flow_tbl_entry *entry;
2433
2434         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2435         if (!entry)
2436                 return -ENOMEM;
2437
2438         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2439         entry->key.priority = priority;
2440         entry->key.ucast_routing.eth_type = eth_type;
2441         entry->key.ucast_routing.dst4 = dst;
2442         entry->key.ucast_routing.dst4_mask = dst_mask;
2443         entry->key.ucast_routing.goto_tbl = goto_tbl;
2444         entry->key.ucast_routing.group_id = group_id;
2445         entry->key_len = offsetof(struct rocker_flow_tbl_key,
2446                                   ucast_routing.group_id);
2447
2448         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2449 }
2450
2451 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2452                                struct switchdev_trans *trans, int flags,
2453                                u32 in_pport, u32 in_pport_mask,
2454                                const u8 *eth_src, const u8 *eth_src_mask,
2455                                const u8 *eth_dst, const u8 *eth_dst_mask,
2456                                __be16 eth_type, __be16 vlan_id,
2457                                __be16 vlan_id_mask, u8 ip_proto,
2458                                u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2459                                u32 group_id)
2460 {
2461         u32 priority;
2462         struct rocker_flow_tbl_entry *entry;
2463
2464         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2465         if (!entry)
2466                 return -ENOMEM;
2467
2468         priority = ROCKER_PRIORITY_ACL_NORMAL;
2469         if (eth_dst && eth_dst_mask) {
2470                 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2471                         priority = ROCKER_PRIORITY_ACL_DFLT;
2472                 else if (is_link_local_ether_addr(eth_dst))
2473                         priority = ROCKER_PRIORITY_ACL_CTRL;
2474         }
2475
2476         entry->key.priority = priority;
2477         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2478         entry->key.acl.in_pport = in_pport;
2479         entry->key.acl.in_pport_mask = in_pport_mask;
2480
2481         if (eth_src)
2482                 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2483         if (eth_src_mask)
2484                 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2485         if (eth_dst)
2486                 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2487         if (eth_dst_mask)
2488                 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2489
2490         entry->key.acl.eth_type = eth_type;
2491         entry->key.acl.vlan_id = vlan_id;
2492         entry->key.acl.vlan_id_mask = vlan_id_mask;
2493         entry->key.acl.ip_proto = ip_proto;
2494         entry->key.acl.ip_proto_mask = ip_proto_mask;
2495         entry->key.acl.ip_tos = ip_tos;
2496         entry->key.acl.ip_tos_mask = ip_tos_mask;
2497         entry->key.acl.group_id = group_id;
2498
2499         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2500 }
2501
2502 static struct rocker_group_tbl_entry *
2503 rocker_group_tbl_find(const struct rocker *rocker,
2504                       const struct rocker_group_tbl_entry *match)
2505 {
2506         struct rocker_group_tbl_entry *found;
2507
2508         hash_for_each_possible(rocker->group_tbl, found,
2509                                entry, match->group_id) {
2510                 if (found->group_id == match->group_id)
2511                         return found;
2512         }
2513
2514         return NULL;
2515 }
2516
2517 static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
2518                                         struct rocker_group_tbl_entry *entry)
2519 {
2520         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2521         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2522         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2523                 rocker_kfree(trans, entry->group_ids);
2524                 break;
2525         default:
2526                 break;
2527         }
2528         rocker_kfree(trans, entry);
2529 }
2530
2531 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2532                                 struct switchdev_trans *trans, int flags,
2533                                 struct rocker_group_tbl_entry *match)
2534 {
2535         struct rocker *rocker = rocker_port->rocker;
2536         struct rocker_group_tbl_entry *found;
2537         unsigned long lock_flags;
2538
2539         spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2540
2541         found = rocker_group_tbl_find(rocker, match);
2542
2543         if (found) {
2544                 if (!switchdev_trans_ph_prepare(trans))
2545                         hash_del(&found->entry);
2546                 rocker_group_tbl_entry_free(trans, found);
2547                 found = match;
2548                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2549         } else {
2550                 found = match;
2551                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2552         }
2553
2554         if (!switchdev_trans_ph_prepare(trans))
2555                 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2556
2557         spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2558
2559         return rocker_cmd_exec(rocker_port, trans, flags,
2560                                rocker_cmd_group_tbl_add, found, NULL, NULL);
2561 }
2562
2563 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2564                                 struct switchdev_trans *trans, int flags,
2565                                 struct rocker_group_tbl_entry *match)
2566 {
2567         struct rocker *rocker = rocker_port->rocker;
2568         struct rocker_group_tbl_entry *found;
2569         unsigned long lock_flags;
2570         int err = 0;
2571
2572         spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2573
2574         found = rocker_group_tbl_find(rocker, match);
2575
2576         if (found) {
2577                 if (!switchdev_trans_ph_prepare(trans))
2578                         hash_del(&found->entry);
2579                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2580         }
2581
2582         spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2583
2584         rocker_group_tbl_entry_free(trans, match);
2585
2586         if (found) {
2587                 err = rocker_cmd_exec(rocker_port, trans, flags,
2588                                       rocker_cmd_group_tbl_del,
2589                                       found, NULL, NULL);
2590                 rocker_group_tbl_entry_free(trans, found);
2591         }
2592
2593         return err;
2594 }
2595
2596 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2597                                struct switchdev_trans *trans, int flags,
2598                                struct rocker_group_tbl_entry *entry)
2599 {
2600         if (flags & ROCKER_OP_FLAG_REMOVE)
2601                 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
2602         else
2603                 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
2604 }
2605
2606 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2607                                      struct switchdev_trans *trans, int flags,
2608                                      __be16 vlan_id, u32 out_pport,
2609                                      int pop_vlan)
2610 {
2611         struct rocker_group_tbl_entry *entry;
2612
2613         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2614         if (!entry)
2615                 return -ENOMEM;
2616
2617         entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2618         entry->l2_interface.pop_vlan = pop_vlan;
2619
2620         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2621 }
2622
2623 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2624                                    struct switchdev_trans *trans,
2625                                    int flags, u8 group_count,
2626                                    const u32 *group_ids, u32 group_id)
2627 {
2628         struct rocker_group_tbl_entry *entry;
2629
2630         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2631         if (!entry)
2632                 return -ENOMEM;
2633
2634         entry->group_id = group_id;
2635         entry->group_count = group_count;
2636
2637         entry->group_ids = rocker_kcalloc(trans, flags,
2638                                           group_count, sizeof(u32));
2639         if (!entry->group_ids) {
2640                 rocker_kfree(trans, entry);
2641                 return -ENOMEM;
2642         }
2643         memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2644
2645         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2646 }
2647
2648 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2649                                  struct switchdev_trans *trans, int flags,
2650                                  __be16 vlan_id, u8 group_count,
2651                                  const u32 *group_ids, u32 group_id)
2652 {
2653         return rocker_group_l2_fan_out(rocker_port, trans, flags,
2654                                        group_count, group_ids,
2655                                        group_id);
2656 }
2657
2658 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2659                                    struct switchdev_trans *trans, int flags,
2660                                    u32 index, const u8 *src_mac, const u8 *dst_mac,
2661                                    __be16 vlan_id, bool ttl_check, u32 pport)
2662 {
2663         struct rocker_group_tbl_entry *entry;
2664
2665         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2666         if (!entry)
2667                 return -ENOMEM;
2668
2669         entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2670         if (src_mac)
2671                 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2672         if (dst_mac)
2673                 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2674         entry->l3_unicast.vlan_id = vlan_id;
2675         entry->l3_unicast.ttl_check = ttl_check;
2676         entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2677
2678         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2679 }
2680
2681 static struct rocker_neigh_tbl_entry *
2682 rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
2683 {
2684         struct rocker_neigh_tbl_entry *found;
2685
2686         hash_for_each_possible(rocker->neigh_tbl, found,
2687                                entry, be32_to_cpu(ip_addr))
2688                 if (found->ip_addr == ip_addr)
2689                         return found;
2690
2691         return NULL;
2692 }
2693
2694 static void _rocker_neigh_add(struct rocker *rocker,
2695                               struct switchdev_trans *trans,
2696                               struct rocker_neigh_tbl_entry *entry)
2697 {
2698         if (!switchdev_trans_ph_commit(trans))
2699                 entry->index = rocker->neigh_tbl_next_index++;
2700         if (switchdev_trans_ph_prepare(trans))
2701                 return;
2702         entry->ref_count++;
2703         hash_add(rocker->neigh_tbl, &entry->entry,
2704                  be32_to_cpu(entry->ip_addr));
2705 }
2706
2707 static void _rocker_neigh_del(struct switchdev_trans *trans,
2708                               struct rocker_neigh_tbl_entry *entry)
2709 {
2710         if (switchdev_trans_ph_prepare(trans))
2711                 return;
2712         if (--entry->ref_count == 0) {
2713                 hash_del(&entry->entry);
2714                 rocker_kfree(trans, entry);
2715         }
2716 }
2717
2718 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
2719                                  struct switchdev_trans *trans,
2720                                  const u8 *eth_dst, bool ttl_check)
2721 {
2722         if (eth_dst) {
2723                 ether_addr_copy(entry->eth_dst, eth_dst);
2724                 entry->ttl_check = ttl_check;
2725         } else if (!switchdev_trans_ph_prepare(trans)) {
2726                 entry->ref_count++;
2727         }
2728 }
2729
2730 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
2731                                   struct switchdev_trans *trans,
2732                                   int flags, __be32 ip_addr, const u8 *eth_dst)
2733 {
2734         struct rocker *rocker = rocker_port->rocker;
2735         struct rocker_neigh_tbl_entry *entry;
2736         struct rocker_neigh_tbl_entry *found;
2737         unsigned long lock_flags;
2738         __be16 eth_type = htons(ETH_P_IP);
2739         enum rocker_of_dpa_table_id goto_tbl =
2740                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2741         u32 group_id;
2742         u32 priority = 0;
2743         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2744         bool updating;
2745         bool removing;
2746         int err = 0;
2747
2748         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2749         if (!entry)
2750                 return -ENOMEM;
2751
2752         spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2753
2754         found = rocker_neigh_tbl_find(rocker, ip_addr);
2755
2756         updating = found && adding;
2757         removing = found && !adding;
2758         adding = !found && adding;
2759
2760         if (adding) {
2761                 entry->ip_addr = ip_addr;
2762                 entry->dev = rocker_port->dev;
2763                 ether_addr_copy(entry->eth_dst, eth_dst);
2764                 entry->ttl_check = true;
2765                 _rocker_neigh_add(rocker, trans, entry);
2766         } else if (removing) {
2767                 memcpy(entry, found, sizeof(*entry));
2768                 _rocker_neigh_del(trans, found);
2769         } else if (updating) {
2770                 _rocker_neigh_update(found, trans, eth_dst, true);
2771                 memcpy(entry, found, sizeof(*entry));
2772         } else {
2773                 err = -ENOENT;
2774         }
2775
2776         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2777
2778         if (err)
2779                 goto err_out;
2780
2781         /* For each active neighbor, we have an L3 unicast group and
2782          * a /32 route to the neighbor, which uses the L3 unicast
2783          * group.  The L3 unicast group can also be referred to by
2784          * other routes' nexthops.
2785          */
2786
2787         err = rocker_group_l3_unicast(rocker_port, trans, flags,
2788                                       entry->index,
2789                                       rocker_port->dev->dev_addr,
2790                                       entry->eth_dst,
2791                                       rocker_port->internal_vlan_id,
2792                                       entry->ttl_check,
2793                                       rocker_port->pport);
2794         if (err) {
2795                 netdev_err(rocker_port->dev,
2796                            "Error (%d) L3 unicast group index %d\n",
2797                            err, entry->index);
2798                 goto err_out;
2799         }
2800
2801         if (adding || removing) {
2802                 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
2803                 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
2804                                                      eth_type, ip_addr,
2805                                                      inet_make_mask(32),
2806                                                      priority, goto_tbl,
2807                                                      group_id, flags);
2808
2809                 if (err)
2810                         netdev_err(rocker_port->dev,
2811                                    "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
2812                                    err, &entry->ip_addr, group_id);
2813         }
2814
2815 err_out:
2816         if (!adding)
2817                 rocker_kfree(trans, entry);
2818
2819         return err;
2820 }
2821
2822 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
2823                                     struct switchdev_trans *trans,
2824                                     __be32 ip_addr)
2825 {
2826         struct net_device *dev = rocker_port->dev;
2827         struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
2828         int err = 0;
2829
2830         if (!n) {
2831                 n = neigh_create(&arp_tbl, &ip_addr, dev);
2832                 if (IS_ERR(n))
2833                         return IS_ERR(n);
2834         }
2835
2836         /* If the neigh is already resolved, then go ahead and
2837          * install the entry, otherwise start the ARP process to
2838          * resolve the neigh.
2839          */
2840
2841         if (n->nud_state & NUD_VALID)
2842                 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
2843                                              ip_addr, n->ha);
2844         else
2845                 neigh_event_send(n, NULL);
2846
2847         neigh_release(n);
2848         return err;
2849 }
2850
2851 static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
2852                                struct switchdev_trans *trans, int flags,
2853                                __be32 ip_addr, u32 *index)
2854 {
2855         struct rocker *rocker = rocker_port->rocker;
2856         struct rocker_neigh_tbl_entry *entry;
2857         struct rocker_neigh_tbl_entry *found;
2858         unsigned long lock_flags;
2859         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2860         bool updating;
2861         bool removing;
2862         bool resolved = true;
2863         int err = 0;
2864
2865         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2866         if (!entry)
2867                 return -ENOMEM;
2868
2869         spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2870
2871         found = rocker_neigh_tbl_find(rocker, ip_addr);
2872         if (found)
2873                 *index = found->index;
2874
2875         updating = found && adding;
2876         removing = found && !adding;
2877         adding = !found && adding;
2878
2879         if (adding) {
2880                 entry->ip_addr = ip_addr;
2881                 entry->dev = rocker_port->dev;
2882                 _rocker_neigh_add(rocker, trans, entry);
2883                 *index = entry->index;
2884                 resolved = false;
2885         } else if (removing) {
2886                 _rocker_neigh_del(trans, found);
2887         } else if (updating) {
2888                 _rocker_neigh_update(found, trans, NULL, false);
2889                 resolved = !is_zero_ether_addr(found->eth_dst);
2890         } else {
2891                 err = -ENOENT;
2892         }
2893
2894         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2895
2896         if (!adding)
2897                 rocker_kfree(trans, entry);
2898
2899         if (err)
2900                 return err;
2901
2902         /* Resolved means neigh ip_addr is resolved to neigh mac. */
2903
2904         if (!resolved)
2905                 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
2906
2907         return err;
2908 }
2909
2910 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
2911                                         struct switchdev_trans *trans,
2912                                         int flags, __be16 vlan_id)
2913 {
2914         struct rocker_port *p;
2915         const struct rocker *rocker = rocker_port->rocker;
2916         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2917         u32 *group_ids;
2918         u8 group_count = 0;
2919         int err = 0;
2920         int i;
2921
2922         group_ids = rocker_kcalloc(trans, flags,
2923                                    rocker->port_count, sizeof(u32));
2924         if (!group_ids)
2925                 return -ENOMEM;
2926
2927         /* Adjust the flood group for this VLAN.  The flood group
2928          * references an L2 interface group for each port in this
2929          * VLAN.
2930          */
2931
2932         for (i = 0; i < rocker->port_count; i++) {
2933                 p = rocker->ports[i];
2934                 if (!p)
2935                         continue;
2936                 if (!rocker_port_is_bridged(p))
2937                         continue;
2938                 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
2939                         group_ids[group_count++] =
2940                                 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
2941                 }
2942         }
2943
2944         /* If there are no bridged ports in this VLAN, we're done */
2945         if (group_count == 0)
2946                 goto no_ports_in_vlan;
2947
2948         err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
2949                                     group_count, group_ids, group_id);
2950         if (err)
2951                 netdev_err(rocker_port->dev,
2952                            "Error (%d) port VLAN l2 flood group\n", err);
2953
2954 no_ports_in_vlan:
2955         rocker_kfree(trans, group_ids);
2956         return err;
2957 }
2958
2959 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
2960                                       struct switchdev_trans *trans, int flags,
2961                                       __be16 vlan_id, bool pop_vlan)
2962 {
2963         const struct rocker *rocker = rocker_port->rocker;
2964         struct rocker_port *p;
2965         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2966         u32 out_pport;
2967         int ref = 0;
2968         int err;
2969         int i;
2970
2971         /* An L2 interface group for this port in this VLAN, but
2972          * only when port STP state is LEARNING|FORWARDING.
2973          */
2974
2975         if (rocker_port->stp_state == BR_STATE_LEARNING ||
2976             rocker_port->stp_state == BR_STATE_FORWARDING) {
2977                 out_pport = rocker_port->pport;
2978                 err = rocker_group_l2_interface(rocker_port, trans, flags,
2979                                                 vlan_id, out_pport, pop_vlan);
2980                 if (err) {
2981                         netdev_err(rocker_port->dev,
2982                                    "Error (%d) port VLAN l2 group for pport %d\n",
2983                                    err, out_pport);
2984                         return err;
2985                 }
2986         }
2987
2988         /* An L2 interface group for this VLAN to CPU port.
2989          * Add when first port joins this VLAN and destroy when
2990          * last port leaves this VLAN.
2991          */
2992
2993         for (i = 0; i < rocker->port_count; i++) {
2994                 p = rocker->ports[i];
2995                 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
2996                         ref++;
2997         }
2998
2999         if ((!adding || ref != 1) && (adding || ref != 0))
3000                 return 0;
3001
3002         out_pport = 0;
3003         err = rocker_group_l2_interface(rocker_port, trans, flags,
3004                                         vlan_id, out_pport, pop_vlan);
3005         if (err) {
3006                 netdev_err(rocker_port->dev,
3007                            "Error (%d) port VLAN l2 group for CPU port\n", err);
3008                 return err;
3009         }
3010
3011         return 0;
3012 }
3013
3014 static struct rocker_ctrl {
3015         const u8 *eth_dst;
3016         const u8 *eth_dst_mask;
3017         __be16 eth_type;
3018         bool acl;
3019         bool bridge;
3020         bool term;
3021         bool copy_to_cpu;
3022 } rocker_ctrls[] = {
3023         [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3024                 /* pass link local multicast pkts up to CPU for filtering */
3025                 .eth_dst = ll_mac,
3026                 .eth_dst_mask = ll_mask,
3027                 .acl = true,
3028         },
3029         [ROCKER_CTRL_LOCAL_ARP] = {
3030                 /* pass local ARP pkts up to CPU */
3031                 .eth_dst = zero_mac,
3032                 .eth_dst_mask = zero_mac,
3033                 .eth_type = htons(ETH_P_ARP),
3034                 .acl = true,
3035         },
3036         [ROCKER_CTRL_IPV4_MCAST] = {
3037                 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3038                 .eth_dst = ipv4_mcast,
3039                 .eth_dst_mask = ipv4_mask,
3040                 .eth_type = htons(ETH_P_IP),
3041                 .term  = true,
3042                 .copy_to_cpu = true,
3043         },
3044         [ROCKER_CTRL_IPV6_MCAST] = {
3045                 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3046                 .eth_dst = ipv6_mcast,
3047                 .eth_dst_mask = ipv6_mask,
3048                 .eth_type = htons(ETH_P_IPV6),
3049                 .term  = true,
3050                 .copy_to_cpu = true,
3051         },
3052         [ROCKER_CTRL_DFLT_BRIDGING] = {
3053                 /* flood any pkts on vlan */
3054                 .bridge = true,
3055                 .copy_to_cpu = true,
3056         },
3057         [ROCKER_CTRL_DFLT_OVS] = {
3058                 /* pass all pkts up to CPU */
3059                 .eth_dst = zero_mac,
3060                 .eth_dst_mask = zero_mac,
3061                 .acl = true,
3062         },
3063 };
3064
3065 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3066                                      struct switchdev_trans *trans, int flags,
3067                                      const struct rocker_ctrl *ctrl, __be16 vlan_id)
3068 {
3069         u32 in_pport = rocker_port->pport;
3070         u32 in_pport_mask = 0xffffffff;
3071         u32 out_pport = 0;
3072         const u8 *eth_src = NULL;
3073         const u8 *eth_src_mask = NULL;
3074         __be16 vlan_id_mask = htons(0xffff);
3075         u8 ip_proto = 0;
3076         u8 ip_proto_mask = 0;
3077         u8 ip_tos = 0;
3078         u8 ip_tos_mask = 0;
3079         u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3080         int err;
3081
3082         err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3083                                   in_pport, in_pport_mask,
3084                                   eth_src, eth_src_mask,
3085                                   ctrl->eth_dst, ctrl->eth_dst_mask,
3086                                   ctrl->eth_type,
3087                                   vlan_id, vlan_id_mask,
3088                                   ip_proto, ip_proto_mask,
3089                                   ip_tos, ip_tos_mask,
3090                                   group_id);
3091
3092         if (err)
3093                 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3094
3095         return err;
3096 }
3097
3098 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3099                                         struct switchdev_trans *trans,
3100                                         int flags,
3101                                         const struct rocker_ctrl *ctrl,
3102                                         __be16 vlan_id)
3103 {
3104         enum rocker_of_dpa_table_id goto_tbl =
3105                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3106         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3107         u32 tunnel_id = 0;
3108         int err;
3109
3110         if (!rocker_port_is_bridged(rocker_port))
3111                 return 0;
3112
3113         err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3114                                      ctrl->eth_dst, ctrl->eth_dst_mask,
3115                                      vlan_id, tunnel_id,
3116                                      goto_tbl, group_id, ctrl->copy_to_cpu);
3117
3118         if (err)
3119                 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3120
3121         return err;
3122 }
3123
3124 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3125                                       struct switchdev_trans *trans, int flags,
3126                                       const struct rocker_ctrl *ctrl, __be16 vlan_id)
3127 {
3128         u32 in_pport_mask = 0xffffffff;
3129         __be16 vlan_id_mask = htons(0xffff);
3130         int err;
3131
3132         if (ntohs(vlan_id) == 0)
3133                 vlan_id = rocker_port->internal_vlan_id;
3134
3135         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3136                                        rocker_port->pport, in_pport_mask,
3137                                        ctrl->eth_type, ctrl->eth_dst,
3138                                        ctrl->eth_dst_mask, vlan_id,
3139                                        vlan_id_mask, ctrl->copy_to_cpu,
3140                                        flags);
3141
3142         if (err)
3143                 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3144
3145         return err;
3146 }
3147
3148 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3149                                  struct switchdev_trans *trans, int flags,
3150                                  const struct rocker_ctrl *ctrl, __be16 vlan_id)
3151 {
3152         if (ctrl->acl)
3153                 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3154                                                  ctrl, vlan_id);
3155         if (ctrl->bridge)
3156                 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3157                                                     ctrl, vlan_id);
3158
3159         if (ctrl->term)
3160                 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
3161                                                   ctrl, vlan_id);
3162
3163         return -EOPNOTSUPP;
3164 }
3165
3166 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3167                                      struct switchdev_trans *trans, int flags,
3168                                      __be16 vlan_id)
3169 {
3170         int err = 0;
3171         int i;
3172
3173         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3174                 if (rocker_port->ctrls[i]) {
3175                         err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3176                                                     &rocker_ctrls[i], vlan_id);
3177                         if (err)
3178                                 return err;
3179                 }
3180         }
3181
3182         return err;
3183 }
3184
3185 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3186                             struct switchdev_trans *trans, int flags,
3187                             const struct rocker_ctrl *ctrl)
3188 {
3189         u16 vid;
3190         int err = 0;
3191
3192         for (vid = 1; vid < VLAN_N_VID; vid++) {
3193                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3194                         continue;
3195                 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3196                                             ctrl, htons(vid));
3197                 if (err)
3198                         break;
3199         }
3200
3201         return err;
3202 }
3203
3204 static int rocker_port_vlan(struct rocker_port *rocker_port,
3205                             struct switchdev_trans *trans, int flags, u16 vid)
3206 {
3207         enum rocker_of_dpa_table_id goto_tbl =
3208                 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3209         u32 in_pport = rocker_port->pport;
3210         __be16 vlan_id = htons(vid);
3211         __be16 vlan_id_mask = htons(0xffff);
3212         __be16 internal_vlan_id;
3213         bool untagged;
3214         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3215         int err;
3216
3217         internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3218
3219         if (adding && test_bit(ntohs(internal_vlan_id),
3220                                rocker_port->vlan_bitmap))
3221                 return 0; /* already added */
3222         else if (!adding && !test_bit(ntohs(internal_vlan_id),
3223                                       rocker_port->vlan_bitmap))
3224                 return 0; /* already removed */
3225
3226         change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3227
3228         if (adding) {
3229                 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
3230                                                 internal_vlan_id);
3231                 if (err) {
3232                         netdev_err(rocker_port->dev,
3233                                    "Error (%d) port ctrl vlan add\n", err);
3234                         goto err_out;
3235                 }
3236         }
3237
3238         err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
3239                                          internal_vlan_id, untagged);
3240         if (err) {
3241                 netdev_err(rocker_port->dev,
3242                            "Error (%d) port VLAN l2 groups\n", err);
3243                 goto err_out;
3244         }
3245
3246         err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
3247                                            internal_vlan_id);
3248         if (err) {
3249                 netdev_err(rocker_port->dev,
3250                            "Error (%d) port VLAN l2 flood group\n", err);
3251                 goto err_out;
3252         }
3253
3254         err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
3255                                    in_pport, vlan_id, vlan_id_mask,
3256                                    goto_tbl, untagged, internal_vlan_id);
3257         if (err)
3258                 netdev_err(rocker_port->dev,
3259                            "Error (%d) port VLAN table\n", err);
3260
3261 err_out:
3262         if (switchdev_trans_ph_prepare(trans))
3263                 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3264
3265         return err;
3266 }
3267
3268 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3269                               struct switchdev_trans *trans, int flags)
3270 {
3271         enum rocker_of_dpa_table_id goto_tbl;
3272         u32 in_pport;
3273         u32 in_pport_mask;
3274         int err;
3275
3276         /* Normal Ethernet Frames.  Matches pkts from any local physical
3277          * ports.  Goto VLAN tbl.
3278          */
3279
3280         in_pport = 0;
3281         in_pport_mask = 0xffff0000;
3282         goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3283
3284         err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3285                                       in_pport, in_pport_mask,
3286                                       goto_tbl);
3287         if (err)
3288                 netdev_err(rocker_port->dev,
3289                            "Error (%d) ingress port table entry\n", err);
3290
3291         return err;
3292 }
3293
3294 struct rocker_fdb_learn_work {
3295         struct work_struct work;
3296         struct rocker_port *rocker_port;
3297         struct switchdev_trans *trans;
3298         int flags;
3299         u8 addr[ETH_ALEN];
3300         u16 vid;
3301 };
3302
3303 static void rocker_port_fdb_learn_work(struct work_struct *work)
3304 {
3305         const struct rocker_fdb_learn_work *lw =
3306                 container_of(work, struct rocker_fdb_learn_work, work);
3307         bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3308         bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3309         struct switchdev_notifier_fdb_info info;
3310
3311         info.addr = lw->addr;
3312         info.vid = lw->vid;
3313
3314         rtnl_lock();
3315         if (learned && removing)
3316                 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
3317                                          lw->rocker_port->dev, &info.info);
3318         else if (learned && !removing)
3319                 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
3320                                          lw->rocker_port->dev, &info.info);
3321         rtnl_unlock();
3322
3323         rocker_kfree(lw->trans, work);
3324 }
3325
3326 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3327                                  struct switchdev_trans *trans, int flags,
3328                                  const u8 *addr, __be16 vlan_id)
3329 {
3330         struct rocker_fdb_learn_work *lw;
3331         enum rocker_of_dpa_table_id goto_tbl =
3332                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3333         u32 out_pport = rocker_port->pport;
3334         u32 tunnel_id = 0;
3335         u32 group_id = ROCKER_GROUP_NONE;
3336         bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3337         bool copy_to_cpu = false;
3338         int err;
3339
3340         if (rocker_port_is_bridged(rocker_port))
3341                 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3342
3343         if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3344                 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3345                                              NULL, vlan_id, tunnel_id, goto_tbl,
3346                                              group_id, copy_to_cpu);
3347                 if (err)
3348                         return err;
3349         }
3350
3351         if (!syncing)
3352                 return 0;
3353
3354         if (!rocker_port_is_bridged(rocker_port))
3355                 return 0;
3356
3357         lw = rocker_kzalloc(trans, flags, sizeof(*lw));
3358         if (!lw)
3359                 return -ENOMEM;
3360
3361         INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3362
3363         lw->rocker_port = rocker_port;
3364         lw->trans = trans;
3365         lw->flags = flags;
3366         ether_addr_copy(lw->addr, addr);
3367         lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3368
3369         if (switchdev_trans_ph_prepare(trans))
3370                 rocker_kfree(trans, lw);
3371         else
3372                 schedule_work(&lw->work);
3373
3374         return 0;
3375 }
3376
3377 static struct rocker_fdb_tbl_entry *
3378 rocker_fdb_tbl_find(const struct rocker *rocker,
3379                     const struct rocker_fdb_tbl_entry *match)
3380 {
3381         struct rocker_fdb_tbl_entry *found;
3382
3383         hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3384                 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3385                         return found;
3386
3387         return NULL;
3388 }
3389
3390 static int rocker_port_fdb(struct rocker_port *rocker_port,
3391                            struct switchdev_trans *trans,
3392                            const unsigned char *addr,
3393                            __be16 vlan_id, int flags)
3394 {
3395         struct rocker *rocker = rocker_port->rocker;
3396         struct rocker_fdb_tbl_entry *fdb;
3397         struct rocker_fdb_tbl_entry *found;
3398         bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3399         unsigned long lock_flags;
3400
3401         fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
3402         if (!fdb)
3403                 return -ENOMEM;
3404
3405         fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3406         fdb->touched = jiffies;
3407         fdb->key.rocker_port = rocker_port;
3408         ether_addr_copy(fdb->key.addr, addr);
3409         fdb->key.vlan_id = vlan_id;
3410         fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3411
3412         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3413
3414         found = rocker_fdb_tbl_find(rocker, fdb);
3415
3416         if (found) {
3417                 found->touched = jiffies;
3418                 if (removing) {
3419                         rocker_kfree(trans, fdb);
3420                         if (!switchdev_trans_ph_prepare(trans))
3421                                 hash_del(&found->entry);
3422                 }
3423         } else if (!removing) {
3424                 if (!switchdev_trans_ph_prepare(trans))
3425                         hash_add(rocker->fdb_tbl, &fdb->entry,
3426                                  fdb->key_crc32);
3427         }
3428
3429         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3430
3431         /* Check if adding and already exists, or removing and can't find */
3432         if (!found != !removing) {
3433                 rocker_kfree(trans, fdb);
3434                 if (!found && removing)
3435                         return 0;
3436                 /* Refreshing existing to update aging timers */
3437                 flags |= ROCKER_OP_FLAG_REFRESH;
3438         }
3439
3440         return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
3441 }
3442
3443 static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
3444                                  struct switchdev_trans *trans, int flags)
3445 {
3446         struct rocker *rocker = rocker_port->rocker;
3447         struct rocker_fdb_tbl_entry *found;
3448         unsigned long lock_flags;
3449         struct hlist_node *tmp;
3450         int bkt;
3451         int err = 0;
3452
3453         if (rocker_port->stp_state == BR_STATE_LEARNING ||
3454             rocker_port->stp_state == BR_STATE_FORWARDING)
3455                 return 0;
3456
3457         flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3458
3459         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3460
3461         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3462                 if (found->key.rocker_port != rocker_port)
3463                         continue;
3464                 if (!found->learned)
3465                         continue;
3466                 err = rocker_port_fdb_learn(rocker_port, trans, flags,
3467                                             found->key.addr,
3468                                             found->key.vlan_id);
3469                 if (err)
3470                         goto err_out;
3471                 if (!switchdev_trans_ph_prepare(trans))
3472                         hash_del(&found->entry);
3473         }
3474
3475 err_out:
3476         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3477
3478         return err;
3479 }
3480
3481 static void rocker_fdb_cleanup(unsigned long data)
3482 {
3483         struct rocker *rocker = (struct rocker *)data;
3484         struct rocker_port *rocker_port;
3485         struct rocker_fdb_tbl_entry *entry;
3486         struct hlist_node *tmp;
3487         unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3488         unsigned long expires;
3489         unsigned long lock_flags;
3490         int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3491                     ROCKER_OP_FLAG_LEARNED;
3492         int bkt;
3493
3494         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3495
3496         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3497                 if (!entry->learned)
3498                         continue;
3499                 rocker_port = entry->key.rocker_port;
3500                 expires = entry->touched + rocker_port->ageing_time;
3501                 if (time_before_eq(expires, jiffies)) {
3502                         rocker_port_fdb_learn(rocker_port, NULL,
3503                                               flags, entry->key.addr,
3504                                               entry->key.vlan_id);
3505                         hash_del(&entry->entry);
3506                 } else if (time_before(expires, next_timer)) {
3507                         next_timer = expires;
3508                 }
3509         }
3510
3511         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3512
3513         mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
3514 }
3515
3516 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3517                                   struct switchdev_trans *trans, int flags,
3518                                   __be16 vlan_id)
3519 {
3520         u32 in_pport_mask = 0xffffffff;
3521         __be16 eth_type;
3522         const u8 *dst_mac_mask = ff_mac;
3523         __be16 vlan_id_mask = htons(0xffff);
3524         bool copy_to_cpu = false;
3525         int err;
3526
3527         if (ntohs(vlan_id) == 0)
3528                 vlan_id = rocker_port->internal_vlan_id;
3529
3530         eth_type = htons(ETH_P_IP);
3531         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3532                                        rocker_port->pport, in_pport_mask,
3533                                        eth_type, rocker_port->dev->dev_addr,
3534                                        dst_mac_mask, vlan_id, vlan_id_mask,
3535                                        copy_to_cpu, flags);
3536         if (err)
3537                 return err;
3538
3539         eth_type = htons(ETH_P_IPV6);
3540         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3541                                        rocker_port->pport, in_pport_mask,
3542                                        eth_type, rocker_port->dev->dev_addr,
3543                                        dst_mac_mask, vlan_id, vlan_id_mask,
3544                                        copy_to_cpu, flags);
3545
3546         return err;
3547 }
3548
3549 static int rocker_port_fwding(struct rocker_port *rocker_port,
3550                               struct switchdev_trans *trans, int flags)
3551 {
3552         bool pop_vlan;
3553         u32 out_pport;
3554         __be16 vlan_id;
3555         u16 vid;
3556         int err;
3557
3558         /* Port will be forwarding-enabled if its STP state is LEARNING
3559          * or FORWARDING.  Traffic from CPU can still egress, regardless of
3560          * port STP state.  Use L2 interface group on port VLANs as a way
3561          * to toggle port forwarding: if forwarding is disabled, L2
3562          * interface group will not exist.
3563          */
3564
3565         if (rocker_port->stp_state != BR_STATE_LEARNING &&
3566             rocker_port->stp_state != BR_STATE_FORWARDING)
3567                 flags |= ROCKER_OP_FLAG_REMOVE;
3568
3569         out_pport = rocker_port->pport;
3570         for (vid = 1; vid < VLAN_N_VID; vid++) {
3571                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3572                         continue;
3573                 vlan_id = htons(vid);
3574                 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3575                 err = rocker_group_l2_interface(rocker_port, trans, flags,
3576                                                 vlan_id, out_pport, pop_vlan);
3577                 if (err) {
3578                         netdev_err(rocker_port->dev,
3579                                    "Error (%d) port VLAN l2 group for pport %d\n",
3580                                    err, out_pport);
3581                         return err;
3582                 }
3583         }
3584
3585         return 0;
3586 }
3587
3588 static int rocker_port_stp_update(struct rocker_port *rocker_port,
3589                                   struct switchdev_trans *trans, int flags,
3590                                   u8 state)
3591 {
3592         bool want[ROCKER_CTRL_MAX] = { 0, };
3593         bool prev_ctrls[ROCKER_CTRL_MAX];
3594         u8 uninitialized_var(prev_state);
3595         int err;
3596         int i;
3597
3598         if (switchdev_trans_ph_prepare(trans)) {
3599                 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3600                 prev_state = rocker_port->stp_state;
3601         }
3602
3603         if (rocker_port->stp_state == state)
3604                 return 0;
3605
3606         rocker_port->stp_state = state;
3607
3608         switch (state) {
3609         case BR_STATE_DISABLED:
3610                 /* port is completely disabled */
3611                 break;
3612         case BR_STATE_LISTENING:
3613         case BR_STATE_BLOCKING:
3614                 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3615                 break;
3616         case BR_STATE_LEARNING:
3617         case BR_STATE_FORWARDING:
3618                 if (!rocker_port_is_ovsed(rocker_port))
3619                         want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3620                 want[ROCKER_CTRL_IPV4_MCAST] = true;
3621                 want[ROCKER_CTRL_IPV6_MCAST] = true;
3622                 if (rocker_port_is_bridged(rocker_port))
3623                         want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3624                 else if (rocker_port_is_ovsed(rocker_port))
3625                         want[ROCKER_CTRL_DFLT_OVS] = true;
3626                 else
3627                         want[ROCKER_CTRL_LOCAL_ARP] = true;
3628                 break;
3629         }
3630
3631         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3632                 if (want[i] != rocker_port->ctrls[i]) {
3633                         int ctrl_flags = flags |
3634                                          (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3635                         err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
3636                                                &rocker_ctrls[i]);
3637                         if (err)
3638                                 goto err_out;
3639                         rocker_port->ctrls[i] = want[i];
3640                 }
3641         }
3642
3643         err = rocker_port_fdb_flush(rocker_port, trans, flags);
3644         if (err)
3645                 goto err_out;
3646
3647         err = rocker_port_fwding(rocker_port, trans, flags);
3648
3649 err_out:
3650         if (switchdev_trans_ph_prepare(trans)) {
3651                 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3652                 rocker_port->stp_state = prev_state;
3653         }
3654
3655         return err;
3656 }
3657
3658 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3659                                   struct switchdev_trans *trans, int flags)
3660 {
3661         if (rocker_port_is_bridged(rocker_port))
3662                 /* bridge STP will enable port */
3663                 return 0;
3664
3665         /* port is not bridged, so simulate going to FORWARDING state */
3666         return rocker_port_stp_update(rocker_port, trans, flags,
3667                                       BR_STATE_FORWARDING);
3668 }
3669
3670 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3671                                    struct switchdev_trans *trans, int flags)
3672 {
3673         if (rocker_port_is_bridged(rocker_port))
3674                 /* bridge STP will disable port */
3675                 return 0;
3676
3677         /* port is not bridged, so simulate going to DISABLED state */
3678         return rocker_port_stp_update(rocker_port, trans, flags,
3679                                       BR_STATE_DISABLED);
3680 }
3681
3682 static struct rocker_internal_vlan_tbl_entry *
3683 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3684 {
3685         struct rocker_internal_vlan_tbl_entry *found;
3686
3687         hash_for_each_possible(rocker->internal_vlan_tbl, found,
3688                                entry, ifindex) {
3689                 if (found->ifindex == ifindex)
3690                         return found;
3691         }
3692
3693         return NULL;
3694 }
3695
3696 static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3697                                                int ifindex)
3698 {
3699         struct rocker *rocker = rocker_port->rocker;
3700         struct rocker_internal_vlan_tbl_entry *entry;
3701         struct rocker_internal_vlan_tbl_entry *found;
3702         unsigned long lock_flags;
3703         int i;
3704
3705         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3706         if (!entry)
3707                 return 0;
3708
3709         entry->ifindex = ifindex;
3710
3711         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3712
3713         found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3714         if (found) {
3715                 kfree(entry);
3716                 goto found;
3717         }
3718
3719         found = entry;
3720         hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3721
3722         for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3723                 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3724                         continue;
3725                 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3726                 goto found;
3727         }
3728
3729         netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3730
3731 found:
3732         found->ref_count++;
3733         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3734
3735         return found->vlan_id;
3736 }
3737
3738 static void
3739 rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
3740                                  int ifindex)
3741 {
3742         struct rocker *rocker = rocker_port->rocker;
3743         struct rocker_internal_vlan_tbl_entry *found;
3744         unsigned long lock_flags;
3745         unsigned long bit;
3746
3747         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3748
3749         found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3750         if (!found) {
3751                 netdev_err(rocker_port->dev,
3752                            "ifindex (%d) not found in internal VLAN tbl\n",
3753                            ifindex);
3754                 goto not_found;
3755         }
3756
3757         if (--found->ref_count <= 0) {
3758                 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3759                 clear_bit(bit, rocker->internal_vlan_bitmap);
3760                 hash_del(&found->entry);
3761                 kfree(found);
3762         }
3763
3764 not_found:
3765         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3766 }
3767
3768 static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
3769                                 struct switchdev_trans *trans, __be32 dst,
3770                                 int dst_len, const struct fib_info *fi,
3771                                 u32 tb_id, int flags)
3772 {
3773         const struct fib_nh *nh;
3774         __be16 eth_type = htons(ETH_P_IP);
3775         __be32 dst_mask = inet_make_mask(dst_len);
3776         __be16 internal_vlan_id = rocker_port->internal_vlan_id;
3777         u32 priority = fi->fib_priority;
3778         enum rocker_of_dpa_table_id goto_tbl =
3779                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3780         u32 group_id;
3781         bool nh_on_port;
3782         bool has_gw;
3783         u32 index;
3784         int err;
3785
3786         /* XXX support ECMP */
3787
3788         nh = fi->fib_nh;
3789         nh_on_port = (fi->fib_dev == rocker_port->dev);
3790         has_gw = !!nh->nh_gw;
3791
3792         if (has_gw && nh_on_port) {
3793                 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
3794                                           nh->nh_gw, &index);
3795                 if (err)
3796                         return err;
3797
3798                 group_id = ROCKER_GROUP_L3_UNICAST(index);
3799         } else {
3800                 /* Send to CPU for processing */
3801                 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
3802         }
3803
3804         err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
3805                                              dst_mask, priority, goto_tbl,
3806                                              group_id, flags);
3807         if (err)
3808                 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
3809                            err, &dst);
3810
3811         return err;
3812 }
3813
3814 /*****************
3815  * Net device ops
3816  *****************/
3817
3818 static int rocker_port_open(struct net_device *dev)
3819 {
3820         struct rocker_port *rocker_port = netdev_priv(dev);
3821         int err;
3822
3823         err = rocker_port_dma_rings_init(rocker_port);
3824         if (err)
3825                 return err;
3826
3827         err = request_irq(rocker_msix_tx_vector(rocker_port),
3828                           rocker_tx_irq_handler, 0,
3829                           rocker_driver_name, rocker_port);
3830         if (err) {
3831                 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
3832                 goto err_request_tx_irq;
3833         }
3834
3835         err = request_irq(rocker_msix_rx_vector(rocker_port),
3836                           rocker_rx_irq_handler, 0,
3837                           rocker_driver_name, rocker_port);
3838         if (err) {
3839                 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
3840                 goto err_request_rx_irq;
3841         }
3842
3843         err = rocker_port_fwd_enable(rocker_port, NULL, 0);
3844         if (err)
3845                 goto err_fwd_enable;
3846
3847         napi_enable(&rocker_port->napi_tx);
3848         napi_enable(&rocker_port->napi_rx);
3849         if (!dev->proto_down)
3850                 rocker_port_set_enable(rocker_port, true);
3851         netif_start_queue(dev);
3852         return 0;
3853
3854 err_fwd_enable:
3855         free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3856 err_request_rx_irq:
3857         free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3858 err_request_tx_irq:
3859         rocker_port_dma_rings_fini(rocker_port);
3860         return err;
3861 }
3862
3863 static int rocker_port_stop(struct net_device *dev)
3864 {
3865         struct rocker_port *rocker_port = netdev_priv(dev);
3866
3867         netif_stop_queue(dev);
3868         rocker_port_set_enable(rocker_port, false);
3869         napi_disable(&rocker_port->napi_rx);
3870         napi_disable(&rocker_port->napi_tx);
3871         rocker_port_fwd_disable(rocker_port, NULL,
3872                                 ROCKER_OP_FLAG_NOWAIT);
3873         free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3874         free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3875         rocker_port_dma_rings_fini(rocker_port);
3876
3877         return 0;
3878 }
3879
3880 static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
3881                                        const struct rocker_desc_info *desc_info)
3882 {
3883         const struct rocker *rocker = rocker_port->rocker;
3884         struct pci_dev *pdev = rocker->pdev;
3885         const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
3886         struct rocker_tlv *attr;
3887         int rem;
3888
3889         rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
3890         if (!attrs[ROCKER_TLV_TX_FRAGS])
3891                 return;
3892         rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
3893                 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
3894                 dma_addr_t dma_handle;
3895                 size_t len;
3896
3897                 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
3898                         continue;
3899                 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
3900                                         attr);
3901                 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
3902                     !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
3903                         continue;
3904                 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
3905                 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
3906                 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
3907         }
3908 }
3909
3910 static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
3911                                        struct rocker_desc_info *desc_info,
3912                                        char *buf, size_t buf_len)
3913 {
3914         const struct rocker *rocker = rocker_port->rocker;
3915         struct pci_dev *pdev = rocker->pdev;
3916         dma_addr_t dma_handle;
3917         struct rocker_tlv *frag;
3918
3919         dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
3920         if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
3921                 if (net_ratelimit())
3922                         netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
3923                 return -EIO;
3924         }
3925         frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
3926         if (!frag)
3927                 goto unmap_frag;
3928         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
3929                                dma_handle))
3930                 goto nest_cancel;
3931         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
3932                                buf_len))
3933                 goto nest_cancel;
3934         rocker_tlv_nest_end(desc_info, frag);
3935         return 0;
3936
3937 nest_cancel:
3938         rocker_tlv_nest_cancel(desc_info, frag);
3939 unmap_frag:
3940         pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
3941         return -EMSGSIZE;
3942 }
3943
3944 static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
3945 {
3946         struct rocker_port *rocker_port = netdev_priv(dev);
3947         struct rocker *rocker = rocker_port->rocker;
3948         struct rocker_desc_info *desc_info;
3949         struct rocker_tlv *frags;
3950         int i;
3951         int err;
3952
3953         desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3954         if (unlikely(!desc_info)) {
3955                 if (net_ratelimit())
3956                         netdev_err(dev, "tx ring full when queue awake\n");
3957                 return NETDEV_TX_BUSY;
3958         }
3959
3960         rocker_desc_cookie_ptr_set(desc_info, skb);
3961
3962         frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
3963         if (!frags)
3964                 goto out;
3965         err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3966                                           skb->data, skb_headlen(skb));
3967         if (err)
3968                 goto nest_cancel;
3969         if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
3970                 err = skb_linearize(skb);
3971                 if (err)
3972                         goto unmap_frags;
3973         }
3974
3975         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3976                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3977
3978                 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3979                                                   skb_frag_address(frag),
3980                                                   skb_frag_size(frag));
3981                 if (err)
3982                         goto unmap_frags;
3983         }
3984         rocker_tlv_nest_end(desc_info, frags);
3985
3986         rocker_desc_gen_clear(desc_info);
3987         rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
3988
3989         desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3990         if (!desc_info)
3991                 netif_stop_queue(dev);
3992
3993         return NETDEV_TX_OK;
3994
3995 unmap_frags:
3996         rocker_tx_desc_frags_unmap(rocker_port, desc_info);
3997 nest_cancel:
3998         rocker_tlv_nest_cancel(desc_info, frags);
3999 out:
4000         dev_kfree_skb(skb);
4001         dev->stats.tx_dropped++;
4002
4003         return NETDEV_TX_OK;
4004 }
4005
4006 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4007 {
4008         struct sockaddr *addr = p;
4009         struct rocker_port *rocker_port = netdev_priv(dev);
4010         int err;
4011
4012         if (!is_valid_ether_addr(addr->sa_data))
4013                 return -EADDRNOTAVAIL;
4014
4015         err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4016         if (err)
4017                 return err;
4018         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4019         return 0;
4020 }
4021
4022 static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4023 {
4024         struct rocker_port *rocker_port = netdev_priv(dev);
4025         int running = netif_running(dev);
4026         int err;
4027
4028 #define ROCKER_PORT_MIN_MTU     68
4029 #define ROCKER_PORT_MAX_MTU     9000
4030
4031         if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4032                 return -EINVAL;
4033
4034         if (running)
4035                 rocker_port_stop(dev);
4036
4037         netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4038         dev->mtu = new_mtu;
4039
4040         err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4041         if (err)
4042                 return err;
4043
4044         if (running)
4045                 err = rocker_port_open(dev);
4046
4047         return err;
4048 }
4049
4050 static int rocker_port_get_phys_port_name(struct net_device *dev,
4051                                           char *buf, size_t len)
4052 {
4053         struct rocker_port *rocker_port = netdev_priv(dev);
4054         struct port_name name = { .buf = buf, .len = len };
4055         int err;
4056
4057         err = rocker_cmd_exec(rocker_port, NULL, 0,
4058                               rocker_cmd_get_port_settings_prep, NULL,
4059                               rocker_cmd_get_port_settings_phys_name_proc,
4060                               &name);
4061
4062         return err ? -EOPNOTSUPP : 0;
4063 }
4064
4065 static int rocker_port_change_proto_down(struct net_device *dev,
4066                                          bool proto_down)
4067 {
4068         struct rocker_port *rocker_port = netdev_priv(dev);
4069
4070         if (rocker_port->dev->flags & IFF_UP)
4071                 rocker_port_set_enable(rocker_port, !proto_down);
4072         rocker_port->dev->proto_down = proto_down;
4073         return 0;
4074 }
4075
4076 static void rocker_port_neigh_destroy(struct neighbour *n)
4077 {
4078         struct rocker_port *rocker_port = netdev_priv(n->dev);
4079         int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4080         __be32 ip_addr = *(__be32 *)n->primary_key;
4081
4082         rocker_port_ipv4_neigh(rocker_port, NULL,
4083                                flags, ip_addr, n->ha);
4084 }
4085
4086 static const struct net_device_ops rocker_port_netdev_ops = {
4087         .ndo_open                       = rocker_port_open,
4088         .ndo_stop                       = rocker_port_stop,
4089         .ndo_start_xmit                 = rocker_port_xmit,
4090         .ndo_set_mac_address            = rocker_port_set_mac_address,
4091         .ndo_change_mtu                 = rocker_port_change_mtu,
4092         .ndo_bridge_getlink             = switchdev_port_bridge_getlink,
4093         .ndo_bridge_setlink             = switchdev_port_bridge_setlink,
4094         .ndo_bridge_dellink             = switchdev_port_bridge_dellink,
4095         .ndo_fdb_add                    = switchdev_port_fdb_add,
4096         .ndo_fdb_del                    = switchdev_port_fdb_del,
4097         .ndo_fdb_dump                   = switchdev_port_fdb_dump,
4098         .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
4099         .ndo_change_proto_down          = rocker_port_change_proto_down,
4100         .ndo_neigh_destroy              = rocker_port_neigh_destroy,
4101 };
4102
4103 /********************
4104  * swdev interface
4105  ********************/
4106
4107 static int rocker_port_attr_get(struct net_device *dev,
4108                                 struct switchdev_attr *attr)
4109 {
4110         const struct rocker_port *rocker_port = netdev_priv(dev);
4111         const struct rocker *rocker = rocker_port->rocker;
4112
4113         switch (attr->id) {
4114         case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
4115                 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4116                 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
4117                 break;
4118         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4119                 attr->u.brport_flags = rocker_port->brport_flags;
4120                 break;
4121         default:
4122                 return -EOPNOTSUPP;
4123         }
4124
4125         return 0;
4126 }
4127
4128 static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
4129                                         struct switchdev_trans *trans,
4130                                         unsigned long brport_flags)
4131 {
4132         unsigned long orig_flags;
4133         int err = 0;
4134
4135         orig_flags = rocker_port->brport_flags;
4136         rocker_port->brport_flags = brport_flags;
4137         if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
4138                 err = rocker_port_set_learning(rocker_port, trans);
4139
4140         if (switchdev_trans_ph_prepare(trans))
4141                 rocker_port->brport_flags = orig_flags;
4142
4143         return err;
4144 }
4145
4146 static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
4147                                           struct switchdev_trans *trans,
4148                                           u32 ageing_time)
4149 {
4150         if (!switchdev_trans_ph_prepare(trans)) {
4151                 rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
4152                 mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
4153         }
4154
4155         return 0;
4156 }
4157
4158 static int rocker_port_attr_set(struct net_device *dev,
4159                                 const struct switchdev_attr *attr,
4160                                 struct switchdev_trans *trans)
4161 {
4162         struct rocker_port *rocker_port = netdev_priv(dev);
4163         int err = 0;
4164
4165         switch (attr->id) {
4166         case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
4167                 err = rocker_port_stp_update(rocker_port, trans, 0,
4168                                              attr->u.stp_state);
4169                 break;
4170         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4171                 err = rocker_port_brport_flags_set(rocker_port, trans,
4172                                                    attr->u.brport_flags);
4173                 break;
4174         case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
4175                 err = rocker_port_bridge_ageing_time(rocker_port, trans,
4176                                                      attr->u.ageing_time);
4177                 break;
4178         default:
4179                 err = -EOPNOTSUPP;
4180                 break;
4181         }
4182
4183         return err;
4184 }
4185
4186 static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4187                                 struct switchdev_trans *trans,
4188                                 u16 vid, u16 flags)
4189 {
4190         int err;
4191
4192         /* XXX deal with flags for PVID and untagged */
4193
4194         err = rocker_port_vlan(rocker_port, trans, 0, vid);
4195         if (err)
4196                 return err;
4197
4198         err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4199         if (err)
4200                 rocker_port_vlan(rocker_port, trans,
4201                                  ROCKER_OP_FLAG_REMOVE, vid);
4202
4203         return err;
4204 }
4205
4206 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4207                                  struct switchdev_trans *trans,
4208                                  const struct switchdev_obj_port_vlan *vlan)
4209 {
4210         u16 vid;
4211         int err;
4212
4213         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4214                 err = rocker_port_vlan_add(rocker_port, trans,
4215                                            vid, vlan->flags);
4216                 if (err)
4217                         return err;
4218         }
4219
4220         return 0;
4221 }
4222
4223 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4224                                struct switchdev_trans *trans,
4225                                const struct switchdev_obj_port_fdb *fdb)
4226 {
4227         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4228         int flags = 0;
4229
4230         if (!rocker_port_is_bridged(rocker_port))
4231                 return -EINVAL;
4232
4233         return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4234 }
4235
4236 static int rocker_port_obj_add(struct net_device *dev,
4237                                const struct switchdev_obj *obj,
4238                                struct switchdev_trans *trans)
4239 {
4240         struct rocker_port *rocker_port = netdev_priv(dev);
4241         const struct switchdev_obj_ipv4_fib *fib4;
4242         int err = 0;
4243
4244         switch (obj->id) {
4245         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4246                 err = rocker_port_vlans_add(rocker_port, trans,
4247                                             SWITCHDEV_OBJ_PORT_VLAN(obj));
4248                 break;
4249         case SWITCHDEV_OBJ_ID_IPV4_FIB:
4250                 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4251                 err = rocker_port_fib_ipv4(rocker_port, trans,
4252                                            htonl(fib4->dst), fib4->dst_len,
4253                                            &fib4->fi, fib4->tb_id, 0);
4254                 break;
4255         case SWITCHDEV_OBJ_ID_PORT_FDB:
4256                 err = rocker_port_fdb_add(rocker_port, trans,
4257                                           SWITCHDEV_OBJ_PORT_FDB(obj));
4258                 break;
4259         default:
4260                 err = -EOPNOTSUPP;
4261                 break;
4262         }
4263
4264         return err;
4265 }
4266
4267 static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4268                                 u16 vid, u16 flags)
4269 {
4270         int err;
4271
4272         err = rocker_port_router_mac(rocker_port, NULL,
4273                                      ROCKER_OP_FLAG_REMOVE, htons(vid));
4274         if (err)
4275                 return err;
4276
4277         return rocker_port_vlan(rocker_port, NULL,
4278                                 ROCKER_OP_FLAG_REMOVE, vid);
4279 }
4280
4281 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
4282                                  const struct switchdev_obj_port_vlan *vlan)
4283 {
4284         u16 vid;
4285         int err;
4286
4287         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4288                 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4289                 if (err)
4290                         return err;
4291         }
4292
4293         return 0;
4294 }
4295
4296 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4297                                struct switchdev_trans *trans,
4298                                const struct switchdev_obj_port_fdb *fdb)
4299 {
4300         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4301         int flags = ROCKER_OP_FLAG_REMOVE;
4302
4303         if (!rocker_port_is_bridged(rocker_port))
4304                 return -EINVAL;
4305
4306         return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4307 }
4308
4309 static int rocker_port_obj_del(struct net_device *dev,
4310                                const struct switchdev_obj *obj)
4311 {
4312         struct rocker_port *rocker_port = netdev_priv(dev);
4313         const struct switchdev_obj_ipv4_fib *fib4;
4314         int err = 0;
4315
4316         switch (obj->id) {
4317         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4318                 err = rocker_port_vlans_del(rocker_port,
4319                                             SWITCHDEV_OBJ_PORT_VLAN(obj));
4320                 break;
4321         case SWITCHDEV_OBJ_ID_IPV4_FIB:
4322                 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4323                 err = rocker_port_fib_ipv4(rocker_port, NULL,
4324                                            htonl(fib4->dst), fib4->dst_len,
4325                                            &fib4->fi, fib4->tb_id,
4326                                            ROCKER_OP_FLAG_REMOVE);
4327                 break;
4328         case SWITCHDEV_OBJ_ID_PORT_FDB:
4329                 err = rocker_port_fdb_del(rocker_port, NULL,
4330                                           SWITCHDEV_OBJ_PORT_FDB(obj));
4331                 break;
4332         default:
4333                 err = -EOPNOTSUPP;
4334                 break;
4335         }
4336
4337         return err;
4338 }
4339
4340 static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
4341                                 struct switchdev_obj_port_fdb *fdb,
4342                                 switchdev_obj_dump_cb_t *cb)
4343 {
4344         struct rocker *rocker = rocker_port->rocker;
4345         struct rocker_fdb_tbl_entry *found;
4346         struct hlist_node *tmp;
4347         unsigned long lock_flags;
4348         int bkt;
4349         int err = 0;
4350
4351         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4352         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
4353                 if (found->key.rocker_port != rocker_port)
4354                         continue;
4355                 ether_addr_copy(fdb->addr, found->key.addr);
4356                 fdb->ndm_state = NUD_REACHABLE;
4357                 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4358                                                    found->key.vlan_id);
4359                 err = cb(&fdb->obj);
4360                 if (err)
4361                         break;
4362         }
4363         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4364
4365         return err;
4366 }
4367
4368 static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4369                                  struct switchdev_obj_port_vlan *vlan,
4370                                  switchdev_obj_dump_cb_t *cb)
4371 {
4372         u16 vid;
4373         int err = 0;
4374
4375         for (vid = 1; vid < VLAN_N_VID; vid++) {
4376                 if (!test_bit(vid, rocker_port->vlan_bitmap))
4377                         continue;
4378                 vlan->flags = 0;
4379                 if (rocker_vlan_id_is_internal(htons(vid)))
4380                         vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4381                 vlan->vid_begin = vid;
4382                 vlan->vid_end = vid;
4383                 err = cb(&vlan->obj);
4384                 if (err)
4385                         break;
4386         }
4387
4388         return err;
4389 }
4390
4391 static int rocker_port_obj_dump(struct net_device *dev,
4392                                 struct switchdev_obj *obj,
4393                                 switchdev_obj_dump_cb_t *cb)
4394 {
4395         const struct rocker_port *rocker_port = netdev_priv(dev);
4396         int err = 0;
4397
4398         switch (obj->id) {
4399         case SWITCHDEV_OBJ_ID_PORT_FDB:
4400                 err = rocker_port_fdb_dump(rocker_port,
4401                                            SWITCHDEV_OBJ_PORT_FDB(obj), cb);
4402                 break;
4403         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4404                 err = rocker_port_vlan_dump(rocker_port,
4405                                             SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
4406                 break;
4407         default:
4408                 err = -EOPNOTSUPP;
4409                 break;
4410         }
4411
4412         return err;
4413 }
4414
4415 static const struct switchdev_ops rocker_port_switchdev_ops = {
4416         .switchdev_port_attr_get        = rocker_port_attr_get,
4417         .switchdev_port_attr_set        = rocker_port_attr_set,
4418         .switchdev_port_obj_add         = rocker_port_obj_add,
4419         .switchdev_port_obj_del         = rocker_port_obj_del,
4420         .switchdev_port_obj_dump        = rocker_port_obj_dump,
4421 };
4422
4423 /********************
4424  * ethtool interface
4425  ********************/
4426
4427 static int rocker_port_get_settings(struct net_device *dev,
4428                                     struct ethtool_cmd *ecmd)
4429 {
4430         struct rocker_port *rocker_port = netdev_priv(dev);
4431
4432         return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4433 }
4434
4435 static int rocker_port_set_settings(struct net_device *dev,
4436                                     struct ethtool_cmd *ecmd)
4437 {
4438         struct rocker_port *rocker_port = netdev_priv(dev);
4439
4440         return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4441 }
4442
4443 static void rocker_port_get_drvinfo(struct net_device *dev,
4444                                     struct ethtool_drvinfo *drvinfo)
4445 {
4446         strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4447         strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4448 }
4449
4450 static struct rocker_port_stats {
4451         char str[ETH_GSTRING_LEN];
4452         int type;
4453 } rocker_port_stats[] = {
4454         { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
4455         { "rx_bytes",   ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
4456         { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4457         { "rx_errors",  ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },
4458
4459         { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
4460         { "tx_bytes",   ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
4461         { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4462         { "tx_errors",  ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
4463 };
4464
4465 #define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
4466
4467 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4468                                     u8 *data)
4469 {
4470         u8 *p = data;
4471         int i;
4472
4473         switch (stringset) {
4474         case ETH_SS_STATS:
4475                 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4476                         memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4477                         p += ETH_GSTRING_LEN;
4478                 }
4479                 break;
4480         }
4481 }
4482
4483 static int
4484 rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
4485                                struct rocker_desc_info *desc_info,
4486                                void *priv)
4487 {
4488         struct rocker_tlv *cmd_stats;
4489
4490         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4491                                ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4492                 return -EMSGSIZE;
4493
4494         cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4495         if (!cmd_stats)
4496                 return -EMSGSIZE;
4497
4498         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4499                                rocker_port->pport))
4500                 return -EMSGSIZE;
4501
4502         rocker_tlv_nest_end(desc_info, cmd_stats);
4503
4504         return 0;
4505 }
4506
4507 static int
4508 rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
4509                                        const struct rocker_desc_info *desc_info,
4510                                        void *priv)
4511 {
4512         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4513         const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4514         const struct rocker_tlv *pattr;
4515         u32 pport;
4516         u64 *data = priv;
4517         int i;
4518
4519         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4520
4521         if (!attrs[ROCKER_TLV_CMD_INFO])
4522                 return -EIO;
4523
4524         rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4525                                 attrs[ROCKER_TLV_CMD_INFO]);
4526
4527         if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
4528                 return -EIO;
4529
4530         pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4531         if (pport != rocker_port->pport)
4532                 return -EIO;
4533
4534         for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4535                 pattr = stats_attrs[rocker_port_stats[i].type];
4536                 if (!pattr)
4537                         continue;
4538
4539                 data[i] = rocker_tlv_get_u64(pattr);
4540         }
4541
4542         return 0;
4543 }
4544
4545 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4546                                              void *priv)
4547 {
4548         return rocker_cmd_exec(rocker_port, NULL, 0,
4549                                rocker_cmd_get_port_stats_prep, NULL,
4550                                rocker_cmd_get_port_stats_ethtool_proc,
4551                                priv);
4552 }
4553
4554 static void rocker_port_get_stats(struct net_device *dev,
4555                                   struct ethtool_stats *stats, u64 *data)
4556 {
4557         struct rocker_port *rocker_port = netdev_priv(dev);
4558
4559         if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4560                 int i;
4561
4562                 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4563                         data[i] = 0;
4564         }
4565 }
4566
4567 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4568 {
4569         switch (sset) {
4570         case ETH_SS_STATS:
4571                 return ROCKER_PORT_STATS_LEN;
4572         default:
4573                 return -EOPNOTSUPP;
4574         }
4575 }
4576
4577 static const struct ethtool_ops rocker_port_ethtool_ops = {
4578         .get_settings           = rocker_port_get_settings,
4579         .set_settings           = rocker_port_set_settings,
4580         .get_drvinfo            = rocker_port_get_drvinfo,
4581         .get_link               = ethtool_op_get_link,
4582         .get_strings            = rocker_port_get_strings,
4583         .get_ethtool_stats      = rocker_port_get_stats,
4584         .get_sset_count         = rocker_port_get_sset_count,
4585 };
4586
4587 /*****************
4588  * NAPI interface
4589  *****************/
4590
4591 static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4592 {
4593         return container_of(napi, struct rocker_port, napi_tx);
4594 }
4595
4596 static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4597 {
4598         struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
4599         const struct rocker *rocker = rocker_port->rocker;
4600         const struct rocker_desc_info *desc_info;
4601         u32 credits = 0;
4602         int err;
4603
4604         /* Cleanup tx descriptors */
4605         while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
4606                 struct sk_buff *skb;
4607
4608                 err = rocker_desc_err(desc_info);
4609                 if (err && net_ratelimit())
4610                         netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4611                                    err);
4612                 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4613
4614                 skb = rocker_desc_cookie_ptr_get(desc_info);
4615                 if (err == 0) {
4616                         rocker_port->dev->stats.tx_packets++;
4617                         rocker_port->dev->stats.tx_bytes += skb->len;
4618                 } else {
4619                         rocker_port->dev->stats.tx_errors++;
4620                 }
4621
4622                 dev_kfree_skb_any(skb);
4623                 credits++;
4624         }
4625
4626         if (credits && netif_queue_stopped(rocker_port->dev))
4627                 netif_wake_queue(rocker_port->dev);
4628
4629         napi_complete(napi);
4630         rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4631
4632         return 0;
4633 }
4634
4635 static int rocker_port_rx_proc(const struct rocker *rocker,
4636                                const struct rocker_port *rocker_port,
4637                                struct rocker_desc_info *desc_info)
4638 {
4639         const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
4640         struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4641         size_t rx_len;
4642         u16 rx_flags = 0;
4643
4644         if (!skb)
4645                 return -ENOENT;
4646
4647         rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4648         if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4649                 return -EINVAL;
4650         if (attrs[ROCKER_TLV_RX_FLAGS])
4651                 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
4652
4653         rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4654
4655         rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4656         skb_put(skb, rx_len);
4657         skb->protocol = eth_type_trans(skb, rocker_port->dev);
4658
4659         if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
4660                 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
4661
4662         rocker_port->dev->stats.rx_packets++;
4663         rocker_port->dev->stats.rx_bytes += skb->len;
4664
4665         netif_receive_skb(skb);
4666
4667         return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
4668 }
4669
4670 static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4671 {
4672         return container_of(napi, struct rocker_port, napi_rx);
4673 }
4674
4675 static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4676 {
4677         struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
4678         const struct rocker *rocker = rocker_port->rocker;
4679         struct rocker_desc_info *desc_info;
4680         u32 credits = 0;
4681         int err;
4682
4683         /* Process rx descriptors */
4684         while (credits < budget &&
4685                (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4686                 err = rocker_desc_err(desc_info);
4687                 if (err) {
4688                         if (net_ratelimit())
4689                                 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4690                                            err);
4691                 } else {
4692                         err = rocker_port_rx_proc(rocker, rocker_port,
4693                                                   desc_info);
4694                         if (err && net_ratelimit())
4695                                 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4696                                            err);
4697                 }
4698                 if (err)
4699                         rocker_port->dev->stats.rx_errors++;
4700
4701                 rocker_desc_gen_clear(desc_info);
4702                 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4703                 credits++;
4704         }
4705
4706         if (credits < budget)
4707                 napi_complete(napi);
4708
4709         rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4710
4711         return credits;
4712 }
4713
4714 /*****************
4715  * PCI driver ops
4716  *****************/
4717
4718 static void rocker_carrier_init(const struct rocker_port *rocker_port)
4719 {
4720         const struct rocker *rocker = rocker_port->rocker;
4721         u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4722         bool link_up;
4723
4724         link_up = link_status & (1 << rocker_port->pport);
4725         if (link_up)
4726                 netif_carrier_on(rocker_port->dev);
4727         else
4728                 netif_carrier_off(rocker_port->dev);
4729 }
4730
4731 static void rocker_remove_ports(const struct rocker *rocker)
4732 {
4733         struct rocker_port *rocker_port;
4734         int i;
4735
4736         for (i = 0; i < rocker->port_count; i++) {
4737                 rocker_port = rocker->ports[i];
4738                 if (!rocker_port)
4739                         continue;
4740                 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
4741                 unregister_netdev(rocker_port->dev);
4742                 free_netdev(rocker_port->dev);
4743         }
4744         kfree(rocker->ports);
4745 }
4746
4747 static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
4748 {
4749         const struct rocker *rocker = rocker_port->rocker;
4750         const struct pci_dev *pdev = rocker->pdev;
4751         int err;
4752
4753         err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4754                                                    rocker_port->dev->dev_addr);
4755         if (err) {
4756                 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4757                 eth_hw_addr_random(rocker_port->dev);
4758         }
4759 }
4760
4761 static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4762 {
4763         const struct pci_dev *pdev = rocker->pdev;
4764         struct rocker_port *rocker_port;
4765         struct net_device *dev;
4766         u16 untagged_vid = 0;
4767         int err;
4768
4769         dev = alloc_etherdev(sizeof(struct rocker_port));
4770         if (!dev)
4771                 return -ENOMEM;
4772         rocker_port = netdev_priv(dev);
4773         rocker_port->dev = dev;
4774         rocker_port->rocker = rocker;
4775         rocker_port->port_number = port_number;
4776         rocker_port->pport = port_number + 1;
4777         rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
4778         rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
4779
4780         rocker_port_dev_addr_init(rocker_port);
4781         dev->netdev_ops = &rocker_port_netdev_ops;
4782         dev->ethtool_ops = &rocker_port_ethtool_ops;
4783         dev->switchdev_ops = &rocker_port_switchdev_ops;
4784         netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
4785                           NAPI_POLL_WEIGHT);
4786         netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
4787                        NAPI_POLL_WEIGHT);
4788         rocker_carrier_init(rocker_port);
4789
4790         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
4791
4792         err = register_netdev(dev);
4793         if (err) {
4794                 dev_err(&pdev->dev, "register_netdev failed\n");
4795                 goto err_register_netdev;
4796         }
4797         rocker->ports[port_number] = rocker_port;
4798
4799         switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
4800
4801         rocker_port_set_learning(rocker_port, NULL);
4802
4803         err = rocker_port_ig_tbl(rocker_port, NULL, 0);
4804         if (err) {
4805                 netdev_err(rocker_port->dev, "install ig port table failed\n");
4806                 goto err_port_ig_tbl;
4807         }
4808
4809         rocker_port->internal_vlan_id =
4810                 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
4811
4812         err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
4813         if (err) {
4814                 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
4815                 goto err_untagged_vlan;
4816         }
4817
4818         return 0;
4819
4820 err_untagged_vlan:
4821         rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
4822 err_port_ig_tbl:
4823         rocker->ports[port_number] = NULL;
4824         unregister_netdev(dev);
4825 err_register_netdev:
4826         free_netdev(dev);
4827         return err;
4828 }
4829
4830 static int rocker_probe_ports(struct rocker *rocker)
4831 {
4832         int i;
4833         size_t alloc_size;
4834         int err;
4835
4836         alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
4837         rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
4838         if (!rocker->ports)
4839                 return -ENOMEM;
4840         for (i = 0; i < rocker->port_count; i++) {
4841                 err = rocker_probe_port(rocker, i);
4842                 if (err)
4843                         goto remove_ports;
4844         }
4845         return 0;
4846
4847 remove_ports:
4848         rocker_remove_ports(rocker);
4849         return err;
4850 }
4851
4852 static int rocker_msix_init(struct rocker *rocker)
4853 {
4854         struct pci_dev *pdev = rocker->pdev;
4855         int msix_entries;
4856         int i;
4857         int err;
4858
4859         msix_entries = pci_msix_vec_count(pdev);
4860         if (msix_entries < 0)
4861                 return msix_entries;
4862
4863         if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
4864                 return -EINVAL;
4865
4866         rocker->msix_entries = kmalloc_array(msix_entries,
4867                                              sizeof(struct msix_entry),
4868                                              GFP_KERNEL);
4869         if (!rocker->msix_entries)
4870                 return -ENOMEM;
4871
4872         for (i = 0; i < msix_entries; i++)
4873                 rocker->msix_entries[i].entry = i;
4874
4875         err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
4876         if (err < 0)
4877                 goto err_enable_msix;
4878
4879         return 0;
4880
4881 err_enable_msix:
4882         kfree(rocker->msix_entries);
4883         return err;
4884 }
4885
4886 static void rocker_msix_fini(const struct rocker *rocker)
4887 {
4888         pci_disable_msix(rocker->pdev);
4889         kfree(rocker->msix_entries);
4890 }
4891
4892 static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4893 {
4894         struct rocker *rocker;
4895         int err;
4896
4897         rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
4898         if (!rocker)
4899                 return -ENOMEM;
4900
4901         err = pci_enable_device(pdev);
4902         if (err) {
4903                 dev_err(&pdev->dev, "pci_enable_device failed\n");
4904                 goto err_pci_enable_device;
4905         }
4906
4907         err = pci_request_regions(pdev, rocker_driver_name);
4908         if (err) {
4909                 dev_err(&pdev->dev, "pci_request_regions failed\n");
4910                 goto err_pci_request_regions;
4911         }
4912
4913         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4914         if (!err) {
4915                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4916                 if (err) {
4917                         dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
4918                         goto err_pci_set_dma_mask;
4919                 }
4920         } else {
4921                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4922                 if (err) {
4923                         dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
4924                         goto err_pci_set_dma_mask;
4925                 }
4926         }
4927
4928         if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
4929                 dev_err(&pdev->dev, "invalid PCI region size\n");
4930                 err = -EINVAL;
4931                 goto err_pci_resource_len_check;
4932         }
4933
4934         rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
4935                                   pci_resource_len(pdev, 0));
4936         if (!rocker->hw_addr) {
4937                 dev_err(&pdev->dev, "ioremap failed\n");
4938                 err = -EIO;
4939                 goto err_ioremap;
4940         }
4941         pci_set_master(pdev);
4942
4943         rocker->pdev = pdev;
4944         pci_set_drvdata(pdev, rocker);
4945
4946         rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
4947
4948         err = rocker_msix_init(rocker);
4949         if (err) {
4950                 dev_err(&pdev->dev, "MSI-X init failed\n");
4951                 goto err_msix_init;
4952         }
4953
4954         err = rocker_basic_hw_test(rocker);
4955         if (err) {
4956                 dev_err(&pdev->dev, "basic hw test failed\n");
4957                 goto err_basic_hw_test;
4958         }
4959
4960         rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4961
4962         err = rocker_dma_rings_init(rocker);
4963         if (err)
4964                 goto err_dma_rings_init;
4965
4966         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
4967                           rocker_cmd_irq_handler, 0,
4968                           rocker_driver_name, rocker);
4969         if (err) {
4970                 dev_err(&pdev->dev, "cannot assign cmd irq\n");
4971                 goto err_request_cmd_irq;
4972         }
4973
4974         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
4975                           rocker_event_irq_handler, 0,
4976                           rocker_driver_name, rocker);
4977         if (err) {
4978                 dev_err(&pdev->dev, "cannot assign event irq\n");
4979                 goto err_request_event_irq;
4980         }
4981
4982         rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
4983
4984         err = rocker_init_tbls(rocker);
4985         if (err) {
4986                 dev_err(&pdev->dev, "cannot init rocker tables\n");
4987                 goto err_init_tbls;
4988         }
4989
4990         setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
4991                     (unsigned long) rocker);
4992         mod_timer(&rocker->fdb_cleanup_timer, jiffies);
4993
4994         err = rocker_probe_ports(rocker);
4995         if (err) {
4996                 dev_err(&pdev->dev, "failed to probe ports\n");
4997                 goto err_probe_ports;
4998         }
4999
5000         dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5001                  (int)sizeof(rocker->hw.id), &rocker->hw.id);
5002
5003         return 0;
5004
5005 err_probe_ports:
5006         del_timer_sync(&rocker->fdb_cleanup_timer);
5007         rocker_free_tbls(rocker);
5008 err_init_tbls:
5009         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5010 err_request_event_irq:
5011         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5012 err_request_cmd_irq:
5013         rocker_dma_rings_fini(rocker);
5014 err_dma_rings_init:
5015 err_basic_hw_test:
5016         rocker_msix_fini(rocker);
5017 err_msix_init:
5018         iounmap(rocker->hw_addr);
5019 err_ioremap:
5020 err_pci_resource_len_check:
5021 err_pci_set_dma_mask:
5022         pci_release_regions(pdev);
5023 err_pci_request_regions:
5024         pci_disable_device(pdev);
5025 err_pci_enable_device:
5026         kfree(rocker);
5027         return err;
5028 }
5029
5030 static void rocker_remove(struct pci_dev *pdev)
5031 {
5032         struct rocker *rocker = pci_get_drvdata(pdev);
5033
5034         del_timer_sync(&rocker->fdb_cleanup_timer);
5035         rocker_free_tbls(rocker);
5036         rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5037         rocker_remove_ports(rocker);
5038         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5039         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5040         rocker_dma_rings_fini(rocker);
5041         rocker_msix_fini(rocker);
5042         iounmap(rocker->hw_addr);
5043         pci_release_regions(rocker->pdev);
5044         pci_disable_device(rocker->pdev);
5045         kfree(rocker);
5046 }
5047
5048 static struct pci_driver rocker_pci_driver = {
5049         .name           = rocker_driver_name,
5050         .id_table       = rocker_pci_id_table,
5051         .probe          = rocker_probe,
5052         .remove         = rocker_remove,
5053 };
5054
5055 /************************************
5056  * Net device notifier event handler
5057  ************************************/
5058
5059 static bool rocker_port_dev_check(const struct net_device *dev)
5060 {
5061         return dev->netdev_ops == &rocker_port_netdev_ops;
5062 }
5063
5064 static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5065                                    struct net_device *bridge)
5066 {
5067         u16 untagged_vid = 0;
5068         int err;
5069
5070         /* Port is joining bridge, so the internal VLAN for the
5071          * port is going to change to the bridge internal VLAN.
5072          * Let's remove untagged VLAN (vid=0) from port and
5073          * re-add once internal VLAN has changed.
5074          */
5075
5076         err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5077         if (err)
5078                 return err;
5079
5080         rocker_port_internal_vlan_id_put(rocker_port,
5081                                          rocker_port->dev->ifindex);
5082         rocker_port->internal_vlan_id =
5083                 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
5084
5085         rocker_port->bridge_dev = bridge;
5086         switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
5087
5088         return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
5089 }
5090
5091 static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5092 {
5093         u16 untagged_vid = 0;
5094         int err;
5095
5096         err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5097         if (err)
5098                 return err;
5099
5100         rocker_port_internal_vlan_id_put(rocker_port,
5101                                          rocker_port->bridge_dev->ifindex);
5102         rocker_port->internal_vlan_id =
5103                 rocker_port_internal_vlan_id_get(rocker_port,
5104                                                  rocker_port->dev->ifindex);
5105
5106         switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
5107                                     false);
5108         rocker_port->bridge_dev = NULL;
5109
5110         err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
5111         if (err)
5112                 return err;
5113
5114         if (rocker_port->dev->flags & IFF_UP)
5115                 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
5116
5117         return err;
5118 }
5119
5120 static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5121                                    struct net_device *master)
5122 {
5123         int err;
5124
5125         rocker_port->bridge_dev = master;
5126
5127         err = rocker_port_fwd_disable(rocker_port, NULL, 0);
5128         if (err)
5129                 return err;
5130         err = rocker_port_fwd_enable(rocker_port, NULL, 0);
5131
5132         return err;
5133 }
5134
5135 static int rocker_port_master_linked(struct rocker_port *rocker_port,
5136                                      struct net_device *master)
5137 {
5138         int err = 0;
5139
5140         if (netif_is_bridge_master(master))
5141                 err = rocker_port_bridge_join(rocker_port, master);
5142         else if (netif_is_ovs_master(master))
5143                 err = rocker_port_ovs_changed(rocker_port, master);
5144         return err;
5145 }
5146
5147 static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5148 {
5149         int err = 0;
5150
5151         if (rocker_port_is_bridged(rocker_port))
5152                 err = rocker_port_bridge_leave(rocker_port);
5153         else if (rocker_port_is_ovsed(rocker_port))
5154                 err = rocker_port_ovs_changed(rocker_port, NULL);
5155         return err;
5156 }
5157
5158 static int rocker_netdevice_event(struct notifier_block *unused,
5159                                   unsigned long event, void *ptr)
5160 {
5161         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5162         struct netdev_notifier_changeupper_info *info;
5163         struct rocker_port *rocker_port;
5164         int err;
5165
5166         if (!rocker_port_dev_check(dev))
5167                 return NOTIFY_DONE;
5168
5169         switch (event) {
5170         case NETDEV_CHANGEUPPER:
5171                 info = ptr;
5172                 if (!info->master)
5173                         goto out;
5174                 rocker_port = netdev_priv(dev);
5175                 if (info->linking) {
5176                         err = rocker_port_master_linked(rocker_port,
5177                                                         info->upper_dev);
5178                         if (err)
5179                                 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5180                                             err);
5181                 } else {
5182                         err = rocker_port_master_unlinked(rocker_port);
5183                         if (err)
5184                                 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5185                                             err);
5186                 }
5187                 break;
5188         }
5189 out:
5190         return NOTIFY_DONE;
5191 }
5192
5193 static struct notifier_block rocker_netdevice_nb __read_mostly = {
5194         .notifier_call = rocker_netdevice_event,
5195 };
5196
5197 /************************************
5198  * Net event notifier event handler
5199  ************************************/
5200
5201 static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5202 {
5203         struct rocker_port *rocker_port = netdev_priv(dev);
5204         int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5205                     ROCKER_OP_FLAG_NOWAIT;
5206         __be32 ip_addr = *(__be32 *)n->primary_key;
5207
5208         return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
5209 }
5210
5211 static int rocker_netevent_event(struct notifier_block *unused,
5212                                  unsigned long event, void *ptr)
5213 {
5214         struct net_device *dev;
5215         struct neighbour *n = ptr;
5216         int err;
5217
5218         switch (event) {
5219         case NETEVENT_NEIGH_UPDATE:
5220                 if (n->tbl != &arp_tbl)
5221                         return NOTIFY_DONE;
5222                 dev = n->dev;
5223                 if (!rocker_port_dev_check(dev))
5224                         return NOTIFY_DONE;
5225                 err = rocker_neigh_update(dev, n);
5226                 if (err)
5227                         netdev_warn(dev,
5228                                     "failed to handle neigh update (err %d)\n",
5229                                     err);
5230                 break;
5231         }
5232
5233         return NOTIFY_DONE;
5234 }
5235
5236 static struct notifier_block rocker_netevent_nb __read_mostly = {
5237         .notifier_call = rocker_netevent_event,
5238 };
5239
5240 /***********************
5241  * Module init and exit
5242  ***********************/
5243
5244 static int __init rocker_module_init(void)
5245 {
5246         int err;
5247
5248         register_netdevice_notifier(&rocker_netdevice_nb);
5249         register_netevent_notifier(&rocker_netevent_nb);
5250         err = pci_register_driver(&rocker_pci_driver);
5251         if (err)
5252                 goto err_pci_register_driver;
5253         return 0;
5254
5255 err_pci_register_driver:
5256         unregister_netevent_notifier(&rocker_netevent_nb);
5257         unregister_netdevice_notifier(&rocker_netdevice_nb);
5258         return err;
5259 }
5260
5261 static void __exit rocker_module_exit(void)
5262 {
5263         unregister_netevent_notifier(&rocker_netevent_nb);
5264         unregister_netdevice_notifier(&rocker_netdevice_nb);
5265         pci_unregister_driver(&rocker_pci_driver);
5266 }
5267
5268 module_init(rocker_module_init);
5269 module_exit(rocker_module_exit);
5270
5271 MODULE_LICENSE("GPL v2");
5272 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5273 MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5274 MODULE_DESCRIPTION("Rocker switch device driver");
5275 MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);