1a99b358afd03d1bbc792e01c8db2ee442494d49
[cascardo/linux.git] / drivers / net / ethernet / rocker / rocker_main.c
1 /*
2  * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3  * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
4  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <linux/ctype.h>
34 #include <net/switchdev.h>
35 #include <net/rtnetlink.h>
36 #include <net/ip_fib.h>
37 #include <net/netevent.h>
38 #include <net/arp.h>
39 #include <linux/io-64-nonatomic-lo-hi.h>
40 #include <generated/utsrelease.h>
41
42 #include "rocker_hw.h"
43 #include "rocker.h"
44 #include "rocker_tlv.h"
45
static const char rocker_driver_name[] = "rocker";

/* Rocker is the QEMU-emulated Red Hat switch; bind on its PCI ID only. */
static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};
52
/* Key of a flow table entry.  tbl_id selects which arm of the union is
 * meaningful; priority orders matches within a table (higher priority
 * match takes precedence).  The whole key (up to key_len in the owning
 * entry) is hashed/compared, so unused union bytes must stay zeroed.
 */
struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {	/* ingress port table */
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {	/* VLAN table */
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {	/* termination MAC table */
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {	/* IPv4 unicast routing table */
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {	/* bridging table */
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {	/* ACL policy table */
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
117
/* Software shadow of one hw flow table entry, hashed by key_crc32. */
struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;		/* pending hw add/mod/del command */
	u64 cookie;		/* driver-assigned id handed to the hw */
	struct rocker_flow_tbl_key key;
	size_t key_len;		/* significant portion of key for hashing */
	u32 key_crc32; /* key */
};
126
/* Software shadow of one hw group table entry, hashed by group_id.
 * group_ids is an optionally-allocated array of group_count member
 * group ids (used by L2 flood/multicast groups).
 */
struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {	/* L2 interface group */
			u8 pop_vlan;
		} l2_interface;
		struct {	/* L2 rewrite group */
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {	/* L3 unicast group */
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};
152
/* One FDB (MAC learning) entry, hashed by the CRC32 of its key. */
struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;		/* learned by hw vs. statically added */
	unsigned long touched;	/* jiffies of last activity, for ageing */
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
164
/* Maps a netdev ifindex to its allocated internal VLAN, refcounted so
 * multiple users of the same ifindex share one internal VLAN id.
 */
struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};
171
/* Refcounted neighbour (next-hop) entry, hashed by IPv4 address. */
struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;		/* hw L3 unicast group index */
	u8 eth_dst[ETH_ALEN];	/* resolved neighbour MAC */
	bool ttl_check;
};
181
/* Descriptor ring shared with the device.  head/tail are sw copies of
 * the producer/consumer positions; desc is the DMA-coherent descriptor
 * array and desc_info the parallel per-descriptor sw bookkeeping.
 */
struct rocker_dma_ring_info {
	size_t size;
	u32 head;
	u32 tail;
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;
	struct rocker_desc_info *desc_info;
	unsigned int type;	/* ROCKER_DMA_* ring id */
};
191
struct rocker;

/* Classes of control traffic a port can enable; indexes the per-port
 * ctrls[] array.
 */
enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,
	ROCKER_CTRL_LOCAL_ARP,
	ROCKER_CTRL_IPV4_MCAST,
	ROCKER_CTRL_IPV6_MCAST,
	ROCKER_CTRL_DFLT_BRIDGING,
	ROCKER_CTRL_DFLT_OVS,
	ROCKER_CTRL_MAX,
};

/* Internal VLAN ids (used for un-bridged ports) are carved out of a
 * 255-id range starting at this base.
 */
#define ROCKER_INTERNAL_VLAN_ID_BASE    0x0f00
#define ROCKER_N_INTERNAL_VLANS         255
#define ROCKER_VLAN_BITMAP_LEN          BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
208
/* Per front-panel-port state: the netdev it backs, its hw port number
 * (pport), bridging state, and its tx/rx DMA rings with NAPI contexts.
 */
struct rocker_port {
	struct net_device *dev;
	struct net_device *bridge_dev;	/* bridge/ovs master, if enslaved */
	struct rocker *rocker;
	unsigned int port_number;	/* 0-based sw index */
	u32 pport;			/* hw port id (1-based) */
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[ROCKER_CTRL_MAX];	/* enabled control-traffic classes */
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
};
226
/* Per-PCI-device state: BAR mapping, MSI-X vectors, cmd/event DMA rings
 * and the sw shadow tables (flow, group, fdb, internal-vlan, neigh),
 * each protected by its own spinlock.
 */
struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;
	} hw;
	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
};
254
/* Well-known MAC addresses and masks used when programming control
 * (term-MAC/ACL) flow entries for link-local, ARP and mcast traffic.
 */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
264
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 * Priorities only compete within the same table, so equal values
 * across different tables are unrelated.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};
285
286 static bool rocker_vlan_id_is_internal(__be16 vlan_id)
287 {
288         u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
289         u16 end = 0xffe;
290         u16 _vlan_id = ntohs(vlan_id);
291
292         return (_vlan_id >= start && _vlan_id <= end);
293 }
294
295 static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
296                                       u16 vid, bool *pop_vlan)
297 {
298         __be16 vlan_id;
299
300         if (pop_vlan)
301                 *pop_vlan = false;
302         vlan_id = htons(vid);
303         if (!vlan_id) {
304                 vlan_id = rocker_port->internal_vlan_id;
305                 if (pop_vlan)
306                         *pop_vlan = true;
307         }
308
309         return vlan_id;
310 }
311
312 static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
313                                    __be16 vlan_id)
314 {
315         if (rocker_vlan_id_is_internal(vlan_id))
316                 return 0;
317
318         return ntohs(vlan_id);
319 }
320
321 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
322 {
323         return rocker_port->bridge_dev &&
324                netif_is_bridge_master(rocker_port->bridge_dev);
325 }
326
327 static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
328 {
329         return rocker_port->bridge_dev &&
330                netif_is_ovs_master(rocker_port->bridge_dev);
331 }
332
/* Flags modifying how table-programming operations behave. */
#define ROCKER_OP_FLAG_REMOVE           BIT(0)	/* delete instead of add */
#define ROCKER_OP_FLAG_NOWAIT           BIT(1)	/* atomic ctx, no sleeping */
#define ROCKER_OP_FLAG_LEARNED          BIT(2)	/* entry was hw-learned */
#define ROCKER_OP_FLAG_REFRESH          BIT(3)	/* refresh existing entry */
337
338 static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
339                                 size_t size)
340 {
341         struct switchdev_trans_item *elem = NULL;
342         gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
343                           GFP_ATOMIC : GFP_KERNEL;
344
345         /* If in transaction prepare phase, allocate the memory
346          * and enqueue it on a transaction.  If in transaction
347          * commit phase, dequeue the memory from the transaction
348          * rather than re-allocating the memory.  The idea is the
349          * driver code paths for prepare and commit are identical
350          * so the memory allocated in the prepare phase is the
351          * memory used in the commit phase.
352          */
353
354         if (!trans) {
355                 elem = kzalloc(size + sizeof(*elem), gfp_flags);
356         } else if (switchdev_trans_ph_prepare(trans)) {
357                 elem = kzalloc(size + sizeof(*elem), gfp_flags);
358                 if (!elem)
359                         return NULL;
360                 switchdev_trans_item_enqueue(trans, elem, kfree, elem);
361         } else {
362                 elem = switchdev_trans_item_dequeue(trans);
363         }
364
365         return elem ? elem + 1 : NULL;
366 }
367
/* Transaction-aware kzalloc(); memory is zeroed by __rocker_mem_alloc(). */
static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
			    size_t size)
{
	return __rocker_mem_alloc(trans, flags, size);
}
373
374 static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
375                             size_t n, size_t size)
376 {
377         return __rocker_mem_alloc(trans, flags, n * size);
378 }
379
380 static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
381 {
382         struct switchdev_trans_item *elem;
383
384         /* Frees are ignored if in transaction prepare phase.  The
385          * memory remains on the per-port list until freed in the
386          * commit phase.
387          */
388
389         if (switchdev_trans_ph_prepare(trans))
390                 return;
391
392         elem = (struct switchdev_trans_item *) mem - 1;
393         kfree(elem);
394 }
395
/* Completion-style helper for waiting on cmd/test descriptor IRQs. */
struct rocker_wait {
	wait_queue_head_t wait;
	bool done;	/* completion fired */
	bool nowait;	/* caller will not wait; waker owns cleanup */
};
401
402 static void rocker_wait_reset(struct rocker_wait *wait)
403 {
404         wait->done = false;
405         wait->nowait = false;
406 }
407
408 static void rocker_wait_init(struct rocker_wait *wait)
409 {
410         init_waitqueue_head(&wait->wait);
411         rocker_wait_reset(wait);
412 }
413
414 static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
415                                               struct switchdev_trans *trans,
416                                               int flags)
417 {
418         struct rocker_wait *wait;
419
420         wait = rocker_kzalloc(trans, flags, sizeof(*wait));
421         if (!wait)
422                 return NULL;
423         rocker_wait_init(wait);
424         return wait;
425 }
426
/* Transaction-aware counterpart of kfree() for wait objects. */
static void rocker_wait_destroy(struct switchdev_trans *trans,
				struct rocker_wait *wait)
{
	rocker_kfree(trans, wait);
}
432
433 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
434                                       unsigned long timeout)
435 {
436         wait_event_timeout(wait->wait, wait->done, HZ / 10);
437         if (!wait->done)
438                 return false;
439         return true;
440 }
441
/* Complete a wait object; done is set before the wake-up so the
 * waiter's condition check observes it.
 */
static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}
447
448 static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
449 {
450         return rocker->msix_entries[vector].vector;
451 }
452
453 static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
454 {
455         return rocker_msix_vector(rocker_port->rocker,
456                                   ROCKER_MSIX_VEC_TX(rocker_port->port_number));
457 }
458
459 static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
460 {
461         return rocker_msix_vector(rocker_port->rocker,
462                                   ROCKER_MSIX_VEC_RX(rocker_port->port_number));
463 }
464
/* MMIO register accessors; `reg' is the suffix of a ROCKER_-prefixed
 * register offset macro (token-pasted below).
 */
#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
473
474 /*****************************
475  * HW basic testing functions
476  *****************************/
477
/* Probe-time sanity check: the emulated device doubles whatever is
 * written to its TEST registers; verify the 32- and 64-bit paths.
 * Returns 0 on success, -EIO on mismatch.
 */
static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	/* Drop the top bit so rnd * 2 cannot overflow 32 bits. */
	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	/* Shift by 31 (not 32) keeps bit 63 clear so rnd * 2 fits in u64. */
	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}
507
508 static int rocker_dma_test_one(const struct rocker *rocker,
509                                struct rocker_wait *wait, u32 test_type,
510                                dma_addr_t dma_handle, const unsigned char *buf,
511                                const unsigned char *expect, size_t size)
512 {
513         const struct pci_dev *pdev = rocker->pdev;
514         int i;
515
516         rocker_wait_reset(wait);
517         rocker_write32(rocker, TEST_DMA_CTRL, test_type);
518
519         if (!rocker_wait_event_timeout(wait, HZ / 10)) {
520                 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
521                 return -EIO;
522         }
523
524         for (i = 0; i < size; i++) {
525                 if (buf[i] != expect[i]) {
526                         dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
527                                 buf[i], i, expect[i]);
528                         return -EIO;
529                 }
530         }
531         return 0;
532 }
533
/* DMA self-test buffer: four pages, filled with a fixed byte pattern. */
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
536
537 static int rocker_dma_test_offset(const struct rocker *rocker,
538                                   struct rocker_wait *wait, int offset)
539 {
540         struct pci_dev *pdev = rocker->pdev;
541         unsigned char *alloc;
542         unsigned char *buf;
543         unsigned char *expect;
544         dma_addr_t dma_handle;
545         int i;
546         int err;
547
548         alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
549                         GFP_KERNEL | GFP_DMA);
550         if (!alloc)
551                 return -ENOMEM;
552         buf = alloc + offset;
553         expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
554
555         dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
556                                     PCI_DMA_BIDIRECTIONAL);
557         if (pci_dma_mapping_error(pdev, dma_handle)) {
558                 err = -EIO;
559                 goto free_alloc;
560         }
561
562         rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
563         rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
564
565         memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
566         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
567                                   dma_handle, buf, expect,
568                                   ROCKER_TEST_DMA_BUF_SIZE);
569         if (err)
570                 goto unmap;
571
572         memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
573         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
574                                   dma_handle, buf, expect,
575                                   ROCKER_TEST_DMA_BUF_SIZE);
576         if (err)
577                 goto unmap;
578
579         prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
580         for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
581                 expect[i] = ~buf[i];
582         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
583                                   dma_handle, buf, expect,
584                                   ROCKER_TEST_DMA_BUF_SIZE);
585         if (err)
586                 goto unmap;
587
588 unmap:
589         pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
590                          PCI_DMA_BIDIRECTIONAL);
591 free_alloc:
592         kfree(alloc);
593
594         return err;
595 }
596
/* Run the DMA self-test at every byte alignment within a word. */
static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int offset;

	for (offset = 0; offset < 8; offset++) {
		int err = rocker_dma_test_offset(rocker, wait, offset);

		if (err)
			return err;
	}
	return 0;
}
610
611 static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
612 {
613         struct rocker_wait *wait = dev_id;
614
615         rocker_wait_wake_up(wait);
616
617         return IRQ_HANDLED;
618 }
619
620 static int rocker_basic_hw_test(const struct rocker *rocker)
621 {
622         const struct pci_dev *pdev = rocker->pdev;
623         struct rocker_wait wait;
624         int err;
625
626         err = rocker_reg_test(rocker);
627         if (err) {
628                 dev_err(&pdev->dev, "reg test failed\n");
629                 return err;
630         }
631
632         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
633                           rocker_test_irq_handler, 0,
634                           rocker_driver_name, &wait);
635         if (err) {
636                 dev_err(&pdev->dev, "cannot assign test irq\n");
637                 return err;
638         }
639
640         rocker_wait_init(&wait);
641         rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
642
643         if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
644                 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
645                 err = -EIO;
646                 goto free_irq;
647         }
648
649         err = rocker_dma_test(rocker, &wait);
650         if (err)
651                 dev_err(&pdev->dev, "dma test failed\n");
652
653 free_irq:
654         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
655         return err;
656 }
657
658 /******************************************
659  * DMA rings and descriptors manipulations
660  ******************************************/
661
662 static u32 __pos_inc(u32 pos, size_t limit)
663 {
664         return ++pos == limit ? 0 : pos;
665 }
666
/* Translate a descriptor's completion status into a negative errno
 * (0 on success).  The GEN bit is masked out first: it marks
 * descriptor ownership, not an error code.  Unknown device codes map
 * to -EINVAL.
 */
static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}
694
695 static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
696 {
697         desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
698 }
699
700 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
701 {
702         u32 comp_err = desc_info->desc->comp_err;
703
704         return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
705 }
706
707 static void *
708 rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
709 {
710         return (void *)(uintptr_t)desc_info->desc->cookie;
711 }
712
713 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
714                                        void *ptr)
715 {
716         desc_info->desc->cookie = (uintptr_t) ptr;
717 }
718
719 static struct rocker_desc_info *
720 rocker_desc_head_get(const struct rocker_dma_ring_info *info)
721 {
722         static struct rocker_desc_info *desc_info;
723         u32 head = __pos_inc(info->head, info->size);
724
725         desc_info = &info->desc_info[info->head];
726         if (head == info->tail)
727                 return NULL; /* ring full */
728         desc_info->tlv_size = 0;
729         return desc_info;
730 }
731
732 static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
733 {
734         desc_info->desc->buf_size = desc_info->data_size;
735         desc_info->desc->tlv_size = desc_info->tlv_size;
736 }
737
/* Publish @desc_info to the device: commit its sizes, advance the sw
 * head and write the new head to the hw register.  Caller must have
 * obtained the descriptor via rocker_desc_head_get(), hence the
 * BUG_ON on ring-full.
 */
static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}
749
750 static struct rocker_desc_info *
751 rocker_desc_tail_get(struct rocker_dma_ring_info *info)
752 {
753         static struct rocker_desc_info *desc_info;
754
755         if (info->tail == info->head)
756                 return NULL; /* nothing to be done between head and tail */
757         desc_info = &info->desc_info[info->tail];
758         if (!rocker_desc_gen(desc_info))
759                 return NULL; /* gen bit not set, desc is not ready yet */
760         info->tail = __pos_inc(info->tail, info->size);
761         desc_info->tlv_size = desc_info->desc->tlv_size;
762         return desc_info;
763 }
764
765 static void rocker_dma_ring_credits_set(const struct rocker *rocker,
766                                         const struct rocker_dma_ring_info *info,
767                                         u32 credits)
768 {
769         if (credits)
770                 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
771 }
772
/* Clamp a requested ring size to a power of two within the range the
 * hardware supports.
 */
static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
778
/* Allocate a descriptor ring (coherent DMA array plus parallel sw
 * bookkeeping) and program its address/size into the device.
 * @size must already be a valid hw ring size (see
 * rocker_dma_ring_size_fix()).  Returns 0 or -ENOMEM.
 */
static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	/* link each sw bookkeeping slot to its hw descriptor */
	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}
814
815 static void rocker_dma_ring_destroy(const struct rocker *rocker,
816                                     const struct rocker_dma_ring_info *info)
817 {
818         rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
819
820         pci_free_consistent(rocker->pdev,
821                             info->size * sizeof(struct rocker_desc),
822                             info->desc, info->mapaddr);
823         kfree(info->desc_info);
824 }
825
/* Hand a freshly-created consumer ring (e.g. rx/event) to the device:
 * advance the head past all but the last descriptor so hw owns them.
 * Note the final descriptor is only committed, not head-advanced —
 * the loop variable deliberately survives the loop for that.
 */
static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}
840
/* Allocate and DMA-map a data buffer of @buf_size for every descriptor
 * in the ring.  On failure, unwinds all buffers mapped so far and
 * returns -ENOMEM/-EIO.
 */
static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	/* unwind only the descriptors successfully set up before i */
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}
887
/* Release every data buffer of the ring: clear the hw-visible
 * descriptor fields first so the device no longer sees the buffer,
 * then unmap and free it.  @direction must match the direction used
 * in rocker_dma_ring_bufs_alloc().
 */
static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		/* zero descriptor before tearing down the mapping */
		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}
906
/* Create the two device-global DMA rings (command and event) and
 * populate them with page-sized buffers.  The event ring is handed to
 * the hardware as producer right away; the command ring is driven from
 * rocker_cmd_exec().  Returns 0 or a negative errno, unwinding partial
 * setup on error.
 */
static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	/* protects cmd ring head between rocker_cmd_exec() and the IRQ */
	spin_lock_init(&rocker->cmd_ring_lock);

	/* cmd buffers carry both requests and responses -> bidirectional */
	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	/* events are written by the device only */
	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}
955
956 static void rocker_dma_rings_fini(struct rocker *rocker)
957 {
958         rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
959                                   PCI_DMA_BIDIRECTIONAL);
960         rocker_dma_ring_destroy(rocker, &rocker->event_ring);
961         rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
962                                   PCI_DMA_BIDIRECTIONAL);
963         rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
964 }
965
/* DMA-map an rx skb's data area and record the mapping in the rx
 * descriptor as FRAG_ADDR/FRAG_MAX_LEN TLVs for the device to fill.
 * Returns 0, -EIO on mapping failure, or -EMSGSIZE if the TLVs do not
 * fit (in which case the mapping is undone and tlv_size is zeroed so
 * hw skips the descriptor).
 */
static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	/* zero tlv_size tells hw to use another descriptor */
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}
989
990 static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
991 {
992         return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
993 }
994
/* Allocate an rx skb for one descriptor, DMA-map it, and stash the skb
 * pointer in the descriptor cookie for retrieval on completion.
 * Returns 0 or a negative errno; on any failure the descriptor is left
 * with a NULL cookie and zero tlv_size so the device skips it.
 */
static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	/* cookie set only after the mapping succeeded */
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}
1020
1021 static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
1022                                          const struct rocker_tlv **attrs)
1023 {
1024         struct pci_dev *pdev = rocker->pdev;
1025         dma_addr_t dma_handle;
1026         size_t len;
1027
1028         if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1029             !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1030                 return;
1031         dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1032         len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1033         pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1034 }
1035
/* Free the skb attached to an rx descriptor (if any), first unmapping
 * the DMA region described by the descriptor's own TLVs.
 */
static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	/* descriptor may never have had an skb attached */
	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}
1048
/* Attach a freshly mapped skb to every descriptor of the port's rx
 * ring.  Returns 0 or a negative errno; on failure the skbs allocated
 * so far are released again.
 */
static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	/* free only the skbs attached before the failing index */
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}
1069
1070 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
1071 {
1072         const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1073         const struct rocker *rocker = rocker_port->rocker;
1074         int i;
1075
1076         for (i = 0; i < rx_ring->size; i++)
1077                 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1078 }
1079
/* Create the per-port tx and rx DMA rings, allocate their descriptor
 * buffers, attach rx skbs, and finally hand the rx ring to the device
 * as producer.  Returns 0 or a negative errno, unwinding partial setup
 * on error in reverse order.
 */
static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	/* rx ring is ready: let the device start filling it */
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}
1140
/* Tear down the per-port rx and tx rings in reverse order of
 * rocker_port_dma_rings_init(): rx skbs first, then rx buffers/ring,
 * then tx buffers/ring.  Directions match those used at alloc time.
 */
static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
1153
1154 static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1155                                    bool enable)
1156 {
1157         u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1158
1159         if (enable)
1160                 val |= 1ULL << rocker_port->pport;
1161         else
1162                 val &= ~(1ULL << rocker_port->pport);
1163         rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1164 }
1165
1166 /********************************
1167  * Interrupt handler and helpers
1168  ********************************/
1169
/* Command-ring completion IRQ.  For each completed descriptor either
 * wake the waiter sleeping in rocker_cmd_exec() or, for fire-and-forget
 * (nowait) commands, dispose of the wait object here since nobody else
 * will.  Credits returned to hw allow it to reuse the descriptors.
 */
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			/* no waiter: recycle descriptor and wait object */
			rocker_desc_gen_clear(desc_info);
			rocker_wait_destroy(NULL, wait);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}
1193
/* Propagate a device link-up event to the net core and log it. */
static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}
1199
/* Propagate a device link-down event to the net core and log it. */
static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}
1205
/* Handle a LINK_CHANGED event TLV: validate the nested attributes,
 * translate the 1-based pport to a port index, and update carrier
 * state only when it actually changed.  Returns 0, -EIO on malformed
 * event, or -EINVAL on an out-of-range port.
 */
static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	/* pport is 1-based on the wire; ports[] is 0-based */
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}
1235
/* Forward declaration; defined later in this file and needed by
 * rocker_event_mac_vlan_seen() below to install learned FDB entries.
 */
static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);
1240
/* Handle a MAC_VLAN_SEEN (address-learning) event: validate the nested
 * attributes, map the 1-based pport to a port, and — only if the port
 * is in a learning-capable STP state — record the learned {MAC, VLAN}
 * in the FDB.  NOWAIT is used because this runs from IRQ context.
 */
static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	/* pport is 1-based on the wire; ports[] is 0-based */
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	/* only learn while STP allows it */
	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}
1272
1273 static int rocker_event_process(const struct rocker *rocker,
1274                                 const struct rocker_desc_info *desc_info)
1275 {
1276         const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1277         const struct rocker_tlv *info;
1278         u16 type;
1279
1280         rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1281         if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1282             !attrs[ROCKER_TLV_EVENT_INFO])
1283                 return -EIO;
1284
1285         type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1286         info = attrs[ROCKER_TLV_EVENT_INFO];
1287
1288         switch (type) {
1289         case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1290                 return rocker_event_link_change(rocker, info);
1291         case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1292                 return rocker_event_mac_vlan_seen(rocker, info);
1293         }
1294
1295         return -EOPNOTSUPP;
1296 }
1297
/* Event-ring IRQ: drain all completed event descriptors, process each
 * (logging but otherwise ignoring failures), hand every descriptor
 * straight back to the device, and return the credits in one batch.
 */
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		/* recycle the descriptor back to the device */
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}
1325
/* Per-port tx completion IRQ: defer all work to the tx NAPI poller. */
static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}
1333
/* Per-port rx IRQ: defer all work to the rx NAPI poller. */
static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}
1341
1342 /********************
1343  * Command interface
1344  ********************/
1345
/* Fills a command descriptor with request TLVs before it is posted;
 * returns 0 or a negative errno (typically -EMSGSIZE).
 */
typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv);

/* Parses response TLVs out of a completed command descriptor. */
typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
				    const struct rocker_desc_info *desc_info,
				    void *priv);
1353
/* Issue one command to the device: grab a cmd-ring descriptor under
 * cmd_ring_lock, let @prepare fill it, post it (unless this is the
 * prepare phase of a switchdev transaction), then either return
 * immediately (ROCKER_OP_FLAG_NOWAIT — completion and wait-object
 * cleanup happen in rocker_cmd_irq_handler()) or sleep up to HZ/10 for
 * completion and run @process on the response.  Returns 0 or a
 * negative errno (-EAGAIN: ring full, -EIO: timeout or device error).
 */
static int rocker_cmd_exec(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans, int flags,
			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
			   rocker_cmd_proc_cb_t process, void *process_priv)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
	unsigned long lock_flags;
	int err;

	wait = rocker_wait_create(rocker_port, trans, flags);
	if (!wait)
		return -ENOMEM;
	wait->nowait = nowait;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		err = -EAGAIN;
		goto out;
	}

	err = prepare(rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		goto out;
	}

	/* IRQ handler finds the wait object through the cookie */
	rocker_desc_cookie_ptr_set(desc_info, wait);

	/* don't touch hardware during a transaction prepare phase */
	if (!switchdev_trans_ph_prepare(trans))
		rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	/* fire-and-forget: the IRQ handler owns the wait object now */
	if (nowait)
		return 0;

	if (!switchdev_trans_ph_prepare(trans))
		if (!rocker_wait_event_timeout(wait, HZ / 10))
			return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker_port, desc_info, process_priv);

	/* release the descriptor back to the ring */
	rocker_desc_gen_clear(desc_info);
out:
	rocker_wait_destroy(trans, wait);
	return err;
}
1412
/* Build a GET_PORT_SETTINGS request: command type TLV plus a nested
 * CMD_INFO block carrying only the pport.  @priv is unused.  Returns
 * 0 or -EMSGSIZE if the descriptor buffer is exhausted.
 */
static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
1432
/* Parse a GET_PORT_SETTINGS response into the ethtool_cmd passed as
 * @priv: speed, duplex and autoneg from the device, with the remaining
 * ethtool fields filled with fixed TP/internal-transceiver values.
 * Returns 0 or -EIO on a malformed response.
 */
static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	/* static properties of the emulated port */
	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}
1470
/* Extract the port MAC address from a GET_PORT_SETTINGS response into
 * the ETH_ALEN buffer passed as @priv.  Returns 0, -EIO on a malformed
 * response, or -EINVAL if the attribute is not exactly ETH_ALEN bytes.
 */
static int
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}
1497
1498 static int
1499 rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
1500                                        const struct rocker_desc_info *desc_info,
1501                                        void *priv)
1502 {
1503         u8 *p_mode = priv;
1504         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1505         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1506         const struct rocker_tlv *attr;
1507
1508         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1509         if (!attrs[ROCKER_TLV_CMD_INFO])
1510                 return -EIO;
1511
1512         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1513                                 attrs[ROCKER_TLV_CMD_INFO]);
1514         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
1515         if (!attr)
1516                 return -EIO;
1517
1518         *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
1519         return 0;
1520 }
1521
/* Caller-supplied destination for a port's physical name. */
struct port_name {
	char *buf;	/* output buffer */
	size_t len;	/* total size of @buf in bytes */
};
1526
1527 static int
1528 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
1529                                             const struct rocker_desc_info *desc_info,
1530                                             void *priv)
1531 {
1532         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1533         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1534         struct port_name *name = priv;
1535         const struct rocker_tlv *attr;
1536         size_t i, j, len;
1537         const char *str;
1538
1539         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1540         if (!attrs[ROCKER_TLV_CMD_INFO])
1541                 return -EIO;
1542
1543         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1544                                 attrs[ROCKER_TLV_CMD_INFO]);
1545         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1546         if (!attr)
1547                 return -EIO;
1548
1549         len = min_t(size_t, rocker_tlv_len(attr), name->len);
1550         str = rocker_tlv_data(attr);
1551
1552         /* make sure name only contains alphanumeric characters */
1553         for (i = j = 0; i < len; ++i) {
1554                 if (isalnum(str[i])) {
1555                         name->buf[j] = str[i];
1556                         j++;
1557                 }
1558         }
1559
1560         if (j == 0)
1561                 return -EIO;
1562
1563         name->buf[j] = '\0';
1564
1565         return 0;
1566 }
1567
/* Build a SET_PORT_SETTINGS request carrying speed, duplex and autoneg
 * taken from the ethtool_cmd passed as @priv.  Returns 0 or -EMSGSIZE
 * if the descriptor buffer is exhausted.
 */
static int
rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
			       ethtool_cmd_speed(ecmd)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
			      ecmd->duplex))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
			      ecmd->autoneg))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
1597
/* Build a SET_PORT_SETTINGS request carrying a new MAC address
 * (@priv, ETH_ALEN bytes).  Returns 0 or -EMSGSIZE.
 */
static int
rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	const unsigned char *macaddr = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
			   ETH_ALEN, macaddr))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
1621
/* Build a SET_PORT_SETTINGS request carrying a new MTU (@priv points
 * at an int; sent on the wire as a u16 TLV).  Returns 0 or -EMSGSIZE.
 */
static int
rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      void *priv)
{
	int mtu = *(int *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
			       mtu))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
1645
/* Build a SET_PORT_SETTINGS request that tells the device whether the
 * port should learn addresses, derived from the port's current
 * BR_LEARNING bridge-port flag.  @priv is unused.  Returns 0 or
 * -EMSGSIZE.
 */
static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
			      !!(rocker_port->brport_flags & BR_LEARNING)))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
1668
1669 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1670                                                 struct ethtool_cmd *ecmd)
1671 {
1672         return rocker_cmd_exec(rocker_port, NULL, 0,
1673                                rocker_cmd_get_port_settings_prep, NULL,
1674                                rocker_cmd_get_port_settings_ethtool_proc,
1675                                ecmd);
1676 }
1677
1678 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1679                                                 unsigned char *macaddr)
1680 {
1681         return rocker_cmd_exec(rocker_port, NULL, 0,
1682                                rocker_cmd_get_port_settings_prep, NULL,
1683                                rocker_cmd_get_port_settings_macaddr_proc,
1684                                macaddr);
1685 }
1686
1687 static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1688                                              u8 *p_mode)
1689 {
1690         return rocker_cmd_exec(rocker_port, NULL, 0,
1691                                rocker_cmd_get_port_settings_prep, NULL,
1692                                rocker_cmd_get_port_settings_mode_proc, p_mode);
1693 }
1694
1695 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1696                                                 struct ethtool_cmd *ecmd)
1697 {
1698         return rocker_cmd_exec(rocker_port, NULL, 0,
1699                                rocker_cmd_set_port_settings_ethtool_prep,
1700                                ecmd, NULL, NULL);
1701 }
1702
1703 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1704                                                 unsigned char *macaddr)
1705 {
1706         return rocker_cmd_exec(rocker_port, NULL, 0,
1707                                rocker_cmd_set_port_settings_macaddr_prep,
1708                                macaddr, NULL, NULL);
1709 }
1710
1711 static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1712                                             int mtu)
1713 {
1714         return rocker_cmd_exec(rocker_port, NULL, 0,
1715                                rocker_cmd_set_port_settings_mtu_prep,
1716                                &mtu, NULL, NULL);
1717 }
1718
1719 static int rocker_port_set_learning(struct rocker_port *rocker_port,
1720                                     struct switchdev_trans *trans)
1721 {
1722         return rocker_cmd_exec(rocker_port, trans, 0,
1723                                rocker_cmd_set_port_learning_prep,
1724                                NULL, NULL, NULL);
1725 }
1726
/* Append the ingress-port table key TLVs (in_pport/mask and the next
 * table to jump to) for a flow add/mod command.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
                                const struct rocker_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.ig_port.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
                               entry->key.ig_port.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.ig_port.goto_tbl))
                return -EMSGSIZE;

        return 0;
}
1743
/* Append the VLAN table key TLVs for a flow add/mod command.  VLAN ids
 * are already big-endian (__be16) and emitted as-is.  NEW_VLAN_ID is
 * only sent for untagged traffic entries, where the switch assigns
 * the port VLAN.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
                             const struct rocker_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.vlan.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.vlan.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
                                entry->key.vlan.vlan_id_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.vlan.goto_tbl))
                return -EMSGSIZE;
        if (entry->key.vlan.untagged &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
                                entry->key.vlan.new_vlan_id))
                return -EMSGSIZE;

        return 0;
}
1767
/* Append the termination-MAC table key TLVs for a flow add/mod command:
 * ingress port, ethertype, destination MAC and VLAN (each with mask),
 * plus the goto table.  COPY_CPU_ACTION is emitted only when the entry
 * requests it.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
                                 const struct rocker_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.term_mac.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
                               entry->key.term_mac.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
                                entry->key.term_mac.eth_type))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->key.term_mac.eth_dst))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
                           ETH_ALEN, entry->key.term_mac.eth_dst_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.term_mac.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
                                entry->key.term_mac.vlan_id_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.term_mac.goto_tbl))
                return -EMSGSIZE;
        if (entry->key.term_mac.copy_to_cpu &&
            rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
                              entry->key.term_mac.copy_to_cpu))
                return -EMSGSIZE;

        return 0;
}
1803
/* Append the unicast-routing table key TLVs for a flow add/mod command:
 * ethertype, IPv4 destination (dst4/dst4_mask), goto table and the
 * L3 group the match resolves to.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
                                      const struct rocker_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
                                entry->key.ucast_routing.eth_type))
                return -EMSGSIZE;
        if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
                                entry->key.ucast_routing.dst4))
                return -EMSGSIZE;
        if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
                                entry->key.ucast_routing.dst4_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.ucast_routing.goto_tbl))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->key.ucast_routing.group_id))
                return -EMSGSIZE;

        return 0;
}
1826
/* Append the bridging table key TLVs for a flow add/mod command.
 * Destination MAC, its mask, VLAN and tunnel id are all optional and
 * only emitted when present in the entry; goto table and group id are
 * always emitted.  COPY_CPU_ACTION is emitted only when requested.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
                               const struct rocker_flow_tbl_entry *entry)
{
        if (entry->key.bridge.has_eth_dst &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->key.bridge.eth_dst))
                return -EMSGSIZE;
        if (entry->key.bridge.has_eth_dst_mask &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
                           ETH_ALEN, entry->key.bridge.eth_dst_mask))
                return -EMSGSIZE;
        if (entry->key.bridge.vlan_id &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.bridge.vlan_id))
                return -EMSGSIZE;
        if (entry->key.bridge.tunnel_id &&
            rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
                               entry->key.bridge.tunnel_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
                               entry->key.bridge.goto_tbl))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->key.bridge.group_id))
                return -EMSGSIZE;
        if (entry->key.bridge.copy_to_cpu &&
            rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
                              entry->key.bridge.copy_to_cpu))
                return -EMSGSIZE;

        return 0;
}
1860
/* Append the ACL policy table key TLVs for a flow add/mod command:
 * ingress port, source/destination MACs with masks, ethertype and
 * VLAN.  For IPv4/IPv6 ethertypes, the IP protocol and TOS byte are
 * also emitted, with the TOS split into its DSCP and ECN fields.
 * The group id is only emitted when it is not ROCKER_GROUP_NONE.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
                            const struct rocker_flow_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.acl.in_pport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
                               entry->key.acl.in_pport_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
                           ETH_ALEN, entry->key.acl.eth_src))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
                           ETH_ALEN, entry->key.acl.eth_src_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->key.acl.eth_dst))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
                           ETH_ALEN, entry->key.acl.eth_dst_mask))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
                                entry->key.acl.eth_type))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->key.acl.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
                                entry->key.acl.vlan_id_mask))
                return -EMSGSIZE;

        switch (ntohs(entry->key.acl.eth_type)) {
        case ETH_P_IP:
        case ETH_P_IPV6:
                if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
                                      entry->key.acl.ip_proto))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info,
                                      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
                                      entry->key.acl.ip_proto_mask))
                        return -EMSGSIZE;
                /* DSCP is the low 6 bits of the TOS byte */
                if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
                                      entry->key.acl.ip_tos & 0x3f))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info,
                                      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
                                      entry->key.acl.ip_tos_mask & 0x3f))
                        return -EMSGSIZE;
                /* ECN is the top 2 bits of the TOS byte */
                if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
                                      (entry->key.acl.ip_tos & 0xc0) >> 6))
                        return -EMSGSIZE;
                if (rocker_tlv_put_u8(desc_info,
                                      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
                                      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
                        return -EMSGSIZE;
                break;
        }

        if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
            rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->key.acl.group_id))
                return -EMSGSIZE;

        return 0;
}
1927
/* Prep callback that serializes a flow table add/mod entry (@priv)
 * into a command descriptor.  The common key fields (table id,
 * priority, hard time, cookie) go in first, then a per-table helper
 * appends the table-specific key TLVs.
 * Returns 0 on success, -EMSGSIZE if the descriptor runs out of room,
 * or -ENOTSUPP for an unknown table id.  On error the descriptor is
 * left with an unterminated nest; presumably the caller discards it --
 * TODO confirm against rocker_cmd_exec error handling.
 */
static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
                                   struct rocker_desc_info *desc_info,
                                   void *priv)
{
        const struct rocker_flow_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;
        int err = 0;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
                               entry->key.tbl_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
                               entry->key.priority))
                return -EMSGSIZE;
        /* hard time 0: entry does not expire */
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
                return -EMSGSIZE;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
                               entry->cookie))
                return -EMSGSIZE;

        switch (entry->key.tbl_id) {
        case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
                err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_VLAN:
                err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
                err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
                err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
                err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
                break;
        case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
                err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
                break;
        default:
                err = -ENOTSUPP;
                break;
        }

        if (err)
                return err;

        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}
1984
/* Prep callback that serializes a flow table delete command.  Only the
 * entry's cookie is needed to identify the flow to the device.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
                                   struct rocker_desc_info *desc_info,
                                   void *priv)
{
        const struct rocker_flow_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
                               entry->cookie))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}
2004
2005 static int
2006 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2007                                       struct rocker_group_tbl_entry *entry)
2008 {
2009         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2010                                ROCKER_GROUP_PORT_GET(entry->group_id)))
2011                 return -EMSGSIZE;
2012         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2013                               entry->l2_interface.pop_vlan))
2014                 return -EMSGSIZE;
2015
2016         return 0;
2017 }
2018
/* Append the L2 rewrite group TLVs for a group add/mod command: the
 * lower-level group to chain to, plus optional new source/destination
 * MACs and VLAN (emitted only when non-zero/non-empty).
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
                                    const struct rocker_group_tbl_entry *entry)
{
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
                               entry->l2_rewrite.group_id))
                return -EMSGSIZE;
        if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
                           ETH_ALEN, entry->l2_rewrite.eth_src))
                return -EMSGSIZE;
        if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->l2_rewrite.eth_dst))
                return -EMSGSIZE;
        if (entry->l2_rewrite.vlan_id &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->l2_rewrite.vlan_id))
                return -EMSGSIZE;

        return 0;
}
2041
/* Append the member-group list for an L2 flood/multicast group:
 * a count TLV followed by a nest holding one group-id TLV per member.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
                                   const struct rocker_group_tbl_entry *entry)
{
        int i;
        struct rocker_tlv *group_ids;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
                               entry->group_count))
                return -EMSGSIZE;

        group_ids = rocker_tlv_nest_start(desc_info,
                                          ROCKER_TLV_OF_DPA_GROUP_IDS);
        if (!group_ids)
                return -EMSGSIZE;

        for (i = 0; i < entry->group_count; i++)
                /* Note TLV array is 1-based */
                if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
                        return -EMSGSIZE;

        rocker_tlv_nest_end(desc_info, group_ids);

        return 0;
}
2067
/* Append the L3 unicast group TLVs for a group add/mod command:
 * optional rewrite MACs and VLAN (emitted only when set), the TTL
 * check flag, and the lower-level L2 group to chain to.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
                                    const struct rocker_group_tbl_entry *entry)
{
        if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
                           ETH_ALEN, entry->l3_unicast.eth_src))
                return -EMSGSIZE;
        if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
                           ETH_ALEN, entry->l3_unicast.eth_dst))
                return -EMSGSIZE;
        if (entry->l3_unicast.vlan_id &&
            rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
                                entry->l3_unicast.vlan_id))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
                              entry->l3_unicast.ttl_check))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
                               entry->l3_unicast.group_id))
                return -EMSGSIZE;

        return 0;
}
2093
/* Prep callback that serializes a group table add/mod entry (@priv)
 * into a command descriptor.  The group id goes in first, then a
 * helper selected by the type encoded in the group id appends the
 * type-specific TLVs.
 * Returns 0 on success, -EMSGSIZE if the descriptor runs out of room,
 * or -ENOTSUPP for an unknown group type.
 */
static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
                                    struct rocker_desc_info *desc_info,
                                    void *priv)
{
        struct rocker_group_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;
        int err = 0;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;

        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->group_id))
                return -EMSGSIZE;

        switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
                err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
                break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
                err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
                break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
        case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
                err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
                break;
        case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
                err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
                break;
        default:
                err = -ENOTSUPP;
                break;
        }

        if (err)
                return err;

        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}
2138
/* Prep callback that serializes a group table delete command.  Only
 * the group id is needed to identify the group to the device.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
                                    struct rocker_desc_info *desc_info,
                                    void *priv)
{
        const struct rocker_group_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
                               entry->group_id))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);

        return 0;
}
2158
2159 /***************************************************
2160  * Flow, group, FDB, internal VLAN and neigh tables
2161  ***************************************************/
2162
/* Initialize the driver's five software lookup tables (flow, group,
 * FDB, internal VLAN, neighbour) and the spinlock guarding each.
 * Always returns 0; the int return matches the probe-path call style.
 */
static int rocker_init_tbls(struct rocker *rocker)
{
        hash_init(rocker->flow_tbl);
        spin_lock_init(&rocker->flow_tbl_lock);

        hash_init(rocker->group_tbl);
        spin_lock_init(&rocker->group_tbl_lock);

        hash_init(rocker->fdb_tbl);
        spin_lock_init(&rocker->fdb_tbl_lock);

        hash_init(rocker->internal_vlan_tbl);
        spin_lock_init(&rocker->internal_vlan_tbl_lock);

        hash_init(rocker->neigh_tbl);
        spin_lock_init(&rocker->neigh_tbl_lock);

        return 0;
}
2182
/* Unlink every entry from all five software lookup tables, taking each
 * table's spinlock around its own pass.  Only hash_del() is called
 * here -- NOTE(review): the entries themselves are not freed in this
 * function; presumably ownership/freeing is handled elsewhere on the
 * teardown path -- verify there is no leak on driver removal.
 */
static void rocker_free_tbls(struct rocker *rocker)
{
        unsigned long flags;
        struct rocker_flow_tbl_entry *flow_entry;
        struct rocker_group_tbl_entry *group_entry;
        struct rocker_fdb_tbl_entry *fdb_entry;
        struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
        struct rocker_neigh_tbl_entry *neigh_entry;
        struct hlist_node *tmp;
        int bkt;

        spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
        hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
                hash_del(&flow_entry->entry);
        spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

        spin_lock_irqsave(&rocker->group_tbl_lock, flags);
        hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
                hash_del(&group_entry->entry);
        spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

        spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
        hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
                hash_del(&fdb_entry->entry);
        spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);

        spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
        hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
                           tmp, internal_vlan_entry, entry)
                hash_del(&internal_vlan_entry->entry);
        spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);

        spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
        hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
                hash_del(&neigh_entry->entry);
        spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
}
2220
2221 static struct rocker_flow_tbl_entry *
2222 rocker_flow_tbl_find(const struct rocker *rocker,
2223                      const struct rocker_flow_tbl_entry *match)
2224 {
2225         struct rocker_flow_tbl_entry *found;
2226         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2227
2228         hash_for_each_possible(rocker->flow_tbl, found,
2229                                entry, match->key_crc32) {
2230                 if (memcmp(&found->key, &match->key, key_len) == 0)
2231                         return found;
2232         }
2233
2234         return NULL;
2235 }
2236
/* Insert or replace the flow table entry with @match's key and send
 * the corresponding FLOW_ADD or FLOW_MOD to the device.  @match is
 * consumed: it becomes the stored table entry.  On a key hit the old
 * entry's cookie is reused (so the device modifies the existing flow)
 * and the old entry is released.  During the switchdev prepare phase
 * the hash table itself is left untouched.
 */
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
                               struct switchdev_trans *trans, int flags,
                               struct rocker_flow_tbl_entry *match)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
        size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long lock_flags;

        match->key_crc32 = crc32(~0, &match->key, key_len);

        spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

        found = rocker_flow_tbl_find(rocker, match);

        if (found) {
                /* replace: keep the existing cookie, drop the old entry */
                match->cookie = found->cookie;
                if (!switchdev_trans_ph_prepare(trans))
                        hash_del(&found->entry);
                rocker_kfree(trans, found);
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
        } else {
                found = match;
                found->cookie = rocker->flow_tbl_next_cookie++;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
        }

        if (!switchdev_trans_ph_prepare(trans))
                hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);

        spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

        return rocker_cmd_exec(rocker_port, trans, flags,
                               rocker_cmd_flow_tbl_add, found, NULL, NULL);
}
2273
/* Remove the flow table entry whose key matches @match (if present)
 * and send a FLOW_DEL for it to the device.  @match is only used as a
 * lookup key and is always freed here; the found entry is freed after
 * the delete command has been issued.  Returns 0 when no entry matched.
 */
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
                               struct switchdev_trans *trans, int flags,
                               struct rocker_flow_tbl_entry *match)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
        size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long lock_flags;
        int err = 0;

        match->key_crc32 = crc32(~0, &match->key, key_len);

        spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

        found = rocker_flow_tbl_find(rocker, match);

        if (found) {
                /* hash table is untouched during switchdev prepare phase */
                if (!switchdev_trans_ph_prepare(trans))
                        hash_del(&found->entry);
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
        }

        spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

        rocker_kfree(trans, match);

        if (found) {
                err = rocker_cmd_exec(rocker_port, trans, flags,
                                      rocker_cmd_flow_tbl_del,
                                      found, NULL, NULL);
                rocker_kfree(trans, found);
        }

        return err;
}
2309
2310 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2311                               struct switchdev_trans *trans, int flags,
2312                               struct rocker_flow_tbl_entry *entry)
2313 {
2314         if (flags & ROCKER_OP_FLAG_REMOVE)
2315                 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
2316         else
2317                 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
2318 }
2319
2320 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2321                                    struct switchdev_trans *trans, int flags,
2322                                    u32 in_pport, u32 in_pport_mask,
2323                                    enum rocker_of_dpa_table_id goto_tbl)
2324 {
2325         struct rocker_flow_tbl_entry *entry;
2326
2327         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2328         if (!entry)
2329                 return -ENOMEM;
2330
2331         entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2332         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2333         entry->key.ig_port.in_pport = in_pport;
2334         entry->key.ig_port.in_pport_mask = in_pport_mask;
2335         entry->key.ig_port.goto_tbl = goto_tbl;
2336
2337         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2338 }
2339
2340 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2341                                 struct switchdev_trans *trans, int flags,
2342                                 u32 in_pport, __be16 vlan_id,
2343                                 __be16 vlan_id_mask,
2344                                 enum rocker_of_dpa_table_id goto_tbl,
2345                                 bool untagged, __be16 new_vlan_id)
2346 {
2347         struct rocker_flow_tbl_entry *entry;
2348
2349         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2350         if (!entry)
2351                 return -ENOMEM;
2352
2353         entry->key.priority = ROCKER_PRIORITY_VLAN;
2354         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2355         entry->key.vlan.in_pport = in_pport;
2356         entry->key.vlan.vlan_id = vlan_id;
2357         entry->key.vlan.vlan_id_mask = vlan_id_mask;
2358         entry->key.vlan.goto_tbl = goto_tbl;
2359
2360         entry->key.vlan.untagged = untagged;
2361         entry->key.vlan.new_vlan_id = new_vlan_id;
2362
2363         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2364 }
2365
2366 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2367                                     struct switchdev_trans *trans,
2368                                     u32 in_pport, u32 in_pport_mask,
2369                                     __be16 eth_type, const u8 *eth_dst,
2370                                     const u8 *eth_dst_mask, __be16 vlan_id,
2371                                     __be16 vlan_id_mask, bool copy_to_cpu,
2372                                     int flags)
2373 {
2374         struct rocker_flow_tbl_entry *entry;
2375
2376         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2377         if (!entry)
2378                 return -ENOMEM;
2379
2380         if (is_multicast_ether_addr(eth_dst)) {
2381                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2382                 entry->key.term_mac.goto_tbl =
2383                          ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2384         } else {
2385                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2386                 entry->key.term_mac.goto_tbl =
2387                          ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2388         }
2389
2390         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2391         entry->key.term_mac.in_pport = in_pport;
2392         entry->key.term_mac.in_pport_mask = in_pport_mask;
2393         entry->key.term_mac.eth_type = eth_type;
2394         ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2395         ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2396         entry->key.term_mac.vlan_id = vlan_id;
2397         entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2398         entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2399
2400         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2401 }
2402
2403 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2404                                   struct switchdev_trans *trans, int flags,
2405                                   const u8 *eth_dst, const u8 *eth_dst_mask,
2406                                   __be16 vlan_id, u32 tunnel_id,
2407                                   enum rocker_of_dpa_table_id goto_tbl,
2408                                   u32 group_id, bool copy_to_cpu)
2409 {
2410         struct rocker_flow_tbl_entry *entry;
2411         u32 priority;
2412         bool vlan_bridging = !!vlan_id;
2413         bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2414         bool wild = false;
2415
2416         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2417         if (!entry)
2418                 return -ENOMEM;
2419
2420         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2421
2422         if (eth_dst) {
2423                 entry->key.bridge.has_eth_dst = 1;
2424                 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2425         }
2426         if (eth_dst_mask) {
2427                 entry->key.bridge.has_eth_dst_mask = 1;
2428                 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2429                 if (!ether_addr_equal(eth_dst_mask, ff_mac))
2430                         wild = true;
2431         }
2432
2433         priority = ROCKER_PRIORITY_UNKNOWN;
2434         if (vlan_bridging && dflt && wild)
2435                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2436         else if (vlan_bridging && dflt && !wild)
2437                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2438         else if (vlan_bridging && !dflt)
2439                 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2440         else if (!vlan_bridging && dflt && wild)
2441                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2442         else if (!vlan_bridging && dflt && !wild)
2443                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2444         else if (!vlan_bridging && !dflt)
2445                 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2446
2447         entry->key.priority = priority;
2448         entry->key.bridge.vlan_id = vlan_id;
2449         entry->key.bridge.tunnel_id = tunnel_id;
2450         entry->key.bridge.goto_tbl = goto_tbl;
2451         entry->key.bridge.group_id = group_id;
2452         entry->key.bridge.copy_to_cpu = copy_to_cpu;
2453
2454         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2455 }
2456
2457 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2458                                           struct switchdev_trans *trans,
2459                                           __be16 eth_type, __be32 dst,
2460                                           __be32 dst_mask, u32 priority,
2461                                           enum rocker_of_dpa_table_id goto_tbl,
2462                                           u32 group_id, int flags)
2463 {
2464         struct rocker_flow_tbl_entry *entry;
2465
2466         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2467         if (!entry)
2468                 return -ENOMEM;
2469
2470         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2471         entry->key.priority = priority;
2472         entry->key.ucast_routing.eth_type = eth_type;
2473         entry->key.ucast_routing.dst4 = dst;
2474         entry->key.ucast_routing.dst4_mask = dst_mask;
2475         entry->key.ucast_routing.goto_tbl = goto_tbl;
2476         entry->key.ucast_routing.group_id = group_id;
2477         entry->key_len = offsetof(struct rocker_flow_tbl_key,
2478                                   ucast_routing.group_id);
2479
2480         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2481 }
2482
2483 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2484                                struct switchdev_trans *trans, int flags,
2485                                u32 in_pport, u32 in_pport_mask,
2486                                const u8 *eth_src, const u8 *eth_src_mask,
2487                                const u8 *eth_dst, const u8 *eth_dst_mask,
2488                                __be16 eth_type, __be16 vlan_id,
2489                                __be16 vlan_id_mask, u8 ip_proto,
2490                                u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2491                                u32 group_id)
2492 {
2493         u32 priority;
2494         struct rocker_flow_tbl_entry *entry;
2495
2496         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2497         if (!entry)
2498                 return -ENOMEM;
2499
2500         priority = ROCKER_PRIORITY_ACL_NORMAL;
2501         if (eth_dst && eth_dst_mask) {
2502                 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2503                         priority = ROCKER_PRIORITY_ACL_DFLT;
2504                 else if (is_link_local_ether_addr(eth_dst))
2505                         priority = ROCKER_PRIORITY_ACL_CTRL;
2506         }
2507
2508         entry->key.priority = priority;
2509         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2510         entry->key.acl.in_pport = in_pport;
2511         entry->key.acl.in_pport_mask = in_pport_mask;
2512
2513         if (eth_src)
2514                 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2515         if (eth_src_mask)
2516                 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2517         if (eth_dst)
2518                 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2519         if (eth_dst_mask)
2520                 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2521
2522         entry->key.acl.eth_type = eth_type;
2523         entry->key.acl.vlan_id = vlan_id;
2524         entry->key.acl.vlan_id_mask = vlan_id_mask;
2525         entry->key.acl.ip_proto = ip_proto;
2526         entry->key.acl.ip_proto_mask = ip_proto_mask;
2527         entry->key.acl.ip_tos = ip_tos;
2528         entry->key.acl.ip_tos_mask = ip_tos_mask;
2529         entry->key.acl.group_id = group_id;
2530
2531         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2532 }
2533
/* Look up a group table entry by match->group_id.  Callers (see
 * rocker_group_tbl_add/del) hold rocker->group_tbl_lock.  Returns the
 * entry or NULL if not found.
 */
static struct rocker_group_tbl_entry *
rocker_group_tbl_find(const struct rocker *rocker,
		      const struct rocker_group_tbl_entry *match)
{
	struct rocker_group_tbl_entry *found;

	hash_for_each_possible(rocker->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}
2548
2549 static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
2550                                         struct rocker_group_tbl_entry *entry)
2551 {
2552         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2553         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2554         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2555                 rocker_kfree(trans, entry->group_ids);
2556                 break;
2557         default:
2558                 break;
2559         }
2560         rocker_kfree(trans, entry);
2561 }
2562
/* Insert @match into the software group table and issue the hardware
 * command.  If an entry with the same group_id already exists it is
 * freed and replaced by @match, and the command becomes GROUP_MOD
 * instead of GROUP_ADD.  Hash-table mutation is skipped during the
 * prepare phase of a switchdev transaction.
 */
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		/* replace the existing entry with @match */
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		rocker_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(rocker->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(rocker_port, trans, flags,
			       rocker_cmd_group_tbl_add, found, NULL, NULL);
}
2594
/* Remove the group table entry matching @match->group_id and issue the
 * hardware GROUP_DEL command.  @match itself is always consumed
 * (freed).  Returns 0 when no matching entry exists.
 */
static int rocker_group_tbl_del(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		/* hash removal is skipped on the prepare phase */
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	/* @match was only used as a lookup key; free it */
	rocker_group_tbl_entry_free(trans, match);

	if (found) {
		err = rocker_cmd_exec(rocker_port, trans, flags,
				      rocker_cmd_group_tbl_del,
				      found, NULL, NULL);
		rocker_group_tbl_entry_free(trans, found);
	}

	return err;
}
2627
2628 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2629                                struct switchdev_trans *trans, int flags,
2630                                struct rocker_group_tbl_entry *entry)
2631 {
2632         if (flags & ROCKER_OP_FLAG_REMOVE)
2633                 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
2634         else
2635                 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
2636 }
2637
2638 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2639                                      struct switchdev_trans *trans, int flags,
2640                                      __be16 vlan_id, u32 out_pport,
2641                                      int pop_vlan)
2642 {
2643         struct rocker_group_tbl_entry *entry;
2644
2645         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2646         if (!entry)
2647                 return -ENOMEM;
2648
2649         entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2650         entry->l2_interface.pop_vlan = pop_vlan;
2651
2652         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2653 }
2654
2655 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2656                                    struct switchdev_trans *trans,
2657                                    int flags, u8 group_count,
2658                                    const u32 *group_ids, u32 group_id)
2659 {
2660         struct rocker_group_tbl_entry *entry;
2661
2662         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2663         if (!entry)
2664                 return -ENOMEM;
2665
2666         entry->group_id = group_id;
2667         entry->group_count = group_count;
2668
2669         entry->group_ids = rocker_kcalloc(trans, flags,
2670                                           group_count, sizeof(u32));
2671         if (!entry->group_ids) {
2672                 rocker_kfree(trans, entry);
2673                 return -ENOMEM;
2674         }
2675         memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2676
2677         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2678 }
2679
2680 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2681                                  struct switchdev_trans *trans, int flags,
2682                                  __be16 vlan_id, u8 group_count,
2683                                  const u32 *group_ids, u32 group_id)
2684 {
2685         return rocker_group_l2_fan_out(rocker_port, trans, flags,
2686                                        group_count, group_ids,
2687                                        group_id);
2688 }
2689
2690 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2691                                    struct switchdev_trans *trans, int flags,
2692                                    u32 index, const u8 *src_mac, const u8 *dst_mac,
2693                                    __be16 vlan_id, bool ttl_check, u32 pport)
2694 {
2695         struct rocker_group_tbl_entry *entry;
2696
2697         entry = rocker_kzalloc(trans, flags, sizeof(*entry));
2698         if (!entry)
2699                 return -ENOMEM;
2700
2701         entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2702         if (src_mac)
2703                 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2704         if (dst_mac)
2705                 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2706         entry->l3_unicast.vlan_id = vlan_id;
2707         entry->l3_unicast.ttl_check = ttl_check;
2708         entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2709
2710         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2711 }
2712
/* Look up a neigh table entry by IPv4 address.  Callers (see
 * rocker_port_ipv4_neigh/nh) hold rocker->neigh_tbl_lock.  Returns the
 * entry or NULL if not found.
 */
static struct rocker_neigh_tbl_entry *
rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
{
	struct rocker_neigh_tbl_entry *found;

	hash_for_each_possible(rocker->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}
2725
/* Link @entry into the neigh table.  An index is allocated on any
 * phase except transaction commit; the ref count bump and hash insert
 * are skipped during the prepare phase.
 */
static void _rocker_neigh_add(struct rocker *rocker,
			      struct switchdev_trans *trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (!switchdev_trans_ph_commit(trans))
		entry->index = rocker->neigh_tbl_next_index++;
	if (switchdev_trans_ph_prepare(trans))
		return;
	entry->ref_count++;
	hash_add(rocker->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}
2738
/* Drop a reference on @entry; on the last reference, unlink it from
 * the neigh table and free it.  No-op during the prepare phase.
 */
static void _rocker_neigh_del(struct switchdev_trans *trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (switchdev_trans_ph_prepare(trans))
		return;
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		rocker_kfree(trans, entry);
	}
}
2749
/* With @eth_dst, refresh the entry's destination MAC and ttl_check
 * flag; without it, just take an extra reference (the ref bump is
 * skipped during the prepare phase).
 */
static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
				 struct switchdev_trans *trans,
				 const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else if (!switchdev_trans_ph_prepare(trans)) {
		entry->ref_count++;
	}
}
2761
/* Add, update, or remove the L3 unicast group and /32 host route for
 * an IPv4 neighbor.  ROCKER_OP_FLAG_REMOVE in @flags selects removal;
 * otherwise the neighbor is added (not yet in the table) or updated
 * (already present).
 */
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans,
				  int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	/* Scratch entry: for remove/update it receives a snapshot of
	 * the table entry taken under the lock, so the hardware
	 * programming below can run unlocked.
	 */
	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);

	/* classify the operation against the current table state */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		_rocker_neigh_add(rocker, trans, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;	/* removing an entry that doesn't exist */
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = rocker_group_l3_unicast(rocker_port, trans, flags,
				      entry->index,
				      rocker_port->dev->dev_addr,
				      entry->eth_dst,
				      rocker_port->internal_vlan_id,
				      entry->ttl_check,
				      rocker_port->pport);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
						     eth_type, ip_addr,
						     inet_make_mask(32),
						     priority, goto_tbl,
						     group_id, flags);

		if (err)
			netdev_err(rocker_port->dev,
				   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* on add, @entry was inserted into the table and is owned by
	 * it; otherwise it was only a scratch copy
	 */
	if (!adding)
		rocker_kfree(trans, entry);

	return err;
}
2853
2854 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
2855                                     struct switchdev_trans *trans,
2856                                     __be32 ip_addr)
2857 {
2858         struct net_device *dev = rocker_port->dev;
2859         struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
2860         int err = 0;
2861
2862         if (!n) {
2863                 n = neigh_create(&arp_tbl, &ip_addr, dev);
2864                 if (IS_ERR(n))
2865                         return IS_ERR(n);
2866         }
2867
2868         /* If the neigh is already resolved, then go ahead and
2869          * install the entry, otherwise start the ARP process to
2870          * resolve the neigh.
2871          */
2872
2873         if (n->nud_state & NUD_VALID)
2874                 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
2875                                              ip_addr, n->ha);
2876         else
2877                 neigh_event_send(n, NULL);
2878
2879         neigh_release(n);
2880         return err;
2881 }
2882
/* Get (or release) the L3 unicast nexthop index for @ip_addr, creating
 * a placeholder neigh entry if the address isn't known yet.  On add,
 * *index is set to the entry's index and ARP resolution is started
 * when the neighbor MAC is not resolved.
 */
static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       __be32 ip_addr, u32 *index)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);
	if (found)
		*index = found->index;

	/* classify the operation against the current table state */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		/* placeholder entry with a zeroed MAC; resolved below */
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		_rocker_neigh_add(rocker, trans, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		/* NULL eth_dst: just bump the entry's ref count */
		_rocker_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;	/* removing an entry that doesn't exist */
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	/* on add, the table owns @entry; otherwise it went unused */
	if (!adding)
		rocker_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);

	return err;
}
2941
2942 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
2943                                         struct switchdev_trans *trans,
2944                                         int flags, __be16 vlan_id)
2945 {
2946         struct rocker_port *p;
2947         const struct rocker *rocker = rocker_port->rocker;
2948         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2949         u32 *group_ids;
2950         u8 group_count = 0;
2951         int err = 0;
2952         int i;
2953
2954         group_ids = rocker_kcalloc(trans, flags,
2955                                    rocker->port_count, sizeof(u32));
2956         if (!group_ids)
2957                 return -ENOMEM;
2958
2959         /* Adjust the flood group for this VLAN.  The flood group
2960          * references an L2 interface group for each port in this
2961          * VLAN.
2962          */
2963
2964         for (i = 0; i < rocker->port_count; i++) {
2965                 p = rocker->ports[i];
2966                 if (!p)
2967                         continue;
2968                 if (!rocker_port_is_bridged(p))
2969                         continue;
2970                 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
2971                         group_ids[group_count++] =
2972                                 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
2973                 }
2974         }
2975
2976         /* If there are no bridged ports in this VLAN, we're done */
2977         if (group_count == 0)
2978                 goto no_ports_in_vlan;
2979
2980         err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
2981                                     group_count, group_ids, group_id);
2982         if (err)
2983                 netdev_err(rocker_port->dev,
2984                            "Error (%d) port VLAN l2 flood group\n", err);
2985
2986 no_ports_in_vlan:
2987         rocker_kfree(trans, group_ids);
2988         return err;
2989 }
2990
/* Maintain the L2 interface groups this port needs for membership in
 * @vlan_id: one for the port itself (only in LEARNING/FORWARDING STP
 * states) and a shared one toward the CPU port, created when the first
 * port joins the VLAN and destroyed when the last one leaves.
 */
static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      __be16 vlan_id, bool pop_vlan)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_port *p;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = rocker_port->pport;
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	/* count ports currently holding membership in this VLAN */
	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* proceed only when the first port joins (adding, ref == 1) or
	 * the last port left (removing, ref == 0)
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = rocker_group_l2_interface(rocker_port, trans, flags,
					vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
3045
/* Table of control-traffic classes.  The acl/bridge/term flags select
 * which flow table a class is programmed through (presumably via the
 * rocker_port_ctrl_vlan_* helpers below — confirm against callers).
 */
static struct rocker_ctrl {
	const u8 *eth_dst;		/* dst MAC to match, NULL = none */
	const u8 *eth_dst_mask;		/* mask for eth_dst */
	__be16 eth_type;		/* EtherType to match, 0 = any */
	bool acl;			/* program via ACL policy table */
	bool bridge;			/* program via bridging table */
	bool term;			/* program via termination-MAC table */
	bool copy_to_cpu;		/* also deliver a copy to the CPU */
} rocker_ctrls[] = {
	[ROCKER_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[ROCKER_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[ROCKER_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term  = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term  = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
3096
3097 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3098                                      struct switchdev_trans *trans, int flags,
3099                                      const struct rocker_ctrl *ctrl, __be16 vlan_id)
3100 {
3101         u32 in_pport = rocker_port->pport;
3102         u32 in_pport_mask = 0xffffffff;
3103         u32 out_pport = 0;
3104         const u8 *eth_src = NULL;
3105         const u8 *eth_src_mask = NULL;
3106         __be16 vlan_id_mask = htons(0xffff);
3107         u8 ip_proto = 0;
3108         u8 ip_proto_mask = 0;
3109         u8 ip_tos = 0;
3110         u8 ip_tos_mask = 0;
3111         u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3112         int err;
3113
3114         err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3115                                   in_pport, in_pport_mask,
3116                                   eth_src, eth_src_mask,
3117                                   ctrl->eth_dst, ctrl->eth_dst_mask,
3118                                   ctrl->eth_type,
3119                                   vlan_id, vlan_id_mask,
3120                                   ip_proto, ip_proto_mask,
3121                                   ip_tos, ip_tos_mask,
3122                                   group_id);
3123
3124         if (err)
3125                 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3126
3127         return err;
3128 }
3129
3130 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3131                                         struct switchdev_trans *trans,
3132                                         int flags,
3133                                         const struct rocker_ctrl *ctrl,
3134                                         __be16 vlan_id)
3135 {
3136         enum rocker_of_dpa_table_id goto_tbl =
3137                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3138         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3139         u32 tunnel_id = 0;
3140         int err;
3141
3142         if (!rocker_port_is_bridged(rocker_port))
3143                 return 0;
3144
3145         err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3146                                      ctrl->eth_dst, ctrl->eth_dst_mask,
3147                                      vlan_id, tunnel_id,
3148                                      goto_tbl, group_id, ctrl->copy_to_cpu);
3149
3150         if (err)
3151                 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3152
3153         return err;
3154 }
3155
3156 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3157                                       struct switchdev_trans *trans, int flags,
3158                                       const struct rocker_ctrl *ctrl, __be16 vlan_id)
3159 {
3160         u32 in_pport_mask = 0xffffffff;
3161         __be16 vlan_id_mask = htons(0xffff);
3162         int err;
3163
3164         if (ntohs(vlan_id) == 0)
3165                 vlan_id = rocker_port->internal_vlan_id;
3166
3167         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3168                                        rocker_port->pport, in_pport_mask,
3169                                        ctrl->eth_type, ctrl->eth_dst,
3170                                        ctrl->eth_dst_mask, vlan_id,
3171                                        vlan_id_mask, ctrl->copy_to_cpu,
3172                                        flags);
3173
3174         if (err)
3175                 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3176
3177         return err;
3178 }
3179
3180 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3181                                  struct switchdev_trans *trans, int flags,
3182                                  const struct rocker_ctrl *ctrl, __be16 vlan_id)
3183 {
3184         if (ctrl->acl)
3185                 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3186                                                  ctrl, vlan_id);
3187         if (ctrl->bridge)
3188                 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3189                                                     ctrl, vlan_id);
3190
3191         if (ctrl->term)
3192                 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
3193                                                   ctrl, vlan_id);
3194
3195         return -EOPNOTSUPP;
3196 }
3197
3198 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3199                                      struct switchdev_trans *trans, int flags,
3200                                      __be16 vlan_id)
3201 {
3202         int err = 0;
3203         int i;
3204
3205         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3206                 if (rocker_port->ctrls[i]) {
3207                         err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3208                                                     &rocker_ctrls[i], vlan_id);
3209                         if (err)
3210                                 return err;
3211                 }
3212         }
3213
3214         return err;
3215 }
3216
3217 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3218                             struct switchdev_trans *trans, int flags,
3219                             const struct rocker_ctrl *ctrl)
3220 {
3221         u16 vid;
3222         int err = 0;
3223
3224         for (vid = 1; vid < VLAN_N_VID; vid++) {
3225                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3226                         continue;
3227                 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3228                                             ctrl, htons(vid));
3229                 if (err)
3230                         break;
3231         }
3232
3233         return err;
3234 }
3235
/* Add (or remove, per ROCKER_OP_FLAG_REMOVE) VLAN @vid on the port:
 * track it in vlan_bitmap, install the per-VLAN control entries, the
 * L2 interface/flood groups, and the VLAN flow-table entry that maps
 * the wire VID onto the internal VLAN ID.  During the prepare phase
 * of a switchdev transaction the bitmap change is undone on exit.
 */
static int rocker_port_vlan(struct rocker_port *rocker_port,
                            struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = rocker_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	int err;

	/* map the wire VID onto the port's internal VLAN representation */
	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);

	if (adding && test_bit(ntohs(internal_vlan_id),
			       rocker_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding && !test_bit(ntohs(internal_vlan_id),
				      rocker_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	if (adding) {
		err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
						internal_vlan_id);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
					 internal_vlan_id, untagged);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
					   internal_vlan_id);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
				   in_pport, vlan_id, vlan_id_mask,
				   goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN table\n", err);

err_out:
	/* the prepare phase must have no lasting effect: undo the bit flip */
	if (switchdev_trans_ph_prepare(trans))
		change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	return err;
}
3299
3300 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3301                               struct switchdev_trans *trans, int flags)
3302 {
3303         enum rocker_of_dpa_table_id goto_tbl;
3304         u32 in_pport;
3305         u32 in_pport_mask;
3306         int err;
3307
3308         /* Normal Ethernet Frames.  Matches pkts from any local physical
3309          * ports.  Goto VLAN tbl.
3310          */
3311
3312         in_pport = 0;
3313         in_pport_mask = 0xffff0000;
3314         goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3315
3316         err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3317                                       in_pport, in_pport_mask,
3318                                       goto_tbl);
3319         if (err)
3320                 netdev_err(rocker_port->dev,
3321                            "Error (%d) ingress port table entry\n", err);
3322
3323         return err;
3324 }
3325
/* Deferred-work context used to notify the switchdev layer about a
 * learned (or removed) FDB address; allocated in
 * rocker_port_fdb_learn() and consumed by rocker_port_fdb_learn_work().
 */
struct rocker_fdb_learn_work {
	struct work_struct work;
	struct rocker_port *rocker_port;
	struct switchdev_trans *trans;	/* transaction the context was allocated under */
	int flags;			/* snapshot of ROCKER_OP_FLAG_* bits */
	u8 addr[ETH_ALEN];		/* MAC address being reported */
	u16 vid;			/* VLAN ID, host byte order */
};
3334
/* Workqueue handler: deliver a deferred SWITCHDEV_FDB_ADD/DEL
 * notification for a hardware-learned address.  The notifier calls are
 * made under rtnl_lock; the work context frees itself on exit.
 */
static void rocker_port_fdb_learn_work(struct work_struct *work)
{
	const struct rocker_fdb_learn_work *lw =
		container_of(work, struct rocker_fdb_learn_work, work);
	bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
	bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	/* only learned entries are propagated; others fall through silently */
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->rocker_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->rocker_port->dev, &info.info);
	rtnl_unlock();

	rocker_kfree(lw->trans, work);
}
3357
/* Program the bridging flow-table entry for @addr/@vlan_id and, if
 * learning sync (BR_LEARNING_SYNC) is enabled on a bridged port, queue
 * deferred work to notify the bridge layer of the address.
 */
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
                                 struct switchdev_trans *trans, int flags,
                                 const u8 *addr, __be16 vlan_id)
{
	struct rocker_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = rocker_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;	/* no egress group unless bridged */
	bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	if (rocker_port_is_bridged(rocker_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	/* REFRESH only updates aging bookkeeping; skip rewriting the entry */
	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
		err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
					     NULL, vlan_id, tunnel_id, goto_tbl,
					     group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	lw = rocker_kzalloc(trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);

	lw->rocker_port = rocker_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);

	/* prepare phase must be side-effect free: drop instead of schedule */
	if (switchdev_trans_ph_prepare(trans))
		rocker_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}
3408
3409 static struct rocker_fdb_tbl_entry *
3410 rocker_fdb_tbl_find(const struct rocker *rocker,
3411                     const struct rocker_fdb_tbl_entry *match)
3412 {
3413         struct rocker_fdb_tbl_entry *found;
3414
3415         hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3416                 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3417                         return found;
3418
3419         return NULL;
3420 }
3421
/* Add, refresh, or remove the software FDB entry for @addr/@vlan_id,
 * then push the corresponding change to hardware via
 * rocker_port_fdb_learn().  Removing a non-existent entry is a no-op;
 * re-adding an existing entry becomes a REFRESH (aging update only).
 */
static int rocker_port_fdb(struct rocker_port *rocker_port,
                           struct switchdev_trans *trans,
                           const unsigned char *addr,
                           __be16 vlan_id, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *fdb;
	struct rocker_fdb_tbl_entry *found;
	bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	/* candidate entry; doubles as the lookup key below */
	fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.rocker_port = rocker_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	found = rocker_fdb_tbl_find(rocker, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			rocker_kfree(trans, fdb);
			/* prepare phase must not mutate the table */
			if (!switchdev_trans_ph_prepare(trans))
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_add(rocker->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		rocker_kfree(trans, fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= ROCKER_OP_FLAG_REFRESH;
	}

	return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
}
3474
/* Remove every hardware-learned FDB entry belonging to this port from
 * both the software table and hardware (via fdb_learn with REMOVE).
 * Skipped while the port is in the LEARNING or FORWARDING STP states.
 */
static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
                                 struct switchdev_trans *trans, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		if (!found->learned)
			continue;	/* keep non-learned (static) entries */
		err = rocker_port_fdb_learn(rocker_port, trans, flags,
					    found->key.addr,
					    found->key.vlan_id);
		if (err)
			goto err_out;
		/* prepare phase must leave the table untouched */
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}
3512
/* FDB aging timer callback: walk the table, expire learned entries
 * whose last-touched time plus the owning port's ageing_time has
 * passed, and re-arm the timer for the next soonest expiry (at least
 * BR_MIN_AGEING_TIME from now).
 */
static void rocker_fdb_cleanup(unsigned long data)
{
	struct rocker *rocker = (struct rocker *)data;
	struct rocker_port *rocker_port;
	struct rocker_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
		    ROCKER_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;	/* static entries never age out */
		rocker_port = entry->key.rocker_port;
		expires = entry->touched + rocker_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			/* expired: remove from hardware and from the table */
			rocker_port_fdb_learn(rocker_port, NULL,
					      flags, entry->key.addr,
					      entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
3547
3548 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3549                                   struct switchdev_trans *trans, int flags,
3550                                   __be16 vlan_id)
3551 {
3552         u32 in_pport_mask = 0xffffffff;
3553         __be16 eth_type;
3554         const u8 *dst_mac_mask = ff_mac;
3555         __be16 vlan_id_mask = htons(0xffff);
3556         bool copy_to_cpu = false;
3557         int err;
3558
3559         if (ntohs(vlan_id) == 0)
3560                 vlan_id = rocker_port->internal_vlan_id;
3561
3562         eth_type = htons(ETH_P_IP);
3563         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3564                                        rocker_port->pport, in_pport_mask,
3565                                        eth_type, rocker_port->dev->dev_addr,
3566                                        dst_mac_mask, vlan_id, vlan_id_mask,
3567                                        copy_to_cpu, flags);
3568         if (err)
3569                 return err;
3570
3571         eth_type = htons(ETH_P_IPV6);
3572         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3573                                        rocker_port->pport, in_pport_mask,
3574                                        eth_type, rocker_port->dev->dev_addr,
3575                                        dst_mac_mask, vlan_id, vlan_id_mask,
3576                                        copy_to_cpu, flags);
3577
3578         return err;
3579 }
3580
/* Enable or disable forwarding for the port according to its current
 * STP state by adding/removing the L2 interface group on each of the
 * port's VLANs.
 */
static int rocker_port_fwding(struct rocker_port *rocker_port,
                              struct switchdev_trans *trans, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		flags |= ROCKER_OP_FLAG_REMOVE;

	out_pport = rocker_port->pport;
	/* walk every VID configured on the port (VID 0 excluded) */
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		/* pop the VLAN tag on egress for internal VLAN IDs */
		pop_vlan = rocker_vlan_id_is_internal(vlan_id);
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}
3619
/* Transition the port to a new STP state: compute which control-plane
 * packet classes the new state wants, apply the delta to the ctrl
 * entries, flush learned FDB entries, and toggle forwarding.  During
 * the prepare phase of a switchdev transaction, both the ctrl array
 * and the STP state are restored before returning.
 */
static int rocker_port_stp_update(struct rocker_port *rocker_port,
                                  struct switchdev_trans *trans, int flags,
                                  u8 state)
{
	bool want[ROCKER_CTRL_MAX] = { 0, };
	bool prev_ctrls[ROCKER_CTRL_MAX];
	u8 uninitialized_var(prev_state);
	int err;
	int i;

	/* snapshot state so the prepare phase can be fully rolled back */
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
		prev_state = rocker_port->stp_state;
	}

	if (rocker_port->stp_state == state)
		return 0;

	rocker_port->stp_state = state;

	/* decide which ctrl packet classes the new state requires */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		want[ROCKER_CTRL_IPV4_MCAST] = true;
		want[ROCKER_CTRL_IPV6_MCAST] = true;
		if (rocker_port_is_bridged(rocker_port))
			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
		else if (rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_DFLT_OVS] = true;
		else
			want[ROCKER_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* apply only the delta between current and wanted ctrl entries */
	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (want[i] != rocker_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
			err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
					       &rocker_ctrls[i]);
			if (err)
				goto err_out;
			rocker_port->ctrls[i] = want[i];
		}
	}

	err = rocker_port_fdb_flush(rocker_port, trans, flags);
	if (err)
		goto err_out;

	err = rocker_port_fwding(rocker_port, trans, flags);

err_out:
	/* prepare phase: restore the snapshot taken above */
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		rocker_port->stp_state = prev_state;
	}

	return err;
}
3689
3690 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3691                                   struct switchdev_trans *trans, int flags)
3692 {
3693         if (rocker_port_is_bridged(rocker_port))
3694                 /* bridge STP will enable port */
3695                 return 0;
3696
3697         /* port is not bridged, so simulate going to FORWARDING state */
3698         return rocker_port_stp_update(rocker_port, trans, flags,
3699                                       BR_STATE_FORWARDING);
3700 }
3701
3702 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3703                                    struct switchdev_trans *trans, int flags)
3704 {
3705         if (rocker_port_is_bridged(rocker_port))
3706                 /* bridge STP will disable port */
3707                 return 0;
3708
3709         /* port is not bridged, so simulate going to DISABLED state */
3710         return rocker_port_stp_update(rocker_port, trans, flags,
3711                                       BR_STATE_DISABLED);
3712 }
3713
3714 static struct rocker_internal_vlan_tbl_entry *
3715 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3716 {
3717         struct rocker_internal_vlan_tbl_entry *found;
3718
3719         hash_for_each_possible(rocker->internal_vlan_tbl, found,
3720                                entry, ifindex) {
3721                 if (found->ifindex == ifindex)
3722                         return found;
3723         }
3724
3725         return NULL;
3726 }
3727
/* Get the internal VLAN ID mapped to a netdev ifindex, creating the
 * mapping (and allocating an ID from the internal pool) on first use,
 * and taking a reference either way.  Returns 0 on allocation failure.
 * NOTE(review): if the internal-VLAN pool is exhausted, the new
 * entry's vlan_id stays 0 (from kzalloc), a reference is still taken,
 * and 0 is returned after the error log — confirm callers handle a
 * zero VLAN ID.
 */
static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
                                               int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *entry;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* allocate outside the spinlock; freed if the ifindex already exists */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);

	/* claim the first free ID in the internal VLAN range */
	for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
3769
/* Drop one reference on the ifindex-to-internal-VLAN mapping; when the
 * last reference goes, return the VLAN ID to the pool and free the
 * entry.  A missing ifindex is logged and ignored.
 */
static void
rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
                                 int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (!found) {
		netdev_err(rocker_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		/* recycle the ID back into the internal VLAN bitmap */
		bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, rocker->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}
3799
/* Install (or remove, per flags) an IPv4 unicast route in the OF-DPA
 * routing table.  Routes whose nexthop has a gateway reachable through
 * this port use an L3 unicast group; all other routes are sent to the
 * CPU via the L2 interface group on the port's internal VLAN.  Only
 * the first nexthop is used (no ECMP yet).
 */
static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
                                struct switchdev_trans *trans, __be32 dst,
                                int dst_len, const struct fib_info *fi,
                                u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == rocker_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		/* resolve/allocate the nexthop, then route via L3 unicast */
		err = rocker_port_ipv4_nh(rocker_port, trans, flags,
					  nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
					     dst_mask, priority, goto_tbl,
					     group_id, flags);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
3845
3846 /*****************
3847  * Net device ops
3848  *****************/
3849
/* netdev ndo_open: bring the port up — set up DMA rings, request the
 * per-port TX/RX MSI-X vectors, enable forwarding, start NAPI, and
 * (unless proto_down is set) enable the port in hardware.  Resources
 * are unwound in reverse order on any failure.
 */
static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_port_fwd_enable(rocker_port, NULL, 0);
	if (err)
		goto err_fwd_enable;

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	/* respect admin "proto down": leave the port disabled in hw */
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_fwd_enable:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}
3894
/* rocker_port_stop() - ndo_stop handler: tear the port down.
 *
 * Exact reverse of rocker_port_open(): stop the queue and hardware
 * port first, then NAPI, then disable forwarding (NOWAIT since the
 * port is going away), and finally release IRQs and DMA rings.
 */
static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_port_fwd_disable(rocker_port, NULL,
				ROCKER_OP_FLAG_NOWAIT);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}
3911
/* rocker_tx_desc_frags_unmap() - undo the DMA mappings recorded in a
 * TX descriptor's fragment TLVs.
 *
 * Walks the nested ROCKER_TLV_TX_FRAG entries and unmaps each
 * (address, length) pair that was created by
 * rocker_tx_desc_frag_map_put().  Fragments missing either attribute
 * are skipped silently.
 *
 * NOTE(review): this passes DMA_TO_DEVICE to pci_unmap_single(),
 * which expects PCI_DMA_TODEVICE; the enum values are identical so
 * this works, but the mixed API naming is worth cleaning up.
 */
static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
	struct rocker_tlv *attr;
	int rem;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_TX_FRAGS])
		return;
	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
		const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
		dma_addr_t dma_handle;
		size_t len;

		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
			continue;
		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
					attr);
		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
			continue;
		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
		pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
	}
}
3941
/* rocker_tx_desc_frag_map_put() - DMA-map one TX buffer and record it
 * as a nested ROCKER_TLV_TX_FRAG (addr + len) in the descriptor.
 *
 * On TLV-space exhaustion the nest is cancelled and the mapping is
 * released before returning -EMSGSIZE; a mapping failure returns
 * -EIO with a rate-limited error message.
 */
static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       char *buf, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	struct rocker_tlv *frag;

	dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
		if (net_ratelimit())
			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
	if (!frag)
		goto unmap_frag;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
			       dma_handle))
		goto nest_cancel;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
			       buf_len))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, frag);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
	pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
	return -EMSGSIZE;
}
3975
/* rocker_port_xmit() - ndo_start_xmit: post one skb to the TX ring.
 *
 * The skb's linear head and each page fragment are DMA-mapped and
 * recorded as nested ROCKER_TLV_TX_FRAG TLVs.  If the skb has more
 * fragments than the descriptor can describe, it is linearized so
 * everything fits in the (already mapped) head.  On any failure the
 * mappings/nest are unwound and the packet is dropped (TX_OK with
 * tx_dropped incremented), never requeued.
 *
 * NOTE(review): skb_linearize() is called *after* skb->data was
 * DMA-mapped; if linearizing reallocates the head, the recorded
 * mapping could describe stale memory — confirm against the rocker
 * device model's fragment limits before relying on this path.
 */
static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_tlv *frags;
	int i;
	int err;

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (unlikely(!desc_info)) {
		if (net_ratelimit())
			netdev_err(dev, "tx ring full when queue awake\n");
		return NETDEV_TX_BUSY;
	}

	/* Stash the skb so the TX completion path can free it. */
	rocker_desc_cookie_ptr_set(desc_info, skb);

	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;
	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		err = skb_linearize(skb);
		if (err)
			goto unmap_frags;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
						  skb_frag_address(frag),
						  skb_frag_size(frag));
		if (err)
			goto unmap_frags;
	}
	rocker_tlv_nest_end(desc_info, frags);

	rocker_desc_gen_clear(desc_info);
	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

	/* Stop the queue while the ring is full; the TX completion
	 * handler wakes it again once credits are returned.
	 */
	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (!desc_info)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

unmap_frags:
	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frags);
out:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
4037
4038 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4039 {
4040         struct sockaddr *addr = p;
4041         struct rocker_port *rocker_port = netdev_priv(dev);
4042         int err;
4043
4044         if (!is_valid_ether_addr(addr->sa_data))
4045                 return -EADDRNOTAVAIL;
4046
4047         err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4048         if (err)
4049                 return err;
4050         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4051         return 0;
4052 }
4053
4054 static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4055 {
4056         struct rocker_port *rocker_port = netdev_priv(dev);
4057         int running = netif_running(dev);
4058         int err;
4059
4060 #define ROCKER_PORT_MIN_MTU     68
4061 #define ROCKER_PORT_MAX_MTU     9000
4062
4063         if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4064                 return -EINVAL;
4065
4066         if (running)
4067                 rocker_port_stop(dev);
4068
4069         netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4070         dev->mtu = new_mtu;
4071
4072         err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4073         if (err)
4074                 return err;
4075
4076         if (running)
4077                 err = rocker_port_open(dev);
4078
4079         return err;
4080 }
4081
4082 static int rocker_port_get_phys_port_name(struct net_device *dev,
4083                                           char *buf, size_t len)
4084 {
4085         struct rocker_port *rocker_port = netdev_priv(dev);
4086         struct port_name name = { .buf = buf, .len = len };
4087         int err;
4088
4089         err = rocker_cmd_exec(rocker_port, NULL, 0,
4090                               rocker_cmd_get_port_settings_prep, NULL,
4091                               rocker_cmd_get_port_settings_phys_name_proc,
4092                               &name);
4093
4094         return err ? -EOPNOTSUPP : 0;
4095 }
4096
4097 static int rocker_port_change_proto_down(struct net_device *dev,
4098                                          bool proto_down)
4099 {
4100         struct rocker_port *rocker_port = netdev_priv(dev);
4101
4102         if (rocker_port->dev->flags & IFF_UP)
4103                 rocker_port_set_enable(rocker_port, !proto_down);
4104         rocker_port->dev->proto_down = proto_down;
4105         return 0;
4106 }
4107
4108 static void rocker_port_neigh_destroy(struct neighbour *n)
4109 {
4110         struct rocker_port *rocker_port = netdev_priv(n->dev);
4111         int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4112         __be32 ip_addr = *(__be32 *)n->primary_key;
4113
4114         rocker_port_ipv4_neigh(rocker_port, NULL,
4115                                flags, ip_addr, n->ha);
4116 }
4117
/* Netdev ops for a rocker port; the bridge/FDB hooks are delegated to
 * the generic switchdev helpers.
 */
static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open			= rocker_port_open,
	.ndo_stop			= rocker_port_stop,
	.ndo_start_xmit			= rocker_port_xmit,
	.ndo_set_mac_address		= rocker_port_set_mac_address,
	.ndo_change_mtu			= rocker_port_change_mtu,
	.ndo_bridge_getlink		= switchdev_port_bridge_getlink,
	.ndo_bridge_setlink		= switchdev_port_bridge_setlink,
	.ndo_bridge_dellink		= switchdev_port_bridge_dellink,
	.ndo_fdb_add			= switchdev_port_fdb_add,
	.ndo_fdb_del			= switchdev_port_fdb_del,
	.ndo_fdb_dump			= switchdev_port_fdb_dump,
	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
	.ndo_change_proto_down		= rocker_port_change_proto_down,
	.ndo_neigh_destroy		= rocker_port_neigh_destroy,
};
4134
4135 /********************
4136  * swdev interface
4137  ********************/
4138
4139 static int rocker_port_attr_get(struct net_device *dev,
4140                                 struct switchdev_attr *attr)
4141 {
4142         const struct rocker_port *rocker_port = netdev_priv(dev);
4143         const struct rocker *rocker = rocker_port->rocker;
4144
4145         switch (attr->id) {
4146         case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
4147                 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4148                 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
4149                 break;
4150         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4151                 attr->u.brport_flags = rocker_port->brport_flags;
4152                 break;
4153         default:
4154                 return -EOPNOTSUPP;
4155         }
4156
4157         return 0;
4158 }
4159
/* rocker_port_brport_flags_set() - apply bridge port flags.
 *
 * Only a change of BR_LEARNING is pushed to the device.  The flags
 * are written to the port *before* calling rocker_port_set_learning()
 * (which reads them), and rolled back in the switchdev prepare phase
 * so only the commit phase leaves them changed.
 */
static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					unsigned long brport_flags)
{
	unsigned long orig_flags;
	int err = 0;

	orig_flags = rocker_port->brport_flags;
	rocker_port->brport_flags = brport_flags;
	if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
		err = rocker_port_set_learning(rocker_port, trans);

	/* prepare phase must not leave state changes behind */
	if (switchdev_trans_ph_prepare(trans))
		rocker_port->brport_flags = orig_flags;

	return err;
}
4177
4178 static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
4179                                           struct switchdev_trans *trans,
4180                                           u32 ageing_time)
4181 {
4182         if (!switchdev_trans_ph_prepare(trans)) {
4183                 rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
4184                 mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
4185         }
4186
4187         return 0;
4188 }
4189
4190 static int rocker_port_attr_set(struct net_device *dev,
4191                                 const struct switchdev_attr *attr,
4192                                 struct switchdev_trans *trans)
4193 {
4194         struct rocker_port *rocker_port = netdev_priv(dev);
4195         int err = 0;
4196
4197         switch (attr->id) {
4198         case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
4199                 err = rocker_port_stp_update(rocker_port, trans, 0,
4200                                              attr->u.stp_state);
4201                 break;
4202         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4203                 err = rocker_port_brport_flags_set(rocker_port, trans,
4204                                                    attr->u.brport_flags);
4205                 break;
4206         case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
4207                 err = rocker_port_bridge_ageing_time(rocker_port, trans,
4208                                                      attr->u.ageing_time);
4209                 break;
4210         default:
4211                 err = -EOPNOTSUPP;
4212                 break;
4213         }
4214
4215         return err;
4216 }
4217
4218 static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4219                                 struct switchdev_trans *trans,
4220                                 u16 vid, u16 flags)
4221 {
4222         int err;
4223
4224         /* XXX deal with flags for PVID and untagged */
4225
4226         err = rocker_port_vlan(rocker_port, trans, 0, vid);
4227         if (err)
4228                 return err;
4229
4230         err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4231         if (err)
4232                 rocker_port_vlan(rocker_port, trans,
4233                                  ROCKER_OP_FLAG_REMOVE, vid);
4234
4235         return err;
4236 }
4237
4238 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4239                                  struct switchdev_trans *trans,
4240                                  const struct switchdev_obj_port_vlan *vlan)
4241 {
4242         u16 vid;
4243         int err;
4244
4245         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4246                 err = rocker_port_vlan_add(rocker_port, trans,
4247                                            vid, vlan->flags);
4248                 if (err)
4249                         return err;
4250         }
4251
4252         return 0;
4253 }
4254
4255 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4256                                struct switchdev_trans *trans,
4257                                const struct switchdev_obj_port_fdb *fdb)
4258 {
4259         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4260         int flags = 0;
4261
4262         if (!rocker_port_is_bridged(rocker_port))
4263                 return -EINVAL;
4264
4265         return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4266 }
4267
4268 static int rocker_port_obj_add(struct net_device *dev,
4269                                const struct switchdev_obj *obj,
4270                                struct switchdev_trans *trans)
4271 {
4272         struct rocker_port *rocker_port = netdev_priv(dev);
4273         const struct switchdev_obj_ipv4_fib *fib4;
4274         int err = 0;
4275
4276         switch (obj->id) {
4277         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4278                 err = rocker_port_vlans_add(rocker_port, trans,
4279                                             SWITCHDEV_OBJ_PORT_VLAN(obj));
4280                 break;
4281         case SWITCHDEV_OBJ_ID_IPV4_FIB:
4282                 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4283                 err = rocker_port_fib_ipv4(rocker_port, trans,
4284                                            htonl(fib4->dst), fib4->dst_len,
4285                                            &fib4->fi, fib4->tb_id, 0);
4286                 break;
4287         case SWITCHDEV_OBJ_ID_PORT_FDB:
4288                 err = rocker_port_fdb_add(rocker_port, trans,
4289                                           SWITCHDEV_OBJ_PORT_FDB(obj));
4290                 break;
4291         default:
4292                 err = -EOPNOTSUPP;
4293                 break;
4294         }
4295
4296         return err;
4297 }
4298
4299 static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4300                                 u16 vid, u16 flags)
4301 {
4302         int err;
4303
4304         err = rocker_port_router_mac(rocker_port, NULL,
4305                                      ROCKER_OP_FLAG_REMOVE, htons(vid));
4306         if (err)
4307                 return err;
4308
4309         return rocker_port_vlan(rocker_port, NULL,
4310                                 ROCKER_OP_FLAG_REMOVE, vid);
4311 }
4312
4313 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
4314                                  const struct switchdev_obj_port_vlan *vlan)
4315 {
4316         u16 vid;
4317         int err;
4318
4319         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4320                 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4321                 if (err)
4322                         return err;
4323         }
4324
4325         return 0;
4326 }
4327
4328 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4329                                struct switchdev_trans *trans,
4330                                const struct switchdev_obj_port_fdb *fdb)
4331 {
4332         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4333         int flags = ROCKER_OP_FLAG_REMOVE;
4334
4335         if (!rocker_port_is_bridged(rocker_port))
4336                 return -EINVAL;
4337
4338         return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4339 }
4340
4341 static int rocker_port_obj_del(struct net_device *dev,
4342                                const struct switchdev_obj *obj)
4343 {
4344         struct rocker_port *rocker_port = netdev_priv(dev);
4345         const struct switchdev_obj_ipv4_fib *fib4;
4346         int err = 0;
4347
4348         switch (obj->id) {
4349         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4350                 err = rocker_port_vlans_del(rocker_port,
4351                                             SWITCHDEV_OBJ_PORT_VLAN(obj));
4352                 break;
4353         case SWITCHDEV_OBJ_ID_IPV4_FIB:
4354                 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4355                 err = rocker_port_fib_ipv4(rocker_port, NULL,
4356                                            htonl(fib4->dst), fib4->dst_len,
4357                                            &fib4->fi, fib4->tb_id,
4358                                            ROCKER_OP_FLAG_REMOVE);
4359                 break;
4360         case SWITCHDEV_OBJ_ID_PORT_FDB:
4361                 err = rocker_port_fdb_del(rocker_port, NULL,
4362                                           SWITCHDEV_OBJ_PORT_FDB(obj));
4363                 break;
4364         default:
4365                 err = -EOPNOTSUPP;
4366                 break;
4367         }
4368
4369         return err;
4370 }
4371
/* rocker_port_fdb_dump() - dump this port's FDB entries via switchdev.
 *
 * Walks the switch-wide FDB hash under fdb_tbl_lock (irqsave: the
 * table is also touched from event/IRQ context), filters entries
 * belonging to this port, and invokes the switchdev dump callback
 * for each.  Stops on the first callback error and returns it.
 */
static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
				struct switchdev_obj_port_fdb *fdb,
				switchdev_obj_dump_cb_t *cb)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		ether_addr_copy(fdb->addr, found->key.addr);
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
						   found->key.vlan_id);
		err = cb(&fdb->obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}
4399
4400 static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4401                                  struct switchdev_obj_port_vlan *vlan,
4402                                  switchdev_obj_dump_cb_t *cb)
4403 {
4404         u16 vid;
4405         int err = 0;
4406
4407         for (vid = 1; vid < VLAN_N_VID; vid++) {
4408                 if (!test_bit(vid, rocker_port->vlan_bitmap))
4409                         continue;
4410                 vlan->flags = 0;
4411                 if (rocker_vlan_id_is_internal(htons(vid)))
4412                         vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4413                 vlan->vid_begin = vid;
4414                 vlan->vid_end = vid;
4415                 err = cb(&vlan->obj);
4416                 if (err)
4417                         break;
4418         }
4419
4420         return err;
4421 }
4422
4423 static int rocker_port_obj_dump(struct net_device *dev,
4424                                 struct switchdev_obj *obj,
4425                                 switchdev_obj_dump_cb_t *cb)
4426 {
4427         const struct rocker_port *rocker_port = netdev_priv(dev);
4428         int err = 0;
4429
4430         switch (obj->id) {
4431         case SWITCHDEV_OBJ_ID_PORT_FDB:
4432                 err = rocker_port_fdb_dump(rocker_port,
4433                                            SWITCHDEV_OBJ_PORT_FDB(obj), cb);
4434                 break;
4435         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4436                 err = rocker_port_vlan_dump(rocker_port,
4437                                             SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
4438                 break;
4439         default:
4440                 err = -EOPNOTSUPP;
4441                 break;
4442         }
4443
4444         return err;
4445 }
4446
/* switchdev ops for a rocker port */
static const struct switchdev_ops rocker_port_switchdev_ops = {
	.switchdev_port_attr_get	= rocker_port_attr_get,
	.switchdev_port_attr_set	= rocker_port_attr_set,
	.switchdev_port_obj_add		= rocker_port_obj_add,
	.switchdev_port_obj_del		= rocker_port_obj_del,
	.switchdev_port_obj_dump	= rocker_port_obj_dump,
};
4454
4455 /********************
4456  * ethtool interface
4457  ********************/
4458
/* ethtool get_settings: read link settings from the device. */
static int rocker_port_get_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	return rocker_cmd_get_port_settings_ethtool(netdev_priv(dev), ecmd);
}
4466
/* ethtool set_settings: write link settings to the device. */
static int rocker_port_set_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	return rocker_cmd_set_port_settings_ethtool(netdev_priv(dev), ecmd);
}
4474
4475 static void rocker_port_get_drvinfo(struct net_device *dev,
4476                                     struct ethtool_drvinfo *drvinfo)
4477 {
4478         strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4479         strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4480 }
4481
/* Mapping between ethtool stat strings and the device's port-stats
 * TLV types; order here defines the order of the ethtool stats array.
 */
static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;	/* ROCKER_TLV_CMD_PORT_STATS_* attribute */
} rocker_port_stats[] = {
	{ "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
	{ "rx_bytes",   ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
	{ "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors",  ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },

	{ "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
	{ "tx_bytes",   ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
	{ "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors",  ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
};

#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
4498
4499 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4500                                     u8 *data)
4501 {
4502         u8 *p = data;
4503         int i;
4504
4505         switch (stringset) {
4506         case ETH_SS_STATS:
4507                 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4508                         memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4509                         p += ETH_GSTRING_LEN;
4510                 }
4511                 break;
4512         }
4513 }
4514
/* rocker_cmd_get_port_stats_prep() - build a GET_PORT_STATS command
 * descriptor: command type TLV plus a nested info TLV carrying the
 * physical port number.
 *
 * Returns -EMSGSIZE if the descriptor runs out of TLV space (the
 * descriptor is discarded on error, so no nest cancel is needed).
 */
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}
4538
/* rocker_cmd_get_port_stats_ethtool_proc() - parse a GET_PORT_STATS
 * reply into the ethtool stats array.
 *
 * @priv: u64 array laid out to match rocker_port_stats[].
 *
 * Validates that the reply carries an info nest and that its pport
 * matches this port (returns -EIO otherwise).  Stats missing from the
 * reply leave the corresponding data[i] untouched.
 */
static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	const struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}
4576
/* rocker_cmd_get_port_stats_ethtool() - issue GET_PORT_STATS and fill
 * @priv (a u64 stats array) from the reply.
 */
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv);
}
4585
4586 static void rocker_port_get_stats(struct net_device *dev,
4587                                   struct ethtool_stats *stats, u64 *data)
4588 {
4589         struct rocker_port *rocker_port = netdev_priv(dev);
4590
4591         if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4592                 int i;
4593
4594                 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4595                         data[i] = 0;
4596         }
4597 }
4598
4599 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4600 {
4601         switch (sset) {
4602         case ETH_SS_STATS:
4603                 return ROCKER_PORT_STATS_LEN;
4604         default:
4605                 return -EOPNOTSUPP;
4606         }
4607 }
4608
/* ethtool ops for a rocker port */
static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_settings		= rocker_port_get_settings,
	.set_settings		= rocker_port_set_settings,
	.get_drvinfo		= rocker_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= rocker_port_get_strings,
	.get_ethtool_stats	= rocker_port_get_stats,
	.get_sset_count		= rocker_port_get_sset_count,
};
4618
4619 /*****************
4620  * NAPI interface
4621  *****************/
4622
/* Map a TX napi_struct back to its owning rocker_port. */
static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}
4627
/* rocker_port_poll_tx() - NAPI poll for TX completions.
 *
 * Reaps completed TX descriptors: unmaps their fragment DMA, updates
 * tx_packets/tx_bytes (or tx_errors on a descriptor error), frees the
 * skb stashed as the descriptor cookie, and returns the reclaimed
 * credits to the ring.  Wakes the queue if xmit had stopped it.
 * TX completion work is not budgeted, so this always completes NAPI
 * and returns 0.
 */
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}
4666
/* rocker_port_rx_proc() - process one received descriptor.
 *
 * Extracts the frame length (and optional RX flags) from the
 * descriptor TLVs, unmaps the skb data, finalizes and delivers the
 * skb to the stack, then allocates and maps a fresh skb back into the
 * descriptor for reuse.  Frames forwarded in hardware are tagged with
 * the offload forward mark so the bridge does not re-forward them.
 *
 * Returns 0 on success; -ENOENT if the descriptor has no skb cookie,
 * -EINVAL if the length TLV is missing, or the refill error.
 */
static int rocker_port_rx_proc(const struct rocker *rocker,
			       const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;
	u16 rx_flags = 0;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;
	if (attrs[ROCKER_TLV_RX_FLAGS])
		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
		skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	/* refill the descriptor with a newly mapped skb */
	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}
4701
4702 static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4703 {
4704         return container_of(napi, struct rocker_port, napi_rx);
4705 }
4706
/* NAPI rx poll handler: process up to @budget completed rx descriptors,
 * hand each one back to the device for reuse, and complete NAPI only if
 * the ring drained before the budget was exhausted.  Returns the number
 * of descriptors processed.
 */
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		/* Clear the generation bit and give the descriptor back
		 * to the device so it can be filled again.
		 */
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	/* Budget not exhausted means the ring is empty; re-enable
	 * interrupt-driven operation.
	 */
	if (credits < budget)
		napi_complete(napi);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}
4745
4746 /*****************
4747  * PCI driver ops
4748  *****************/
4749
4750 static void rocker_carrier_init(const struct rocker_port *rocker_port)
4751 {
4752         const struct rocker *rocker = rocker_port->rocker;
4753         u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4754         bool link_up;
4755
4756         link_up = link_status & (1 << rocker_port->pport);
4757         if (link_up)
4758                 netif_carrier_on(rocker_port->dev);
4759         else
4760                 netif_carrier_off(rocker_port->dev);
4761 }
4762
4763 static void rocker_remove_ports(const struct rocker *rocker)
4764 {
4765         struct rocker_port *rocker_port;
4766         int i;
4767
4768         for (i = 0; i < rocker->port_count; i++) {
4769                 rocker_port = rocker->ports[i];
4770                 if (!rocker_port)
4771                         continue;
4772                 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
4773                 unregister_netdev(rocker_port->dev);
4774                 free_netdev(rocker_port->dev);
4775         }
4776         kfree(rocker->ports);
4777 }
4778
4779 static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
4780 {
4781         const struct rocker *rocker = rocker_port->rocker;
4782         const struct pci_dev *pdev = rocker->pdev;
4783         int err;
4784
4785         err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4786                                                    rocker_port->dev->dev_addr);
4787         if (err) {
4788                 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4789                 eth_hw_addr_random(rocker_port->dev);
4790         }
4791 }
4792
/* Allocate, initialize and register the netdev for one physical port.
 * port_number is 0-based; the device's pport numbering is 1-based.
 * On failure, everything set up so far is unwound in reverse order.
 */
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u16 untagged_vid = 0;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	/* hardware port numbers (pport) start at 1 */
	rocker_port->pport = port_number + 1;
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	dev->switchdev_ops = &rocker_port_switchdev_ops;
	netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
			  NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);

	rocker_port_set_learning(rocker_port, NULL);

	/* Install the OF-DPA ingress port table entry so the device
	 * starts classifying traffic arriving on this port.
	 */
	err = rocker_port_ig_tbl(rocker_port, NULL, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

	/* vid 0: accept untagged traffic on the port by default */
	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}

	return 0;

err_untagged_vlan:
	rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	free_netdev(dev);
	return err;
}
4861
4862 static int rocker_probe_ports(struct rocker *rocker)
4863 {
4864         int i;
4865         size_t alloc_size;
4866         int err;
4867
4868         alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
4869         rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
4870         if (!rocker->ports)
4871                 return -ENOMEM;
4872         for (i = 0; i < rocker->port_count; i++) {
4873                 err = rocker_probe_port(rocker, i);
4874                 if (err)
4875                         goto remove_ports;
4876         }
4877         return 0;
4878
4879 remove_ports:
4880         rocker_remove_ports(rocker);
4881         return err;
4882 }
4883
4884 static int rocker_msix_init(struct rocker *rocker)
4885 {
4886         struct pci_dev *pdev = rocker->pdev;
4887         int msix_entries;
4888         int i;
4889         int err;
4890
4891         msix_entries = pci_msix_vec_count(pdev);
4892         if (msix_entries < 0)
4893                 return msix_entries;
4894
4895         if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
4896                 return -EINVAL;
4897
4898         rocker->msix_entries = kmalloc_array(msix_entries,
4899                                              sizeof(struct msix_entry),
4900                                              GFP_KERNEL);
4901         if (!rocker->msix_entries)
4902                 return -ENOMEM;
4903
4904         for (i = 0; i < msix_entries; i++)
4905                 rocker->msix_entries[i].entry = i;
4906
4907         err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
4908         if (err < 0)
4909                 goto err_enable_msix;
4910
4911         return 0;
4912
4913 err_enable_msix:
4914         kfree(rocker->msix_entries);
4915         return err;
4916 }
4917
4918 static void rocker_msix_fini(const struct rocker *rocker)
4919 {
4920         pci_disable_msix(rocker->pdev);
4921         kfree(rocker->msix_entries);
4922 }
4923
/* PCI probe: enable the device, map BAR0, set up MSI-X, run a basic
 * hardware self-test, initialize DMA rings and software tables, then
 * create one netdev per physical port.  Each failure point unwinds
 * everything set up before it, in reverse order.
 */
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	/* Prefer 64-bit DMA addressing; fall back to 32-bit */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	/* Number of physical ports is reported by the device itself */
	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	/* Reset the device to a known state before setting up rings */
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	err = rocker_init_tbls(rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot init rocker tables\n");
		goto err_init_tbls;
	}

	/* Periodic FDB ageing; kicked off immediately */
	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
		    (unsigned long) rocker);
	mod_timer(&rocker->fdb_cleanup_timer, jiffies);

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
		 (int)sizeof(rocker->hw.id), &rocker->hw.id);

	return 0;

err_probe_ports:
	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
err_init_tbls:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}
5061
/* PCI remove: tear down everything rocker_probe() set up, largely in
 * reverse order.
 */
static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);

	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
	/* Reset the device so it stops before rings and IRQs go away */
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	rocker_remove_ports(rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}
5079
/* PCI driver glue, bound to the Rocker device IDs in rocker_pci_id_table */
static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};
5086
5087 /************************************
5088  * Net device notifier event handler
5089  ************************************/
5090
5091 static bool rocker_port_dev_check(const struct net_device *dev)
5092 {
5093         return dev->netdev_ops == &rocker_port_netdev_ops;
5094 }
5095
/* Enslave the port to a bridge: switch the port's internal VLAN from
 * its own to the bridge's, and update the forwarding offload mark.
 * Returns 0 on success or a negative errno.
 */
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	u16 untagged_vid = 0;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	/* Drop the reference on the port's own internal VLAN and take
	 * one on the bridge's (keyed by ifindex).
	 */
	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

	rocker_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

	return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}
5122
/* Release the port from its bridge: move the internal VLAN back from
 * the bridge's to the port's own, clear the offload mark, and re-enable
 * forwarding if the port is up.  Returns 0 or a negative errno.
 */
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	/* Remove untagged VLAN (vid=0) while it still references the
	 * bridge's internal VLAN; re-added below under the port's own.
	 */
	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
				    false);
	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}
5151
5152 static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5153                                    struct net_device *master)
5154 {
5155         int err;
5156
5157         rocker_port->bridge_dev = master;
5158
5159         err = rocker_port_fwd_disable(rocker_port, NULL, 0);
5160         if (err)
5161                 return err;
5162         err = rocker_port_fwd_enable(rocker_port, NULL, 0);
5163
5164         return err;
5165 }
5166
/* Dispatch a new-master event to the bridge or OVS join handler */
static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	if (netif_is_bridge_master(master))
		return rocker_port_bridge_join(rocker_port, master);
	if (netif_is_ovs_master(master))
		return rocker_port_ovs_changed(rocker_port, master);
	return 0;
}
5178
5179 static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5180 {
5181         int err = 0;
5182
5183         if (rocker_port_is_bridged(rocker_port))
5184                 err = rocker_port_bridge_leave(rocker_port);
5185         else if (rocker_port_is_ovsed(rocker_port))
5186                 err = rocker_port_ovs_changed(rocker_port, NULL);
5187         return err;
5188 }
5189
5190 static int rocker_netdevice_event(struct notifier_block *unused,
5191                                   unsigned long event, void *ptr)
5192 {
5193         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5194         struct netdev_notifier_changeupper_info *info;
5195         struct rocker_port *rocker_port;
5196         int err;
5197
5198         if (!rocker_port_dev_check(dev))
5199                 return NOTIFY_DONE;
5200
5201         switch (event) {
5202         case NETDEV_CHANGEUPPER:
5203                 info = ptr;
5204                 if (!info->master)
5205                         goto out;
5206                 rocker_port = netdev_priv(dev);
5207                 if (info->linking) {
5208                         err = rocker_port_master_linked(rocker_port,
5209                                                         info->upper_dev);
5210                         if (err)
5211                                 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5212                                             err);
5213                 } else {
5214                         err = rocker_port_master_unlinked(rocker_port);
5215                         if (err)
5216                                 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5217                                             err);
5218                 }
5219                 break;
5220         }
5221 out:
5222         return NOTIFY_DONE;
5223 }
5224
/* Notifier reflecting bridge/OVS enslavement changes into the switch */
static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};
5228
5229 /************************************
5230  * Net event notifier event handler
5231  ************************************/
5232
5233 static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5234 {
5235         struct rocker_port *rocker_port = netdev_priv(dev);
5236         int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5237                     ROCKER_OP_FLAG_NOWAIT;
5238         __be32 ip_addr = *(__be32 *)n->primary_key;
5239
5240         return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
5241 }
5242
5243 static int rocker_netevent_event(struct notifier_block *unused,
5244                                  unsigned long event, void *ptr)
5245 {
5246         struct net_device *dev;
5247         struct neighbour *n = ptr;
5248         int err;
5249
5250         switch (event) {
5251         case NETEVENT_NEIGH_UPDATE:
5252                 if (n->tbl != &arp_tbl)
5253                         return NOTIFY_DONE;
5254                 dev = n->dev;
5255                 if (!rocker_port_dev_check(dev))
5256                         return NOTIFY_DONE;
5257                 err = rocker_neigh_update(dev, n);
5258                 if (err)
5259                         netdev_warn(dev,
5260                                     "failed to handle neigh update (err %d)\n",
5261                                     err);
5262                 break;
5263         }
5264
5265         return NOTIFY_DONE;
5266 }
5267
/* Notifier mirroring ARP neighbour updates into the device */
static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};
5271
5272 /***********************
5273  * Module init and exit
5274  ***********************/
5275
5276 static int __init rocker_module_init(void)
5277 {
5278         int err;
5279
5280         register_netdevice_notifier(&rocker_netdevice_nb);
5281         register_netevent_notifier(&rocker_netevent_nb);
5282         err = pci_register_driver(&rocker_pci_driver);
5283         if (err)
5284                 goto err_pci_register_driver;
5285         return 0;
5286
5287 err_pci_register_driver:
5288         unregister_netevent_notifier(&rocker_netevent_nb);
5289         unregister_netdevice_notifier(&rocker_netdevice_nb);
5290         return err;
5291 }
5292
/* Module exit: unhook the notifiers, then drop the PCI driver */
static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}
5299
module_init(rocker_module_init);
module_exit(rocker_module_exit);

/* Module metadata; MODULE_DEVICE_TABLE enables automatic loading when
 * a matching PCI device appears.
 */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);