2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
3 * Copyright (c) 2008 Marvell Semiconductor
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
11 #include <linux/delay.h>
12 #include <linux/etherdevice.h>
13 #include <linux/if_bridge.h>
14 #include <linux/jiffies.h>
15 #include <linux/list.h>
16 #include <linux/module.h>
17 #include <linux/netdevice.h>
18 #include <linux/phy.h>
20 #include "mv88e6xxx.h"
22 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
23 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
24 * will be directly accessible on some {device address,register address}
25 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
26 * will only respond to SMI transactions to that specific address, and
27 * an indirect addressing mechanism needs to be used to access its registers.
30 static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
35 for (i = 0; i < 16; i++) {
36 ret = mdiobus_read(bus, sw_addr, SMI_CMD);
40 if ((ret & SMI_CMD_BUSY) == 0)
47 int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
52 return mdiobus_read(bus, addr, reg);
54 /* Wait for the bus to become free. */
55 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
59 /* Transmit the read command. */
60 ret = mdiobus_write(bus, sw_addr, SMI_CMD,
61 SMI_CMD_OP_22_READ | (addr << 5) | reg);
65 /* Wait for the read command to complete. */
66 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
71 ret = mdiobus_read(bus, sw_addr, SMI_DATA);
78 /* Must be called with SMI mutex held */
79 static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
81 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
87 ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
91 dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
97 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
99 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
102 mutex_lock(&ps->smi_mutex);
103 ret = _mv88e6xxx_reg_read(ds, addr, reg);
104 mutex_unlock(&ps->smi_mutex);
109 int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
115 return mdiobus_write(bus, addr, reg, val);
117 /* Wait for the bus to become free. */
118 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
122 /* Transmit the data to write. */
123 ret = mdiobus_write(bus, sw_addr, SMI_DATA, val);
127 /* Transmit the write command. */
128 ret = mdiobus_write(bus, sw_addr, SMI_CMD,
129 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
133 /* Wait for the write command to complete. */
134 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
141 /* Must be called with SMI mutex held */
142 static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
145 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
150 dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
153 return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
156 int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
158 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
161 mutex_lock(&ps->smi_mutex);
162 ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
163 mutex_unlock(&ps->smi_mutex);
/* Program the switch's own MAC address via three 16-bit global registers,
 * two address bytes at a time.
 */
168 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
170 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
171 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
172 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
/* Program the switch MAC address one byte at a time through the Global 2
 * "switch MAC" register, polling the BUSY bit after each byte.
 */
177 int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
182 for (i = 0; i < 6; i++) {
185 /* Write the MAC address byte. */
186 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
187 GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
189 /* Wait for the write to complete. */
190 for (j = 0; j < 16; j++) {
191 ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
192 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
202 /* Must be called with SMI mutex held */
/* PHY registers live at the same SMI addresses as port registers on these
 * chips, so a plain register read suffices here.
 */
203 static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
206 return _mv88e6xxx_reg_read(ds, addr, regnum);
210 /* Must be called with SMI mutex held */
211 static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
215 return _mv88e6xxx_reg_write(ds, addr, regnum, val);
219 #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
/* Clear the PPU-enable bit in the global control register, then poll the
 * global status register (for up to one second) until the PPU reports it
 * is no longer in the polling state.
 */
220 static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
223 unsigned long timeout;
225 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
226 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
227 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
229 timeout = jiffies + 1 * HZ;
230 while (time_before(jiffies, timeout)) {
231 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
232 usleep_range(1000, 2000);
233 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
234 GLOBAL_STATUS_PPU_POLLING)
/* Inverse of mv88e6xxx_ppu_disable(): set the PPU-enable bit and wait until
 * the status register reports the PPU is polling again.
 */
241 static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
244 unsigned long timeout;
246 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
247 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
249 timeout = jiffies + 1 * HZ;
250 while (time_before(jiffies, timeout)) {
251 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
252 usleep_range(1000, 2000);
253 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
254 GLOBAL_STATUS_PPU_POLLING)
/* Deferred re-enable of the PPU, scheduled from the timer below.  Uses
 * mutex_trylock: if a new PHY access already holds ppu_mutex, re-enabling
 * is simply skipped (that access will reschedule the timer on its way out).
 */
261 static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
263 struct mv88e6xxx_priv_state *ps;
265 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
266 if (mutex_trylock(&ps->ppu_mutex)) {
/* NOTE(review): recovers the dsa_switch by assuming the priv state is
 * allocated immediately after it -- fragile layout assumption; confirm
 * against how dsa allocates switch private data.
 */
267 struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
269 if (mv88e6xxx_ppu_enable(ds) == 0)
270 ps->ppu_disabled = 0;
271 mutex_unlock(&ps->ppu_mutex);
/* Timer callback: can't touch SMI from timer context, so punt the actual
 * re-enable to the workqueue.
 */
275 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
277 struct mv88e6xxx_priv_state *ps = (void *)_ps;
279 schedule_work(&ps->ppu_work);
/* Acquire exclusive PHY-register access: takes ppu_mutex and makes sure the
 * PPU is stopped so direct PHY reads/writes don't race with hardware polling.
 */
282 static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
284 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
287 mutex_lock(&ps->ppu_mutex);
289 /* If the PHY polling unit is enabled, disable it so that
290 * we can access the PHY registers. If it was already
291 * disabled, cancel the timer that is going to re-enable
294 if (!ps->ppu_disabled) {
295 ret = mv88e6xxx_ppu_disable(ds);
/* Error path: drop the mutex if disabling the PPU failed. */
297 mutex_unlock(&ps->ppu_mutex);
300 ps->ppu_disabled = 1;
/* Already disabled: just stop the pending re-enable timer. */
302 del_timer(&ps->ppu_timer);
/* Release PHY access: re-enable the PPU lazily, 10 ms from now, so bursts
 * of PHY accesses don't toggle it on every call.
 */
309 static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
311 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
313 /* Schedule a timer to re-enable the PHY polling unit. */
314 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
315 mutex_unlock(&ps->ppu_mutex);
/* One-time initialisation of the PPU bookkeeping (mutex, work item, timer). */
318 void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
320 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
322 mutex_init(&ps->ppu_mutex);
323 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
324 init_timer(&ps->ppu_timer);
325 ps->ppu_timer.data = (unsigned long)ps;
326 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
/* PHY read with the PPU paused for the duration of the access. */
329 int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
333 ret = mv88e6xxx_ppu_access_get(ds);
335 ret = mv88e6xxx_reg_read(ds, addr, regnum);
336 mv88e6xxx_ppu_access_put(ds);
/* PHY write with the PPU paused for the duration of the access. */
342 int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
347 ret = mv88e6xxx_ppu_access_get(ds);
349 ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
350 mv88e6xxx_ppu_access_put(ds);
/* Periodic link poller: for every UP port, read the hardware port status
 * register and sync the netdev carrier state with it, logging speed/duplex/
 * flow-control on link-up transitions.
 */
357 void mv88e6xxx_poll_link(struct dsa_switch *ds)
361 for (i = 0; i < DSA_MAX_PORTS; i++) {
362 struct net_device *dev;
363 int uninitialized_var(port_status);
374 if (dev->flags & IFF_UP) {
375 port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
380 link = !!(port_status & PORT_STATUS_LINK);
/* Link lost: turn carrier off (only if it was on, to avoid log spam). */
384 if (netif_carrier_ok(dev)) {
385 netdev_info(dev, "link down\n");
386 netif_carrier_off(dev);
/* Decode negotiated speed from the status register. */
391 switch (port_status & PORT_STATUS_SPEED_MASK) {
392 case PORT_STATUS_SPEED_10:
395 case PORT_STATUS_SPEED_100:
398 case PORT_STATUS_SPEED_1000:
405 duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
406 fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;
408 if (!netif_carrier_ok(dev)) {
410 "link up, %d Mb/s, %s duplex, flow control %sabled\n",
412 duplex ? "full" : "half",
414 netif_carrier_on(dev);
/* Chip-family predicates: each one matches the cached product ID (ps->id,
 * read from PORT_SWITCH_ID at setup) against the switch models belonging to
 * that silicon family.  Used throughout setup to gate per-family register
 * programming.
 */
419 static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
421 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
424 case PORT_SWITCH_ID_6031:
425 case PORT_SWITCH_ID_6061:
426 case PORT_SWITCH_ID_6035:
427 case PORT_SWITCH_ID_6065:
433 static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
435 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
438 case PORT_SWITCH_ID_6092:
439 case PORT_SWITCH_ID_6095:
445 static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
447 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
450 case PORT_SWITCH_ID_6046:
451 case PORT_SWITCH_ID_6085:
452 case PORT_SWITCH_ID_6096:
453 case PORT_SWITCH_ID_6097:
459 static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
461 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
464 case PORT_SWITCH_ID_6123:
465 case PORT_SWITCH_ID_6161:
466 case PORT_SWITCH_ID_6165:
472 static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
474 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
477 case PORT_SWITCH_ID_6121:
478 case PORT_SWITCH_ID_6122:
479 case PORT_SWITCH_ID_6152:
480 case PORT_SWITCH_ID_6155:
481 case PORT_SWITCH_ID_6182:
482 case PORT_SWITCH_ID_6185:
483 case PORT_SWITCH_ID_6108:
484 case PORT_SWITCH_ID_6131:
490 static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
492 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
495 case PORT_SWITCH_ID_6171:
496 case PORT_SWITCH_ID_6175:
497 case PORT_SWITCH_ID_6350:
498 case PORT_SWITCH_ID_6351:
504 static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
506 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
509 case PORT_SWITCH_ID_6172:
510 case PORT_SWITCH_ID_6176:
511 case PORT_SWITCH_ID_6240:
512 case PORT_SWITCH_ID_6352:
518 /* Must be called with SMI mutex held */
/* Poll (up to 10 attempts) until the global statistics unit clears its BUSY
 * bit, i.e. the previous stats operation has finished.
 */
519 static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
524 for (i = 0; i < 10; i++) {
525 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
526 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
533 /* Must be called with SMI mutex held */
/* Latch (capture) the hardware counters of one port into the stats unit so
 * they can be read back consistently.
 */
534 static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
/* 6352 family encodes the port differently in the capture command
 * ((port + 1) << 5); other families pass the raw port number.
 */
538 if (mv88e6xxx_6352_family(ds))
539 port = (port + 1) << 5;
541 /* Snapshot the hardware statistics counters for this port. */
542 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
543 GLOBAL_STATS_OP_CAPTURE_PORT |
544 GLOBAL_STATS_OP_HIST_RX_TX | port);
548 /* Wait for the snapshotting to complete. */
549 ret = _mv88e6xxx_stats_wait(ds);
556 /* Must be called with SMI mutex held */
/* Read one previously-captured 32-bit counter: issue a READ_CAPTURED
 * command for the given stat index, wait, then assemble the value from the
 * high (bits 32) and low (bits 01) counter registers.
 */
557 static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
564 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
565 GLOBAL_STATS_OP_READ_CAPTURED |
566 GLOBAL_STATS_OP_HIST_RX_TX | stat);
570 ret = _mv88e6xxx_stats_wait(ds);
574 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
580 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
/* Table of exported counters: { ethtool name, size in bytes, register }.
 * Registers >= 0x100 are per-port registers (offset 0x100 + reg) rather
 * than stats-unit counters -- see _mv88e6xxx_get_ethtool_stats().
 */
587 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
588 { "in_good_octets", 8, 0x00, },
589 { "in_bad_octets", 4, 0x02, },
590 { "in_unicast", 4, 0x04, },
591 { "in_broadcasts", 4, 0x06, },
592 { "in_multicasts", 4, 0x07, },
593 { "in_pause", 4, 0x16, },
594 { "in_undersize", 4, 0x18, },
595 { "in_fragments", 4, 0x19, },
596 { "in_oversize", 4, 0x1a, },
597 { "in_jabber", 4, 0x1b, },
598 { "in_rx_error", 4, 0x1c, },
599 { "in_fcs_error", 4, 0x1d, },
600 { "out_octets", 8, 0x0e, },
601 { "out_unicast", 4, 0x10, },
602 { "out_broadcasts", 4, 0x13, },
603 { "out_multicasts", 4, 0x12, },
604 { "out_pause", 4, 0x15, },
605 { "excessive", 4, 0x11, },
606 { "collisions", 4, 0x1e, },
607 { "deferred", 4, 0x05, },
608 { "single", 4, 0x14, },
609 { "multiple", 4, 0x17, },
610 { "out_fcs_error", 4, 0x03, },
611 { "late", 4, 0x1f, },
612 { "hist_64bytes", 4, 0x08, },
613 { "hist_65_127bytes", 4, 0x09, },
614 { "hist_128_255bytes", 4, 0x0a, },
615 { "hist_256_511bytes", 4, 0x0b, },
616 { "hist_512_1023bytes", 4, 0x0c, },
617 { "hist_1024_max_bytes", 4, 0x0d, },
618 /* Not all devices have the following counters */
619 { "sw_in_discards", 4, 0x110, },
620 { "sw_in_filtered", 2, 0x112, },
621 { "sw_out_filtered", 2, 0x113, },
/* True for chips that implement the last three "sw_*" counters in
 * mv88e6xxx_hw_stats[]; the ethtool entry points below use this to decide
 * whether to export the full table or the table minus those three entries.
 */
625 static bool have_sw_in_discards(struct dsa_switch *ds)
627 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
630 case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
631 case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
632 case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
633 case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
634 case PORT_SWITCH_ID_6352:
/* Copy the first nr_stats counter names into the ethtool strings buffer
 * (fixed ETH_GSTRING_LEN stride).
 */
641 static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
643 struct mv88e6xxx_hw_stat *stats,
644 int port, uint8_t *data)
648 for (i = 0; i < nr_stats; i++) {
649 memcpy(data + i * ETH_GSTRING_LEN,
650 stats[i].string, ETH_GSTRING_LEN);
/* Snapshot the port's counters under the SMI mutex, then read each table
 * entry: registers >= 0x100 come from per-port registers, the rest from
 * the captured stats unit (with a second read for 64-bit counters).
 */
654 static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
656 struct mv88e6xxx_hw_stat *stats,
657 int port, uint64_t *data)
659 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
663 mutex_lock(&ps->smi_mutex);
665 ret = _mv88e6xxx_stats_snapshot(ds, port);
667 mutex_unlock(&ps->smi_mutex);
671 /* Read each of the counters. */
672 for (i = 0; i < nr_stats; i++) {
673 struct mv88e6xxx_hw_stat *s = stats + i;
/* Per-port register counter (the "sw_*" entries at 0x11x). */
677 if (s->reg >= 0x100) {
/* NOTE(review): unlocked mv88e6xxx_reg_read() used here while smi_mutex is
 * held above -- verify lock usage against the complete source.
 */
678 ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
683 if (s->sizeof_stat == 4) {
684 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
690 data[i] = (((u64)high) << 16) | low;
/* Stats-unit counter: 32-bit low word, plus a second 32-bit read for
 * 64-bit counters.
 */
693 _mv88e6xxx_stats_read(ds, s->reg, &low);
694 if (s->sizeof_stat == 8)
695 _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
697 data[i] = (((u64)high) << 32) | low;
700 mutex_unlock(&ps->smi_mutex);
703 /* All the statistics in the table */
705 mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
707 if (have_sw_in_discards(ds))
708 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
709 mv88e6xxx_hw_stats, port, data);
711 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
712 mv88e6xxx_hw_stats, port, data);
/* Number of exported stats: full table, or minus the three "sw_*" entries. */
715 int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
717 if (have_sw_in_discards(ds))
718 return ARRAY_SIZE(mv88e6xxx_hw_stats);
719 return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
723 mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
724 int port, uint64_t *data)
726 if (have_sw_in_discards(ds))
727 _mv88e6xxx_get_ethtool_stats(
728 ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
729 mv88e6xxx_hw_stats, port, data);
731 _mv88e6xxx_get_ethtool_stats(
732 ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
733 mv88e6xxx_hw_stats, port, data);
/* ethtool register dump: the 32 16-bit registers of one port. */
736 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
738 return 32 * sizeof(u16);
741 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
742 struct ethtool_regs *regs, void *_p)
/* Pre-fill with 0xff so registers that fail to read are recognisable. */
749 memset(p, 0xff, 32 * sizeof(u16));
751 for (i = 0; i < 32; i++) {
754 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
760 #ifdef CONFIG_NET_DSA_HWMON
/* Read the die temperature through PHY 0's vendor registers: switch to
 * page 6 (reg 0x16), set the sensor-enable bit in reg 0x1a, wait for the
 * reading to settle, sample, then disable the sensor and restore page 0.
 * Result is decoded as ((raw & 0x1f) - 5) * 5 degrees C.
 */
762 int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
764 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
770 mutex_lock(&ps->smi_mutex);
772 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
776 /* Enable temperature sensor */
777 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
781 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
785 /* Wait for temperature to stabilize */
786 usleep_range(10000, 12000);
788 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
794 /* Disable temperature sensor */
795 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
799 *temp = ((val & 0x1f) - 5) * 5;
/* Restore PHY page 0 and drop the SMI mutex on all paths. */
802 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
803 mutex_unlock(&ps->smi_mutex);
806 #endif /* CONFIG_NET_DSA_HWMON */
808 /* Must be called with SMI lock held */
/* Generic busy-wait: poll (reg, offset) for up to 100 ms, sleeping 1-2 ms
 * between reads, until the bits in 'mask' clear (exact exit condition not
 * fully visible in this extract).
 */
809 static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
812 unsigned long timeout = jiffies + HZ / 10;
814 while (time_before(jiffies, timeout)) {
817 ret = _mv88e6xxx_reg_read(ds, reg, offset);
823 usleep_range(1000, 2000);
/* Locked wrapper around _mv88e6xxx_wait(). */
828 static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
830 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
833 mutex_lock(&ps->smi_mutex);
834 ret = _mv88e6xxx_wait(ds, reg, offset, mask);
835 mutex_unlock(&ps->smi_mutex);
/* Wait for the Global 2 indirect-PHY (SMI_OP) unit to go idle. */
840 static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
842 return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
843 GLOBAL2_SMI_OP_BUSY);
/* Wait for the EEPROM unit: LOAD completion / BUSY respectively. */
846 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
848 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
849 GLOBAL2_EEPROM_OP_LOAD);
852 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
854 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
855 GLOBAL2_EEPROM_OP_BUSY);
858 /* Must be called with SMI lock held */
/* Wait for the ATU (address translation unit) to finish its current op. */
859 static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
861 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
865 /* Must be called with SMI mutex held */
/* Indirect PHY read via the Global 2 SMI_OP/SMI_DATA registers: issue the
 * clause-22 read command, wait for completion, fetch SMI_DATA.
 */
866 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
871 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
872 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
877 ret = _mv88e6xxx_phy_wait(ds);
881 return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
/* Report EEE state for a port: enable/tx-lpi bits come from indirect PHY
 * register 16, the "active" bit from the port status register.
 * NOTE(review): preceding _mv88e6xxx_phy_write_indirect() (lines 884-898 of
 * the original) is split across block boundaries in this extract.
 */
901 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
903 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
906 mutex_lock(&ps->smi_mutex);
908 reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
912 e->eee_enabled = !!(reg & 0x0200);
913 e->tx_lpi_enabled = !!(reg & 0x0100);
915 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
919 e->eee_active = !!(reg & PORT_STATUS_EEE);
923 mutex_unlock(&ps->smi_mutex);
/* Set EEE state: read-modify-write of indirect PHY register 16 under the
 * SMI mutex (the modify step is partially missing from this extract).
 */
927 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
928 struct phy_device *phydev, struct ethtool_eee *e)
930 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
934 mutex_lock(&ps->smi_mutex);
936 ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
943 if (e->tx_lpi_enabled)
946 ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
948 mutex_unlock(&ps->smi_mutex);
/* Issue one ATU operation: program the FID into global register 0x01, write
 * the command to GLOBAL_ATU_OP, then wait for completion.
 */
953 static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
957 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid);
961 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
965 return _mv88e6xxx_atu_wait(ds);
/* Flush all non-static (learned) entries of one forwarding database. */
968 static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
972 ret = _mv88e6xxx_atu_wait(ds);
976 return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB);
/* Move a port to a new STP state, flushing its FID when leaving a
 * Learning/Forwarding state for Disabled/Blocking/Listening so stale
 * learned addresses don't survive the transition.
 */
979 static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
981 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
985 mutex_lock(&ps->smi_mutex);
987 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
993 oldstate = reg & PORT_CONTROL_STATE_MASK;
994 if (oldstate != state) {
995 /* Flush forwarding database if we're moving a port
996 * from Learning or Forwarding state to Disabled or
997 * Blocking or Listening state.
999 if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
1000 state <= PORT_CONTROL_STATE_BLOCKING) {
1001 ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
1005 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1006 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
1011 mutex_unlock(&ps->smi_mutex);
1015 /* Must be called with smi lock held */
/* Recompute one port's port-based VLAN map register: FID in the top nibble,
 * plus the set of ports it may forward to (all ports for the CPU port,
 * bridge peers + upstream port for everyone else, never itself).
 */
1016 static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
1018 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1019 u8 fid = ps->fid[port];
1020 u16 reg = fid << 12;
1022 if (dsa_is_cpu_port(ds, port))
1023 reg |= ds->phys_port_mask;
1025 reg |= (ps->bridge_mask[fid] |
1026 (1 << dsa_upstream_port(ds))) & ~(1 << port);
1028 return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
1031 /* Must be called with smi lock held */
/* Re-run _mv88e6xxx_update_port_config() for every port that belongs to
 * 'fid', then flush the FID's learned entries.
 */
1032 static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
1034 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1039 mask = ds->phys_port_mask;
1042 mask &= ~(1 << port);
1043 if (ps->fid[port] != fid)
1046 ret = _mv88e6xxx_update_port_config(ds, port);
1051 return _mv88e6xxx_flush_fid(ds, fid);
1054 /* Bridge handling functions */
/* DSA bridge-join hook: if another port of the bridge already has a FID,
 * adopt it; otherwise keep this port's own FID as the group's FID.  Sanity-
 * check that the resulting member mask matches what the caller reported,
 * then rewrite the bridge's port configs under the SMI mutex.
 */
1056 int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1058 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1063 /* If the bridge group is not empty, join that group.
1064 * Otherwise create a new group.
1066 fid = ps->fid[port];
1067 nmask = br_port_mask & ~(1 << port);
1069 fid = ps->fid[__ffs(nmask)];
1071 nmask = ps->bridge_mask[fid] | (1 << port);
1072 if (nmask != br_port_mask) {
1073 netdev_err(ds->ports[port],
1074 "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1075 fid, br_port_mask, nmask);
1079 mutex_lock(&ps->smi_mutex);
1081 ps->bridge_mask[fid] = br_port_mask;
/* Port switched FID: return its old FID to the free pool and reprogram
 * the whole bridge group.
 */
1083 if (fid != ps->fid[port]) {
1084 ps->fid_mask |= 1 << ps->fid[port];
1085 ps->fid[port] = fid;
1086 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1089 mutex_unlock(&ps->smi_mutex);
/* DSA bridge-leave hook: give the departing port a fresh FID of its own and
 * reprogram both the old bridge group and the port's new one-port group.
 */
1094 int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1096 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1100 fid = ps->fid[port];
1102 if (ps->bridge_mask[fid] != br_port_mask) {
1103 netdev_err(ds->ports[port],
1104 "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1105 fid, br_port_mask, ps->bridge_mask[fid]);
1109 /* If the port was the last port of a bridge, we are done.
1110 * Otherwise assign a new fid to the port, and fix up
1111 * the bridge configuration.
1113 if (br_port_mask == (1 << port))
1116 mutex_lock(&ps->smi_mutex);
1118 newfid = __ffs(ps->fid_mask);
1119 ps->fid[port] = newfid;
/* NOTE(review): "ps->fid_mask &= (1 << newfid)" keeps ONLY the allocated
 * bit instead of clearing it; upstream uses "&= ~(1 << newfid)".  Possible
 * transcription loss in this extract -- verify against the full source
 * before treating this as a bug.
 */
1120 ps->fid_mask &= (1 << newfid);
1121 ps->bridge_mask[fid] &= ~(1 << port);
1122 ps->bridge_mask[newfid] = 1 << port;
1124 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1126 ret = _mv88e6xxx_update_bridge_config(ds, newfid);
1128 mutex_unlock(&ps->smi_mutex);
/* DSA STP-state hook: map the bridge-layer state to the hardware port
 * state, then defer the register write to the bridge work item because
 * this callback may run with softirqs disabled.
 */
1133 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
1135 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1139 case BR_STATE_DISABLED:
1140 stp_state = PORT_CONTROL_STATE_DISABLED;
1142 case BR_STATE_BLOCKING:
1143 case BR_STATE_LISTENING:
1144 stp_state = PORT_CONTROL_STATE_BLOCKING;
1146 case BR_STATE_LEARNING:
1147 stp_state = PORT_CONTROL_STATE_LEARNING;
1149 case BR_STATE_FORWARDING:
1151 stp_state = PORT_CONTROL_STATE_FORWARDING;
1155 netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);
1157 /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
1158 * so we can not update the port state directly but need to schedule it.
1160 ps->port_state[port] = stp_state;
1161 set_bit(port, &ps->port_state_update_mask);
1162 schedule_work(&ps->bridge_work);
/* Load a 6-byte MAC address into the three 16-bit ATU MAC registers. */
1167 static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
1168 const unsigned char *addr)
1172 for (i = 0; i < 3; i++) {
1173 ret = _mv88e6xxx_reg_write(
1174 ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
1175 (addr[i * 2] << 8) | addr[i * 2 + 1]);
/* Read the 6-byte MAC address back out of the ATU MAC registers. */
1183 static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
1187 for (i = 0; i < 3; i++) {
1188 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1189 GLOBAL_ATU_MAC_01 + i);
1192 addr[i * 2] = ret >> 8;
1193 addr[i * 2 + 1] = ret & 0xff;
/* Load one FDB entry into the port's FID: wait for the ATU, stage the MAC
 * address, write ATU_DATA with the port bit (0x10 << port) and entry state,
 * then issue a LOAD_DB command.
 */
1199 static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
1200 const unsigned char *addr, int state)
1202 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1203 u8 fid = ps->fid[port];
1206 ret = _mv88e6xxx_atu_wait(ds);
1210 ret = __mv88e6xxx_write_addr(ds, addr);
1214 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
1215 (0x10 << port) | state);
1219 ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
/* Add a static FDB entry; state depends on whether the address is
 * unicast or multicast.  vid is accepted but not visibly used here.
 */
1224 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
1225 const unsigned char *addr, u16 vid)
1227 int state = is_multicast_ether_addr(addr) ?
1228 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1229 GLOBAL_ATU_DATA_STATE_UC_STATIC;
1230 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1233 mutex_lock(&ps->smi_mutex);
1234 ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state);
1235 mutex_unlock(&ps->smi_mutex);
/* Delete an FDB entry by loading it with state UNUSED. */
1240 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
1241 const unsigned char *addr, u16 vid)
1243 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1246 mutex_lock(&ps->smi_mutex);
1247 ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
1248 GLOBAL_ATU_DATA_STATE_UNUSED);
1249 mutex_unlock(&ps->smi_mutex);
/* Walk the ATU with GET_NEXT_DB starting from *addr, skipping entries whose
 * port vector (bits 4..11 of ATU_DATA) doesn't include this port; on a hit,
 * read back the address and classify the entry as static or learned.
 */
1254 static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
1255 unsigned char *addr, bool *is_static)
1257 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1258 u8 fid = ps->fid[port];
1261 ret = _mv88e6xxx_atu_wait(ds);
1265 ret = __mv88e6xxx_write_addr(ds, addr);
1270 ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
1274 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
1277 state = ret & GLOBAL_ATU_DATA_STATE_MASK;
1278 if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
1280 } while (!(((ret >> 4) & 0xff) & (1 << port)));
1282 ret = __mv88e6xxx_read_addr(ds, addr);
1286 *is_static = state == (is_multicast_ether_addr(addr) ?
1287 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1288 GLOBAL_ATU_DATA_STATE_UC_STATIC);
1293 /* get next entry for port */
1294 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
1295 unsigned char *addr, bool *is_static)
1297 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1300 mutex_lock(&ps->smi_mutex);
1301 ret = __mv88e6xxx_port_getnext(ds, port, addr, is_static);
1302 mutex_unlock(&ps->smi_mutex);
/* Work item scheduled by mv88e6xxx_port_stp_update(): drain the pending
 * port-state bitmap and apply each deferred STP state change in process
 * context (where the SMI mutex may be taken).
 */
1307 static void mv88e6xxx_bridge_work(struct work_struct *work)
1309 struct mv88e6xxx_priv_state *ps;
1310 struct dsa_switch *ds;
1313 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
/* NOTE(review): same priv-follows-switch layout assumption as the PPU
 * work handler -- confirm against dsa's allocation scheme.
 */
1314 ds = ((struct dsa_switch *)ps) - 1;
1316 while (ps->port_state_update_mask) {
1317 port = __ffs(ps->port_state_update_mask);
1318 clear_bit(port, &ps->port_state_update_mask);
1319 mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
/* One-time hardware setup of a single port: PCS forcing, port control /
 * tagging mode, jumbo & upstream-port config, association vector, rate
 * control, and the port's initial FID + port-based VLAN map.  All register
 * writes happen under the SMI mutex; most are gated on the chip family.
 */
1323 static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
1325 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1329 mutex_lock(&ps->smi_mutex);
1331 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1332 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1333 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
1334 mv88e6xxx_6065_family(ds)) {
1335 /* MAC Forcing register: don't force link, speed,
1336 * duplex or flow control state to any particular
1337 * values on physical ports, but force the CPU port
1338 * and all DSA ports to their maximum bandwidth and
1341 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
1342 if (dsa_is_cpu_port(ds, port) ||
1343 ds->dsa_port_mask & (1 << port)) {
1344 reg |= PORT_PCS_CTRL_FORCE_LINK |
1345 PORT_PCS_CTRL_LINK_UP |
1346 PORT_PCS_CTRL_DUPLEX_FULL |
1347 PORT_PCS_CTRL_FORCE_DUPLEX;
/* 6065 family tops out at 100 Mb/s; everything else gets 1000. */
1348 if (mv88e6xxx_6065_family(ds))
1349 reg |= PORT_PCS_CTRL_100;
1351 reg |= PORT_PCS_CTRL_1000;
1353 reg |= PORT_PCS_CTRL_UNFORCED;
1356 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1357 PORT_PCS_CTRL, reg);
1362 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
1363 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
1364 * tunneling, determine priority by looking at 802.1p and IP
1365 * priority fields (IP prio has precedence), and set STP state
1368 * If this is the CPU link, use DSA or EDSA tagging depending
1369 * on which tagging mode was configured.
1371 * If this is a link to another switch, use DSA tagging mode.
1373 * If this is the upstream port for this switch, enable
1374 * forwarding of unknown unicasts and multicasts.
1377 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1378 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1379 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
1380 mv88e6xxx_6185_family(ds))
1381 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
1382 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
1383 PORT_CONTROL_STATE_FORWARDING;
1384 if (dsa_is_cpu_port(ds, port)) {
1385 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
1386 reg |= PORT_CONTROL_DSA_TAG;
1387 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1388 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
1389 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
1390 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
1392 reg |= PORT_CONTROL_FRAME_MODE_DSA;
1395 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1396 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1397 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
1398 mv88e6xxx_6185_family(ds)) {
1399 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
1400 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
1403 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1404 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1405 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) {
1406 if (ds->dsa_port_mask & (1 << port))
1407 reg |= PORT_CONTROL_FRAME_MODE_DSA;
1408 if (port == dsa_upstream_port(ds))
1409 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
1410 PORT_CONTROL_FORWARD_UNKNOWN_MC;
1413 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1419 /* Port Control 2: don't force a good FCS, set the maximum
1420 * frame size to 10240 bytes, don't let the switch add or
1421 * strip 802.1q tags, don't discard tagged or untagged frames
1422 * on this port, do a destination address lookup on all
1423 * received packets as usual, disable ARP mirroring and don't
1424 * send a copy of all transmitted/received frames on this port
1428 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1429 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1430 mv88e6xxx_6095_family(ds))
1431 reg = PORT_CONTROL_2_MAP_DA;
1433 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1434 mv88e6xxx_6165_family(ds))
1435 reg |= PORT_CONTROL_2_JUMBO_10240;
1437 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
1438 /* Set the upstream port this port should use */
1439 reg |= dsa_upstream_port(ds);
1440 /* enable forwarding of unknown multicast addresses to
1443 if (port == dsa_upstream_port(ds))
1444 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
1448 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1449 PORT_CONTROL_2, reg);
1454 /* Port Association Vector: when learning source addresses
1455 * of packets, add the address to the address database using
1456 * a port bitmap that has only the bit for this port set and
1457 * the other bits clear.
1459 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
1464 /* Egress rate control 2: disable egress rate control. */
1465 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
1470 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1471 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
1472 /* Do not limit the period of time that this port can
1473 * be paused for by the remote end or the period of
1474 * time that this port can pause the remote end.
1476 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1477 PORT_PAUSE_CTRL, 0x0000);
1481 /* Port ATU control: disable limiting the number of
1482 * address database entries that this port is allowed
1485 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1486 PORT_ATU_CONTROL, 0x0000);
1487 /* Priority Override: disable DA, SA and VTU priority
1490 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1491 PORT_PRI_OVERRIDE, 0x0000);
1495 /* Port Ethertype: use the Ethertype DSA Ethertype
1498 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1499 PORT_ETH_TYPE, ETH_P_EDSA);
1502 /* Tag Remap: use an identity 802.1p prio -> switch
1505 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1506 PORT_TAG_REGMAP_0123, 0x3210);
1510 /* Tag Remap 2: use an identity 802.1p prio -> switch
1513 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1514 PORT_TAG_REGMAP_4567, 0x7654);
1519 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1520 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1521 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
1522 /* Rate Control: disable ingress rate limiting. */
1523 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1524 PORT_RATE_CONTROL, 0x0001);
1529 /* Port Control 1: disable trunking, disable sending
1530 * learning messages to this port.
1532 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
1536 /* Port based VLAN map: give each port its own address
1537 * database, allow the CPU port to talk to each of the 'real'
1538 * ports, and allow each of the 'real' ports to only talk to
1539 * the upstream port.
1541 fid = __ffs(ps->fid_mask);
1542 ps->fid[port] = fid;
1543 ps->fid_mask &= ~(1 << fid);
1545 if (!dsa_is_cpu_port(ds, port))
1546 ps->bridge_mask[fid] = 1 << port;
1548 ret = _mv88e6xxx_update_port_config(ds, port);
1552 /* Default VLAN ID and priority: don't set a default VLAN
1553 * ID, and set the default packet priority to zero.
1555 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
1558 mutex_unlock(&ps->smi_mutex);
/* mv88e6xxx_setup_ports() - initialise every port of the switch.
 *
 * Iterates over all ps->num_ports ports and runs the per-port setup
 * (mv88e6xxx_setup_port) on each.
 *
 * NOTE(review): this excerpt is lossy — the declarations of 'ret'/'i',
 * the per-iteration error check on 'ret', and the function's return
 * statement are not visible here; presumably a failing setup_port call
 * aborts the loop and its error code is propagated — confirm against
 * the full source.
 */
1562 int mv88e6xxx_setup_ports(struct dsa_switch *ds)
1564 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1568 for (i = 0; i < ps->num_ports; i++) {
1569 ret = mv88e6xxx_setup_port(ds, i);
/* mv88e6xxx_setup_common() - one-time software state initialisation
 * shared by all 88E6xxx family drivers.
 *
 * Initialises the SMI mutex, caches the switch ID (PORT_SWITCH_ID of
 * port 0, with the low 4 revision bits masked off via & 0xfff0), seeds
 * the FID allocation bitmap with one bit per possible port, and sets up
 * the deferred bridge work item.
 *
 * NOTE(review): the function's return statement is elided in this
 * excerpt — presumably it returns 0 on success.
 */
1576 int mv88e6xxx_setup_common(struct dsa_switch *ds)
1578 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1580 mutex_init(&ps->smi_mutex);
1582 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
/* All FIDs start out free: one bit per port, DSA_MAX_PORTS wide. */
1584 ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;
1586 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
/* mv88e6xxx_setup_global() - program the switch-wide (global 1/global 2)
 * registers common to all supported chip families, with a few
 * family-conditional extras.
 *
 * Sequence: ATU ageing/learn2all, IP ToS and IEEE 802.1p priority maps,
 * management-frame trapping, the cross-chip DSA routing table, trunk
 * mask/mapping clearing, and (per family) PVT init, priority-override
 * clearing and ingress rate-limit reset.
 *
 * NOTE(review): this excerpt is lossy — several REG_WRITE argument
 * lines, loop-local declarations and the final return are elided.
 */
1591 int mv88e6xxx_setup_global(struct dsa_switch *ds)
1593 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1596 /* Set the default address aging time to 5 minutes, and
1597 * enable address learn messages to be sent to all message
/* 0x0140 is the ageing-time field value for the 5-minute default. */
1600 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
1601 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
1603 /* Configure the IP ToS mapping registers. */
1604 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
1605 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
1606 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
1607 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
1608 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
1609 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
1610 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
1611 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
1613 /* Configure the IEEE 802.1p priority mapping register. */
1614 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
1616 /* Send all frames with destination addresses matching
1617 * 01:80:c2:00:00:0x to the CPU port.
1619 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
1621 /* Ignore removed tag data on doubly tagged packets, disable
1622 * flow control messages, force flow control priority to the
1623 * highest, and send all special multicast frames to the CPU
1624 * port at the highest priority.
/* 0x7 = CPU-dest priority bits, 0x70 = flow-control priority bits. */
1626 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
1627 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
1628 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
1630 /* Program the DSA routing table. */
/* For every possible target device (0..31), map it to the routing-table
 * nexthop port when a route exists; the 'nexthop' default assignment
 * (presumably the "no route" port value) is elided in this excerpt.
 */
1631 for (i = 0; i < 32; i++) {
1634 if (ds->pd->rtable &&
1635 i != ds->index && i < ds->dst->pd->nr_chips)
1636 nexthop = ds->pd->rtable[i] & 0x1f;
1638 REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
1639 GLOBAL2_DEVICE_MAPPING_UPDATE |
1640 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
1644 /* Clear all trunk masks. */
/* 0x8000 is the update bit; the mask itself includes every port. */
1645 for (i = 0; i < 8; i++)
1646 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
1647 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
1648 ((1 << ps->num_ports) - 1));
1650 /* Clear all trunk mappings. */
1651 for (i = 0; i < 16; i++)
1652 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
1653 GLOBAL2_TRUNK_MAPPING_UPDATE |
1654 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
/* The following two extras exist only on the 6352/6351/6165/6097
 * families.
 */
1656 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1657 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
1658 /* Send all frames with destination addresses matching
1659 * 01:80:c2:00:00:2x to the CPU port.
1661 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
1663 /* Initialise cross-chip port VLAN table to reset
/* 0x9000 — presumably the PVT "init" operation opcode; confirm
 * against the 88E6xxx datasheet.
 */
1666 REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
1668 /* Clear the priority override table. */
1669 for (i = 0; i < 16; i++)
1670 REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
1674 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1675 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1676 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
1677 /* Disable ingress rate limiting by resetting all
1678 * ingress rate limit registers to their initial
1681 for (i = 0; i < ps->num_ports; i++)
1682 REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
/* mv88e6xxx_switch_reset() - software-reset the switch core.
 * @ppu_active: when true, keep the PHY Polling Unit running across the
 *              reset (needed for indirect phy access via global regs
 *              0x18/0x19); the expected "reset done" status bits differ
 *              accordingly (0x8800 vs 0xc800).
 *
 * Disables all ports (clears the two port-state bits of PORT_CONTROL),
 * waits for the transmit queues to drain, issues the reset command to
 * global register 0x04, then polls global register 0x00 for up to one
 * second until the expected status bits are set.
 *
 * NOTE(review): the if/else selecting between the 0xc000 and 0xc400
 * reset commands, the loop 'break', and the return statements are
 * elided in this excerpt — presumably -ETIMEDOUT on timeout, 0 on
 * success; confirm against the full source.
 */
1689 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
1691 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1692 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
1693 unsigned long timeout;
1697 /* Set all ports to the disabled state. */
1698 for (i = 0; i < ps->num_ports; i++) {
1699 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
/* ~0x3: clear the port-state field, leave the rest untouched. */
1700 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
1703 /* Wait for transmit queues to drain. */
1704 usleep_range(2000, 4000);
1706 /* Reset the switch. Keep the PPU active if requested. The PPU
1707 * needs to be active to support indirect phy register access
1708 * through global registers 0x18 and 0x19.
1711 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
1713 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
1715 /* Wait up to one second for reset to complete. */
1716 timeout = jiffies + 1 * HZ;
1717 while (time_before(jiffies, timeout)) {
1718 ret = REG_READ(REG_GLOBAL, 0x00);
1719 if ((ret & is_reset) == is_reset)
1721 usleep_range(1000, 2000);
1723 if (time_after(jiffies, timeout))
/* mv88e6xxx_phy_page_read() - read a register from a specific page of a
 * port's PHY.
 *
 * Under the SMI mutex: selects @page via phy register 0x16 (the
 * page-select register on Marvell PHYs), performs the indirect read of
 * @reg, then restores page 0 before releasing the lock. The page
 * restore is done unconditionally so the PHY is never left on a
 * non-default page.
 *
 * NOTE(review): the error-goto after the page-select write and the
 * final 'return ret' are elided in this excerpt.
 */
1729 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
1731 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1734 mutex_lock(&ps->smi_mutex);
1735 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
1738 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
/* Restore page 0; return value deliberately ignored (best effort). */
1740 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
1741 mutex_unlock(&ps->smi_mutex);
/* mv88e6xxx_phy_page_write() - write a register on a specific page of a
 * port's PHY.
 *
 * Mirror of mv88e6xxx_phy_page_read(): under the SMI mutex, select
 * @page via phy register 0x16, write @val to @reg indirectly, then
 * restore page 0 before unlocking.
 *
 * NOTE(review): the 'val' parameter declaration (continuation of the
 * signature), the error-goto after the page select, and the final
 * 'return ret' are elided in this excerpt.
 */
1745 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
1748 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1751 mutex_lock(&ps->smi_mutex);
1752 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
1756 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
/* Restore page 0; return value deliberately ignored (best effort). */
1758 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
1759 mutex_unlock(&ps->smi_mutex);
/* mv88e6xxx_port_to_phy_addr() - map a switch port number to its PHY
 * address, validating the port index against ps->num_ports.
 *
 * NOTE(review): the return statements are elided in this excerpt —
 * presumably a valid port maps to itself (return port) and an invalid
 * one yields a negative errno; confirm against the full source.
 */
1763 static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
1765 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1767 if (port >= 0 && port < ps->num_ports)
/* mv88e6xxx_phy_read() - public entry point for a direct PHY register
 * read.
 *
 * Translates @port to a PHY address, then performs the locked read via
 * the _mv88e6xxx_phy_read() helper under the SMI mutex.
 *
 * NOTE(review): the return type line of the signature, the negative-
 * address early return, and the final 'return ret' are elided in this
 * excerpt.
 */
1773 mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
1775 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1776 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1782 mutex_lock(&ps->smi_mutex);
1783 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
1784 mutex_unlock(&ps->smi_mutex);
/* mv88e6xxx_phy_write() - public entry point for a direct PHY register
 * write.
 *
 * Translates @port to a PHY address, then performs the locked write via
 * the _mv88e6xxx_phy_write() helper under the SMI mutex.
 *
 * NOTE(review): the return type line of the signature, the negative-
 * address early return, and the final 'return ret' are elided in this
 * excerpt.
 */
1789 mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
1791 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1792 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1798 mutex_lock(&ps->smi_mutex);
1799 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
1800 mutex_unlock(&ps->smi_mutex);
/* mv88e6xxx_phy_read_indirect() - public entry point for an indirect
 * PHY register read (via the switch's global SMI PHY command/data
 * registers rather than a direct MDIO transaction).
 *
 * Same locking/translation pattern as mv88e6xxx_phy_read(), but using
 * the indirect helper.
 *
 * NOTE(review): the return type line, the negative-address early
 * return, and the final 'return ret' are elided in this excerpt.
 */
1805 mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
1807 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1808 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1814 mutex_lock(&ps->smi_mutex);
1815 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
1816 mutex_unlock(&ps->smi_mutex);
/* mv88e6xxx_phy_write_indirect() - public entry point for an indirect
 * PHY register write.
 *
 * Same locking/translation pattern as mv88e6xxx_phy_write(), but using
 * the indirect helper.
 *
 * NOTE(review): the 'val' parameter line (signature continuation), the
 * negative-address early return, and the final 'return ret' are elided
 * in this excerpt.
 */
1821 mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
1824 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1825 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1831 mutex_lock(&ps->smi_mutex);
1832 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
1833 mutex_unlock(&ps->smi_mutex);
/* mv88e6xxx_init() - module init: register each compiled-in chip-family
 * DSA switch driver. Each registration is guarded by its own Kconfig
 * option so only the enabled families are registered.
 *
 * NOTE(review): the closing #endif lines and the 'return 0' are elided
 * in this excerpt.
 */
1837 static int __init mv88e6xxx_init(void)
1839 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
1840 register_switch_driver(&mv88e6131_switch_driver);
1842 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
1843 register_switch_driver(&mv88e6123_61_65_switch_driver);
1845 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
1846 register_switch_driver(&mv88e6352_switch_driver);
1848 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
1849 register_switch_driver(&mv88e6171_switch_driver);
1853 module_init(mv88e6xxx_init);
/* mv88e6xxx_cleanup() - module exit: unregister the chip-family drivers
 * in reverse order of registration, under the same Kconfig guards as
 * mv88e6xxx_init().
 *
 * NOTE(review): the closing #endif lines are elided in this excerpt;
 * the MV88E6352 unregister branch is also not visible here — confirm it
 * exists in the full source, since the init path registers it.
 */
1855 static void __exit mv88e6xxx_cleanup(void)
1857 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
1858 unregister_switch_driver(&mv88e6171_switch_driver);
1860 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
1861 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
1863 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
1864 unregister_switch_driver(&mv88e6131_switch_driver);
1867 module_exit(mv88e6xxx_cleanup);
1869 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
1870 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
1871 MODULE_LICENSE("GPL");