HOST AP DRIVER
M: Jouni Malinen <j@w1.fi>
-L: hostap@shmoo.com (subscribers-only)
L: linux-wireless@vger.kernel.org
-W: http://hostap.epitest.fi/
-S: Maintained
+W: http://w1.fi/hostap-driver.html
+S: Obsolete
F: drivers/net/wireless/intersil/hostap/
HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER
obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
mv88e6xxx-objs := chip.o
+mv88e6xxx-objs += global1.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
#include <net/switchdev.h>
#include "mv88e6xxx.h"
+#include "global1.h"
#include "global2.h"
static void assert_reg_lock(struct mv88e6xxx_chip *chip)
return 0;
}
-static const struct mv88e6xxx_ops mv88e6xxx_smi_single_chip_ops = {
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_single_chip_ops = {
.read = mv88e6xxx_smi_single_chip_read,
.write = mv88e6xxx_smi_single_chip_write,
};
return 0;
}
-static const struct mv88e6xxx_ops mv88e6xxx_smi_multi_chip_ops = {
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_multi_chip_ops = {
.read = mv88e6xxx_smi_multi_chip_read,
.write = mv88e6xxx_smi_multi_chip_write,
};
{
	int addr = phy; /* PHY device addresses start at 0x0 */
- if (!chip->phy_ops)
+ if (!chip->info->ops->phy_read)
return -EOPNOTSUPP;
- return chip->phy_ops->read(chip, addr, reg, val);
+ return chip->info->ops->phy_read(chip, addr, reg, val);
}
static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
{
	int addr = phy; /* PHY device addresses start at 0x0 */
- if (!chip->phy_ops)
+ if (!chip->info->ops->phy_write)
return -EOPNOTSUPP;
- return chip->phy_ops->write(chip, addr, reg, val);
+ return chip->info->ops->phy_write(chip, addr, reg, val);
}
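/* Illustrative sketch (not part of the patch): with phy_read/phy_write moved
 * into the per-model ops table, callers stay bus-agnostic. A hypothetical
 * caller, assuming the <linux/mii.h> register names:
 *
 *	u16 bmcr;
 *	int err = mv88e6xxx_phy_read(chip, 0, MII_BMCR, &bmcr);
 *	if (err == -EOPNOTSUPP)
 *		;	// this model exposes no PHY access method
 */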
static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
return mv88e6xxx_write(chip, addr, reg, val);
}
-static int _mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg)
+static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
{
u16 val;
- int err;
+ int i, err;
- err = mv88e6xxx_read(chip, addr, reg, &val);
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
if (err)
return err;
- return val;
-}
-
-static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr,
- int reg, u16 val)
-{
- return mv88e6xxx_write(chip, addr, reg, val);
-}
-
-static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
-{
- int ret;
- int i;
-
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
- if (ret < 0)
- return ret;
-
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
- ret & ~GLOBAL_CONTROL_PPU_ENABLE);
- if (ret)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL,
+ val & ~GLOBAL_CONTROL_PPU_ENABLE);
+ if (err)
+ return err;
for (i = 0; i < 16; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
+ if (err)
+ return err;
usleep_range(1000, 2000);
- if ((ret & GLOBAL_STATUS_PPU_MASK) !=
- GLOBAL_STATUS_PPU_POLLING)
+ if ((val & GLOBAL_STATUS_PPU_MASK) != GLOBAL_STATUS_PPU_POLLING)
return 0;
}
static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip)
{
- int ret, err, i;
+ u16 val;
+ int i, err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
- ret | GLOBAL_CONTROL_PPU_ENABLE);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL,
+ val | GLOBAL_CONTROL_PPU_ENABLE);
if (err)
return err;
for (i = 0; i < 16; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
+ if (err)
+ return err;
usleep_range(1000, 2000);
- if ((ret & GLOBAL_STATUS_PPU_MASK) ==
- GLOBAL_STATUS_PPU_POLLING)
+ if ((val & GLOBAL_STATUS_PPU_MASK) == GLOBAL_STATUS_PPU_POLLING)
return 0;
}
return err;
}
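/* Note (illustrative, not part of the patch): both PPU routines poll
 * GLOBAL_STATUS up to 16 times with a 1-2 ms usleep_range() between reads,
 * so toggling the PHY polling unit is bounded at roughly 16-32 ms before
 * the error path is taken.
 */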
-static const struct mv88e6xxx_ops mv88e6xxx_phy_ppu_ops = {
- .read = mv88e6xxx_phy_ppu_read,
- .write = mv88e6xxx_phy_ppu_write,
-};
-
static bool mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip)
{
return chip->info->family == MV88E6XXX_FAMILY_6065;
return chip->info->family == MV88E6XXX_FAMILY_6352;
}
-static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
-{
- return chip->info->num_databases;
-}
-
-static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_chip *chip)
-{
- /* Does the device have dedicated FID registers for ATU and VTU ops? */
- if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
- mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip))
- return true;
-
- return false;
-}
-
/* We expect the switch to perform auto negotiation if there is a real
* phy. However, in the case of a fixed link phy, we force the port
* settings from the fixed link settings.
reg |= PORT_PCS_CTRL_DUPLEX_FULL;
if ((mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip)) &&
- (port >= chip->info->num_ports - 2)) {
+ (port >= mv88e6xxx_num_ports(chip) - 2)) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip)
{
- int ret;
- int i;
+ u16 val;
+ int i, err;
for (i = 0; i < 10; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_OP);
- if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
+		err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_OP, &val);
+		if (err)
+			return err;
+
+		if ((val & GLOBAL_STATS_OP_BUSY) == 0)
return 0;
}
static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
- int ret;
+ int err;
if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_CAPTURE_PORT |
- GLOBAL_STATS_OP_HIST_RX_TX | port);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_CAPTURE_PORT |
+ GLOBAL_STATS_OP_HIST_RX_TX | port);
+ if (err)
+ return err;
/* Wait for the snapshotting to complete. */
- ret = _mv88e6xxx_stats_wait(chip);
- if (ret < 0)
- return ret;
-
- return 0;
+ return _mv88e6xxx_stats_wait(chip);
}
static void _mv88e6xxx_stats_read(struct mv88e6xxx_chip *chip,
int stat, u32 *val)
{
- u32 _val;
- int ret;
+ u32 value;
+ u16 reg;
+ int err;
*val = 0;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_READ_CAPTURED |
- GLOBAL_STATS_OP_HIST_RX_TX | stat);
- if (ret < 0)
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_READ_CAPTURED |
+ GLOBAL_STATS_OP_HIST_RX_TX | stat);
+ if (err)
return;
- ret = _mv88e6xxx_stats_wait(chip);
- if (ret < 0)
+ err = _mv88e6xxx_stats_wait(chip);
+ if (err)
return;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
- if (ret < 0)
+	err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_32, &reg);
+ if (err)
return;
- _val = ret << 16;
+ value = reg << 16;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
- if (ret < 0)
+	err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_01, &reg);
+ if (err)
return;
- *val = _val | ret;
+ *val = value | reg;
}
static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP,
- GLOBAL_ATU_OP_BUSY);
+ return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
}
static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd)
{
- int ret;
+ u16 val;
+ int err;
- if (mv88e6xxx_has_fid_reg(chip)) {
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_FID,
- fid);
- if (ret < 0)
- return ret;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_ATU_FID)) {
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid);
+ if (err)
+ return err;
} else if (mv88e6xxx_num_databases(chip) == 256) {
/* ATU DBNum[7:4] are located in ATU Control 15:12 */
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
- (ret & 0xfff) |
- ((fid << 8) & 0xf000));
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
+ (val & 0xfff) | ((fid << 8) & 0xf000));
+ if (err)
+ return err;
/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
cmd |= fid & 0xf;
}
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, cmd);
+ if (err)
+ return err;
return _mv88e6xxx_atu_wait(chip);
}
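/* Worked example (illustrative): on a 256-database chip without a dedicated
 * ATU FID register, fid = 0xab is split in two. DBNum[7:4] goes into ATU
 * Control bits 15:12, since (0xab << 8) & 0xf000 is 0xa000, and
 * DBNum[3:0] = 0xb is OR'ed into bits 3:0 of the ATU Operation command
 * itself.
 */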
data |= (entry->portv_trunkid << shift) & mask;
}
- return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_DATA, data);
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
}
static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip,
static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
{
struct net_device *bridge = chip->ports[port].bridge_dev;
- const u16 mask = (1 << chip->info->num_ports) - 1;
+ const u16 mask = (1 << mv88e6xxx_num_ports(chip)) - 1;
struct dsa_switch *ds = chip->ds;
u16 output_ports = 0;
u16 reg;
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
output_ports = mask;
} else {
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
/* allow sending frames to every group member */
if (bridge && chip->ports[i].bridge_dev == bridge)
output_ports |= BIT(i);
static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP,
- GLOBAL_VTU_OP_BUSY);
+ return mv88e6xxx_g1_wait(chip, GLOBAL_VTU_OP, GLOBAL_VTU_OP_BUSY);
}
static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op)
{
- int ret;
+ int err;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_OP, op);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_OP, op);
+ if (err)
+ return err;
return _mv88e6xxx_vtu_wait(chip);
}
}
static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry,
+ struct mv88e6xxx_vtu_entry *entry,
unsigned int nibble_offset)
{
u16 regs[3];
- int i;
- int ret;
+ int i, err;
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_DATA_0_3 + i);
- if (ret < 0)
- return ret;
+		u16 *reg = &regs[i];
- regs[i] = ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ if (err)
+ return err;
}
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u16 reg = regs[i / 4];
}
static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_read(chip, entry, 0);
}
static int mv88e6xxx_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_read(chip, entry, 2);
}
static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry,
+ struct mv88e6xxx_vtu_entry *entry,
unsigned int nibble_offset)
{
u16 regs[3] = { 0 };
- int i;
- int ret;
+ int i, err;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u8 data = entry->data[i];
}
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL,
- GLOBAL_VTU_DATA_0_3 + i, regs[i]);
- if (ret < 0)
- return ret;
+ u16 reg = regs[i];
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ if (err)
+ return err;
}
return 0;
}
static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_write(chip, entry, 0);
}
static int mv88e6xxx_stu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_write(chip, entry, 2);
}
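/* Illustrative: each port owns a 4-bit field in the three GLOBAL_VTU_DATA
 * registers, four ports per register. For port 5 the shift is
 * (5 % 4) * 4 + nibble_offset within regs[5 / 4]: VTU member tags
 * (offset 0) land in bits 5:4 of the second register, STU port states
 * (offset 2) in bits 7:6.
 */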
static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_chip *chip, u16 vid)
{
- return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID,
- vid & GLOBAL_VTU_VID_MASK);
+ return mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID,
+ vid & GLOBAL_VTU_VID_MASK);
}
static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
- struct mv88e6xxx_vtu_stu_entry next = { 0 };
- int ret;
+ struct mv88e6xxx_vtu_entry next = { 0 };
+ u16 val;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
- ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val);
+ if (err)
+ return err;
- next.vid = ret & GLOBAL_VTU_VID_MASK;
- next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+ next.vid = val & GLOBAL_VTU_VID_MASK;
+ next.valid = !!(val & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = mv88e6xxx_vtu_data_read(chip, &next);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_vtu_data_read(chip, &next);
+ if (err)
+ return err;
- if (mv88e6xxx_has_fid_reg(chip)) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_FID);
- if (ret < 0)
- return ret;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_VTU_FID)) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_FID, &val);
+ if (err)
+ return err;
- next.fid = ret & GLOBAL_VTU_FID_MASK;
+ next.fid = val & GLOBAL_VTU_FID_MASK;
} else if (mv88e6xxx_num_databases(chip) == 256) {
/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
* VTU DBNum[3:0] are located in VTU Operation 3:0
*/
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_OP);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_OP, &val);
+ if (err)
+ return err;
- next.fid = (ret & 0xf00) >> 4;
- next.fid |= ret & 0xf;
+ next.fid = (val & 0xf00) >> 4;
+ next.fid |= val & 0xf;
}
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_SID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val);
+ if (err)
+ return err;
- next.sid = ret & GLOBAL_VTU_SID_MASK;
+ next.sid = val & GLOBAL_VTU_SID_MASK;
}
}
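/* Worked example (illustrative): reading the FID back on a 256-database
 * chip reverses the load-time split. With VTU Operation val = 0x0a0b,
 * (val & 0xf00) >> 4 yields 0xa0 and val & 0xf yields 0xb, recombining
 * to fid = 0xab.
 */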
int (*cb)(struct switchdev_obj *obj))
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct mv88e6xxx_vtu_stu_entry next;
+ struct mv88e6xxx_vtu_entry next;
u16 pvid;
int err;
}
static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
u16 reg = 0;
- int ret;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
if (!entry->valid)
goto loadpurge;
/* Write port member tags */
- ret = mv88e6xxx_vtu_data_write(chip, entry);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_vtu_data_write(chip, entry);
+ if (err)
+ return err;
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
- reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, reg);
+ if (err)
+ return err;
}
- if (mv88e6xxx_has_fid_reg(chip)) {
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_VTU_FID)) {
reg = entry->fid & GLOBAL_VTU_FID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_FID,
- reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_FID, reg);
+ if (err)
+ return err;
} else if (mv88e6xxx_num_databases(chip) == 256) {
/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
* VTU DBNum[3:0] are located in VTU Operation 3:0
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
reg |= entry->vid & GLOBAL_VTU_VID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, reg);
+ if (err)
+ return err;
return _mv88e6xxx_vtu_cmd(chip, op);
}
static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
- struct mv88e6xxx_vtu_stu_entry next = { 0 };
- int ret;
+ struct mv88e6xxx_vtu_entry next = { 0 };
+ u16 val;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
- sid & GLOBAL_VTU_SID_MASK);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID,
+ sid & GLOBAL_VTU_SID_MASK);
+ if (err)
+ return err;
- ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_SID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val);
+ if (err)
+ return err;
- next.sid = ret & GLOBAL_VTU_SID_MASK;
+ next.sid = val & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val);
+ if (err)
+ return err;
- next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+ next.valid = !!(val & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = mv88e6xxx_stu_data_read(chip, &next);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_stu_data_read(chip, &next);
+ if (err)
+ return err;
}
*entry = next;
}
static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
u16 reg = 0;
- int ret;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
if (!entry->valid)
goto loadpurge;
/* Write port states */
- ret = mv88e6xxx_stu_data_write(chip, entry);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_stu_data_write(chip, entry);
+ if (err)
+ return err;
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, reg);
+ if (err)
+ return err;
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, reg);
+ if (err)
+ return err;
return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
/* Set every FID bit used by the (un)bridged ports */
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
err = _mv88e6xxx_port_fid_get(chip, i, fid);
if (err)
return err;
}
static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
struct dsa_switch *ds = chip->ds;
- struct mv88e6xxx_vtu_stu_entry vlan = {
+ struct mv88e6xxx_vtu_entry vlan = {
.valid = true,
.vid = vid,
};
return err;
/* exclude all ports except the CPU and DSA ports */
- for (i = 0; i < chip->info->num_ports; ++i)
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) {
- struct mv88e6xxx_vtu_stu_entry vstp;
+ struct mv88e6xxx_vtu_entry vstp;
/* Adding a VTU entry requires a valid STU entry. As VSTP is not
* implemented, only one STU entry is needed to cover all VTU
}
static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
- struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
+ struct mv88e6xxx_vtu_entry *entry, bool creat)
{
int err;
u16 vid_begin, u16 vid_end)
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
if (!vid_begin)
if (vlan.vid > vid_end)
break;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
u16 vid, bool untagged)
{
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
int err;
err = _mv88e6xxx_vtu_get(chip, vid, &vlan, true);
int port, u16 vid)
{
struct dsa_switch *ds = chip->ds;
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
/* keep the VLAN unless all ports are excluded */
vlan.valid = false;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
continue;
static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
const unsigned char *addr)
{
- int i, ret;
+ int i, err;
for (i = 0; i < 3; i++) {
- ret = _mv88e6xxx_reg_write(
- chip, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
- (addr[i * 2] << 8) | addr[i * 2 + 1]);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i,
+ (addr[i * 2] << 8) | addr[i * 2 + 1]);
+ if (err)
+ return err;
}
return 0;
static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip,
unsigned char *addr)
{
- int i, ret;
+ u16 val;
+ int i, err;
for (i = 0; i < 3; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_ATU_MAC_01 + i);
- if (ret < 0)
- return ret;
- addr[i * 2] = ret >> 8;
- addr[i * 2 + 1] = ret & 0xff;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
+ if (err)
+ return err;
+
+ addr[i * 2] = val >> 8;
+ addr[i * 2 + 1] = val & 0xff;
}
return 0;
const unsigned char *addr, u16 vid,
u8 state)
{
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
struct mv88e6xxx_atu_entry entry;
int err;
struct mv88e6xxx_atu_entry *entry)
{
struct mv88e6xxx_atu_entry next = { 0 };
- int ret;
+ u16 val;
+ int err;
next.fid = fid;
- ret = _mv88e6xxx_atu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_atu_wait(chip);
+ if (err)
+ return err;
- ret = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+ if (err)
+ return err;
- ret = _mv88e6xxx_atu_mac_read(chip, next.mac);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_atu_mac_read(chip, next.mac);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_DATA);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
+ if (err)
+ return err;
- next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+ next.state = val & GLOBAL_ATU_DATA_STATE_MASK;
if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
unsigned int mask, shift;
- if (ret & GLOBAL_ATU_DATA_TRUNK) {
+ if (val & GLOBAL_ATU_DATA_TRUNK) {
next.trunk = true;
mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
}
- next.portv_trunkid = (ret & mask) >> shift;
+ next.portv_trunkid = (val & mask) >> shift;
}
*entry = next;
struct switchdev_obj *obj,
int (*cb)(struct switchdev_obj *obj))
{
- struct mv88e6xxx_vtu_stu_entry vlan = {
+ struct mv88e6xxx_vtu_entry vlan = {
.vid = GLOBAL_VTU_VID_MASK, /* all ones */
};
u16 fid;
/* Assign the bridge and remap each port's VLANTable */
chip->ports[port].bridge_dev = bridge;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
if (chip->ports[i].bridge_dev == bridge) {
err = _mv88e6xxx_port_based_vlan_map(chip, i);
if (err)
/* Unassign the bridge and remap each port's VLANTable */
chip->ports[port].bridge_dev = NULL;
- for (i = 0; i < chip->info->num_ports; ++i)
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
if (i == port || chip->ports[i].bridge_dev == bridge)
if (_mv88e6xxx_port_based_vlan_map(chip, i))
netdev_warn(ds->ports[i].netdev,
u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
struct gpio_desc *gpiod = chip->reset;
unsigned long timeout;
- int err, ret;
u16 reg;
+ int err;
int i;
/* Set all ports to the disabled state. */
- for (i = 0; i < chip->info->num_ports; i++) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
err = mv88e6xxx_port_read(chip, i, PORT_CONTROL, ®);
if (err)
return err;
* through global registers 0x18 and 0x19.
*/
if (ppu_active)
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc000);
+ err = mv88e6xxx_g1_write(chip, 0x04, 0xc000);
else
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc400);
+ err = mv88e6xxx_g1_write(chip, 0x04, 0xc400);
if (err)
return err;
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, 0x00);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, 0x00, ®);
+ if (err)
+ return err;
- if ((ret & is_reset) == is_reset)
+ if ((reg & is_reset) == is_reset)
break;
usleep_range(1000, 2000);
}
return mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, 0x0000);
}
-static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
{
int err;
- err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_01,
- (addr[0] << 8) | addr[1]);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
if (err)
return err;
- err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_23,
- (addr[2] << 8) | addr[3]);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
if (err)
return err;
- return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_45,
- (addr[4] << 8) | addr[5]);
+ return 0;
}
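/* Illustrative: the six MAC bytes pack big-endian into three 16-bit
 * Global 1 registers. For the made-up address 00:50:43:12:34:56 the writes
 * are GLOBAL_MAC_01 = 0x0050, GLOBAL_MAC_23 = 0x4312 and
 * GLOBAL_MAC_45 = 0x3456.
 */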
static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
/* Round to nearest multiple of coeff */
age_time = (msecs + coeff / 2) / coeff;
- err = mv88e6xxx_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, &val);
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
if (err)
return err;
val &= ~0xff0;
val |= age_time << 4;
- return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, val);
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
}
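/* Worked example (illustrative): with age_time_coeff = 15000, a requested
 * ageing time of 300000 ms rounds to (300000 + 7500) / 15000 = 20, which
 * is then placed in bits 11:4 of GLOBAL_ATU_CONTROL by val |= 20 << 4.
 */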
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE))
reg |= GLOBAL_CONTROL_PPU_ENABLE;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, reg);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, reg);
if (err)
return err;
reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MONITOR_CONTROL,
- reg);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MONITOR_CONTROL, reg);
if (err)
return err;
/* Disable remote management, and set the switch's DSA device number. */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL_2,
- GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
- (ds->index & 0x1f));
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL_2,
+ GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+ (ds->index & 0x1f));
if (err)
return err;
* enable address learn messages to be sent to all message
* ports.
*/
- err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
- GLOBAL_ATU_CONTROL_LEARN2ALL);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_LEARN2ALL);
if (err)
return err;
return err;
/* Configure the IP ToS mapping registers. */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_0, 0x0000);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_1, 0x0000);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_2, 0x5555);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_3, 0x5555);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_4, 0xaaaa);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_5, 0xaaaa);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_6, 0xffff);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_7, 0xffff);
if (err)
return err;
/* Configure the IEEE 802.1p priority mapping register. */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IEEE_PRI, 0xfa41);
if (err)
return err;
/* Clear the statistics counters for all ports */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_FLUSH_ALL);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_FLUSH_ALL);
if (err)
return err;
goto unlock;
/* Setup Switch Port Registers */
- for (i = 0; i < chip->info->num_ports; i++) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
err = mv88e6xxx_setup_port(chip, i);
if (err)
goto unlock;
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
-
- /* Has an indirect Switch MAC/WoL/WoF register in Global 2? */
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_SWITCH_MAC))
- err = mv88e6xxx_g2_set_switch_mac(chip, addr);
- else
- err = mv88e6xxx_g1_set_switch_mac(chip, addr);
+ if (!chip->info->ops->set_switch_mac)
+ return -EOPNOTSUPP;
+ mutex_lock(&chip->reg_lock);
+ err = chip->info->ops->set_switch_mac(chip, addr);
mutex_unlock(&chip->reg_lock);
return err;
u16 val;
int err;
- if (phy >= chip->info->num_ports)
+ if (phy >= mv88e6xxx_num_ports(chip))
return 0xffff;
mutex_lock(&chip->reg_lock);
struct mv88e6xxx_chip *chip = bus->priv;
int err;
- if (phy >= chip->info->num_ports)
+ if (phy >= mv88e6xxx_num_ports(chip))
return 0xffff;
mutex_lock(&chip->reg_lock);
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16))
- err = mv88e6xxx_g2_get_eeprom16(chip, eeprom, data);
- else
- err = -EOPNOTSUPP;
+ if (!chip->info->ops->get_eeprom)
+ return -EOPNOTSUPP;
+ mutex_lock(&chip->reg_lock);
+ err = chip->info->ops->get_eeprom(chip, eeprom, data);
mutex_unlock(&chip->reg_lock);
if (err)
struct mv88e6xxx_chip *chip = ds->priv;
int err;
+ if (!chip->info->ops->set_eeprom)
+ return -EOPNOTSUPP;
+
if (eeprom->magic != 0xc3ec4951)
return -EINVAL;
mutex_lock(&chip->reg_lock);
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16))
- err = mv88e6xxx_g2_set_eeprom16(chip, eeprom, data);
- else
- err = -EOPNOTSUPP;
-
+ err = chip->info->ops->set_eeprom(chip, eeprom, data);
mutex_unlock(&chip->reg_lock);
return err;
}
+static const struct mv88e6xxx_ops mv88e6085_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6095_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6123_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_read,
+ .phy_write = mv88e6xxx_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6131_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6161_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_read,
+ .phy_write = mv88e6xxx_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6165_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_read,
+ .phy_write = mv88e6xxx_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6171_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6172_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6175_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6176_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6185_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6240_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6320_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6321_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6350_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6351_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6352_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
[MV88E6085] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
.num_databases = 4096,
.num_ports = 10,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6097,
+ .ops = &mv88e6085_ops,
},
[MV88E6095] = {
.num_databases = 256,
.num_ports = 11,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6095,
+ .ops = &mv88e6095_ops,
},
[MV88E6123] = {
.num_databases = 4096,
.num_ports = 3,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ .ops = &mv88e6123_ops,
},
[MV88E6131] = {
.num_databases = 256,
.num_ports = 8,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ .ops = &mv88e6131_ops,
},
[MV88E6161] = {
.num_databases = 4096,
.num_ports = 6,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ .ops = &mv88e6161_ops,
},
[MV88E6165] = {
.num_databases = 4096,
.num_ports = 6,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ .ops = &mv88e6165_ops,
},
[MV88E6171] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6171_ops,
},
[MV88E6172] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6172_ops,
},
[MV88E6175] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6175_ops,
},
[MV88E6176] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6176_ops,
},
[MV88E6185] = {
.num_databases = 256,
.num_ports = 10,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ .ops = &mv88e6185_ops,
},
[MV88E6240] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6240_ops,
},
[MV88E6320] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ .ops = &mv88e6320_ops,
},
[MV88E6321] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ .ops = &mv88e6321_ops,
},
[MV88E6350] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6350_ops,
},
[MV88E6351] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6351_ops,
},
[MV88E6352] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6352_ops,
},
};
return chip;
}
-static const struct mv88e6xxx_ops mv88e6xxx_g2_smi_phy_ops = {
- .read = mv88e6xxx_g2_smi_phy_read,
- .write = mv88e6xxx_g2_smi_phy_write,
-};
-
-static const struct mv88e6xxx_ops mv88e6xxx_phy_ops = {
- .read = mv88e6xxx_read,
- .write = mv88e6xxx_write,
-};
-
static void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip)
{
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_SMI_PHY)) {
- chip->phy_ops = &mv88e6xxx_g2_smi_phy_ops;
- } else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) {
- chip->phy_ops = &mv88e6xxx_phy_ppu_ops;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
mv88e6xxx_ppu_state_init(chip);
- } else {
- chip->phy_ops = &mv88e6xxx_phy_ops;
- }
}
static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
{
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) {
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
mv88e6xxx_ppu_state_destroy(chip);
- }
}
static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
if (IS_ERR(chip->reset))
return PTR_ERR(chip->reset);
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16) &&
+ if (chip->info->ops->get_eeprom &&
!of_property_read_u32(np, "eeprom-length", &eeprom_len))
chip->eeprom_len = eeprom_len;
--- /dev/null
+/*
+ * Marvell 88E6xxx Switch Global (1) Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "mv88e6xxx.h"
+#include "global1.h"
+
+int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
+{
+ int addr = chip->info->global1_addr;
+
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
+{
+ int addr = chip->info->global1_addr;
+
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+{
+ return mv88e6xxx_wait(chip, chip->info->global1_addr, reg, mask);
+}
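/* Usage sketch (illustrative, not part of the patch): Global 1 accesses
 * now go through these helpers instead of a hard-coded SMI address, e.g.:
 *
 *	u16 val;
 *	int err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
 *
 * A future chip with Global 1 at a different address only needs a
 * different info->global1_addr entry.
 */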
--- /dev/null
+/*
+ * Marvell 88E6xxx Switch Global (1) Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_GLOBAL1_H
+#define _MV88E6XXX_GLOBAL1_H
+
+#include "mv88e6xxx.h"
+
+int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
+int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
+int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask);
+
+#endif /* _MV88E6XXX_GLOBAL1_H */
#include "mv88e6xxx.h"
#include "global2.h"
+#define ADDR_GLOBAL2 0x1c
+
+static int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
+{
+ return mv88e6xxx_read(chip, ADDR_GLOBAL2, reg, val);
+}
+
+static int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
+{
+ return mv88e6xxx_write(chip, ADDR_GLOBAL2, reg, val);
+}
+
+static int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
+{
+ return mv88e6xxx_update(chip, ADDR_GLOBAL2, reg, update);
+}
+
+static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+{
+ return mv88e6xxx_wait(chip, ADDR_GLOBAL2, reg, mask);
+}
+
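/* Note (illustrative): these wrappers mirror the Global 1 helpers, but
 * since Global 2 sits at SMI address 0x1c on the chips this driver
 * supports, the address stays a local #define rather than a per-chip
 * info field.
 */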
/* Offset 0x06: Device Mapping Table register */
static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
{
u16 val = (target << 8) | (port & 0xf);
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING, val);
+ return mv88e6xxx_g2_update(chip, GLOBAL2_DEVICE_MAPPING, val);
}
static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
bool hask, u16 mask)
{
- const u16 port_mask = BIT(chip->info->num_ports) - 1;
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (num << 12) | (mask & port_mask);
if (hask)
val |= GLOBAL2_TRUNK_MASK_HASK;
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, val);
+ return mv88e6xxx_g2_update(chip, GLOBAL2_TRUNK_MASK, val);
}
/* Offset 0x08: Trunk Mapping Table register */
static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
u16 map)
{
- const u16 port_mask = BIT(chip->info->num_ports) - 1;
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (id << 11) | (map & port_mask);
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING, val);
+ return mv88e6xxx_g2_update(chip, GLOBAL2_TRUNK_MAPPING, val);
}
static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip)
{
- const u16 port_mask = BIT(chip->info->num_ports) - 1;
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
int i, err;
/* Clear all eight possible Trunk Mask vectors */
int port, err;
/* Init all Ingress Rate Limit resources of all ports */
- for (port = 0; port < chip->info->num_ports; ++port) {
+ for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
/* XXX newer chips (like 88E6390) have different 2-bit ops */
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD,
- GLOBAL2_IRL_CMD_OP_INIT_ALL |
- (port << 8));
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_IRL_CMD,
+ GLOBAL2_IRL_CMD_OP_INIT_ALL |
+ (port << 8));
if (err)
break;
/* Wait for the operation to complete */
- err = mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD,
- GLOBAL2_IRL_CMD_BUSY);
+ err = mv88e6xxx_g2_wait(chip, GLOBAL2_IRL_CMD,
+ GLOBAL2_IRL_CMD_BUSY);
if (err)
break;
}
{
u16 val = (pointer << 8) | data;
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, val);
+ return mv88e6xxx_g2_update(chip, GLOBAL2_SWITCH_MAC, val);
}
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
{
u16 val = (pointer << 8) | (data & 0x7);
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, val);
+ return mv88e6xxx_g2_update(chip, GLOBAL2_PRIO_OVERRIDE, val);
}
static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD,
- GLOBAL2_EEPROM_CMD_BUSY |
- GLOBAL2_EEPROM_CMD_RUNNING);
+ return mv88e6xxx_g2_wait(chip, GLOBAL2_EEPROM_CMD,
+ GLOBAL2_EEPROM_CMD_BUSY |
+ GLOBAL2_EEPROM_CMD_RUNNING);
}
static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
{
int err;
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, cmd);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_CMD, cmd);
if (err)
return err;
if (err)
return err;
- return mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
+ return mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_DATA, data);
}
static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_DATA, data);
if (err)
return err;
int err;
/* Ensure the RO WriteEn bit is set */
- err = mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, &val);
+ err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &val);
if (err)
return err;
static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_CMD,
- GLOBAL2_SMI_PHY_CMD_BUSY);
+ return mv88e6xxx_g2_wait(chip, GLOBAL2_SMI_PHY_CMD,
+ GLOBAL2_SMI_PHY_CMD_BUSY);
}
static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
{
int err;
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_CMD, cmd);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_CMD, cmd);
if (err)
return err;
if (err)
return err;
- return mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_DATA, val);
+ return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
}
int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
if (err)
return err;
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_DATA, val);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, val);
if (err)
return err;
/* Consider the frames with reserved multicast destination
* addresses matching 01:80:c2:00:00:2x as MGMT.
*/
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_2X,
- 0xffff);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_2X, 0xffff);
if (err)
return err;
}
/* Consider the frames with reserved multicast destination
* addresses matching 01:80:c2:00:00:0x as MGMT.
*/
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X,
- 0xffff);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_0X, 0xffff);
if (err)
return err;
}
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) ||
mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X))
reg |= GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x7;
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, reg);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_SWITCH_MGMT, reg);
if (err)
return err;
if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_PVT)) {
/* Initialize Cross-chip Port VLAN Table to reset defaults */
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_PVT_ADDR,
- GLOBAL2_PVT_ADDR_OP_INIT_ONES);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_ADDR,
+ GLOBAL2_PVT_ADDR_OP_INIT_ONES);
if (err)
return err;
}
#define PORT_TAG_REGMAP_0123 0x18
#define PORT_TAG_REGMAP_4567 0x19
-#define REG_GLOBAL 0x1b
#define GLOBAL_STATUS 0x00
#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */
/* Two bits for 6165, 6185 etc */
#define GLOBAL_MAC_01 0x01
#define GLOBAL_MAC_23 0x02
#define GLOBAL_MAC_45 0x03
-#define GLOBAL_ATU_FID 0x01 /* 6097 6165 6351 6352 */
-#define GLOBAL_VTU_FID 0x02 /* 6097 6165 6351 6352 */
+#define GLOBAL_ATU_FID 0x01
+#define GLOBAL_VTU_FID 0x02
#define GLOBAL_VTU_FID_MASK 0xfff
#define GLOBAL_VTU_SID 0x03 /* 6097 6165 6351 6352 */
#define GLOBAL_VTU_SID_MASK 0x3f
#define GLOBAL_STATS_COUNTER_32 0x1e
#define GLOBAL_STATS_COUNTER_01 0x1f
-#define REG_GLOBAL2 0x1c
#define GLOBAL2_INT_SOURCE 0x00
#define GLOBAL2_INT_MASK 0x01
#define GLOBAL2_MGMT_EN_2X 0x02
*/
MV88E6XXX_CAP_SERDES,
+ /* Switch Global (1) Registers.
+ */
+ MV88E6XXX_CAP_G1_ATU_FID, /* (0x01) ATU FID Register */
+ MV88E6XXX_CAP_G1_VTU_FID, /* (0x02) VTU FID Register */
+
/* Switch Global 2 Registers.
* The device contains a second set of global 16-bit registers.
*/
MV88E6XXX_CAP_G2_IRL_DATA, /* (0x0a) Ingress Rate Data */
MV88E6XXX_CAP_G2_PVT_ADDR, /* (0x0b) Cross Chip Port VLAN Addr */
MV88E6XXX_CAP_G2_PVT_DATA, /* (0x0c) Cross Chip Port VLAN Data */
- MV88E6XXX_CAP_G2_SWITCH_MAC, /* (0x0d) Switch MAC/WoL/WoF */
MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */
- MV88E6XXX_CAP_G2_EEPROM_CMD, /* (0x14) EEPROM Command */
- MV88E6XXX_CAP_G2_EEPROM_DATA, /* (0x15) EEPROM Data */
- MV88E6XXX_CAP_G2_SMI_PHY_CMD, /* (0x18) SMI PHY Command */
- MV88E6XXX_CAP_G2_SMI_PHY_DATA, /* (0x19) SMI PHY Data */
/* PHY Polling Unit.
* See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING.
#define MV88E6XXX_FLAG_SERDES BIT_ULL(MV88E6XXX_CAP_SERDES)
+#define MV88E6XXX_FLAG_G1_ATU_FID BIT_ULL(MV88E6XXX_CAP_G1_ATU_FID)
+#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID)
+
#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X)
#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X)
#define MV88E6XXX_FLAG_G2_IRL_DATA BIT_ULL(MV88E6XXX_CAP_G2_IRL_DATA)
#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT_ULL(MV88E6XXX_CAP_G2_PVT_ADDR)
#define MV88E6XXX_FLAG_G2_PVT_DATA BIT_ULL(MV88E6XXX_CAP_G2_PVT_DATA)
-#define MV88E6XXX_FLAG_G2_SWITCH_MAC BIT_ULL(MV88E6XXX_CAP_G2_SWITCH_MAC)
#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
-#define MV88E6XXX_FLAG_G2_EEPROM_CMD BIT_ULL(MV88E6XXX_CAP_G2_EEPROM_CMD)
-#define MV88E6XXX_FLAG_G2_EEPROM_DATA BIT_ULL(MV88E6XXX_CAP_G2_EEPROM_DATA)
-#define MV88E6XXX_FLAG_G2_SMI_PHY_CMD BIT_ULL(MV88E6XXX_CAP_G2_SMI_PHY_CMD)
-#define MV88E6XXX_FLAG_G2_SMI_PHY_DATA BIT_ULL(MV88E6XXX_CAP_G2_SMI_PHY_DATA)
#define MV88E6XXX_FLAG_PPU BIT_ULL(MV88E6XXX_CAP_PPU)
#define MV88E6XXX_FLAG_PPU_ACTIVE BIT_ULL(MV88E6XXX_CAP_PPU_ACTIVE)
#define MV88E6XXX_FLAG_TEMP_LIMIT BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT)
#define MV88E6XXX_FLAG_VTU BIT_ULL(MV88E6XXX_CAP_VTU)
-/* EEPROM Programming via Global2 with 16-bit data */
-#define MV88E6XXX_FLAGS_EEPROM16 \
- (MV88E6XXX_FLAG_G2_EEPROM_CMD | \
- MV88E6XXX_FLAG_G2_EEPROM_DATA)
-
/* Ingress Rate Limit unit */
#define MV88E6XXX_FLAGS_IRL \
(MV88E6XXX_FLAG_G2_IRL_CMD | \
(MV88E6XXX_FLAG_PHY_PAGE | \
MV88E6XXX_FLAG_SERDES)
-/* Indirect PHY access via Global2 SMI PHY registers */
-#define MV88E6XXX_FLAGS_SMI_PHY \
- (MV88E6XXX_FLAG_G2_SMI_PHY_CMD |\
- MV88E6XXX_FLAG_G2_SMI_PHY_DATA)
-
#define MV88E6XXX_FLAGS_FAMILY_6095 \
(MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6097 \
- (MV88E6XXX_FLAG_GLOBAL2 | \
+ (MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6165 \
- (MV88E6XXX_FLAG_GLOBAL2 | \
+ (MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
- MV88E6XXX_FLAGS_EEPROM16 | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_PVT | \
- MV88E6XXX_FLAGS_SMI_PHY)
+ MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6351 \
(MV88E6XXX_FLAG_EDSA | \
+ MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_PVT | \
- MV88E6XXX_FLAGS_SMI_PHY)
+ MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6352 \
(MV88E6XXX_FLAG_EDSA | \
MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
- MV88E6XXX_FLAGS_EEPROM16 | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
MV88E6XXX_FLAGS_PVT | \
- MV88E6XXX_FLAGS_SERDES | \
- MV88E6XXX_FLAGS_SMI_PHY)
+ MV88E6XXX_FLAGS_SERDES)
+
+struct mv88e6xxx_ops;
struct mv88e6xxx_info {
enum mv88e6xxx_family family;
unsigned int num_databases;
unsigned int num_ports;
unsigned int port_base_addr;
+ unsigned int global1_addr;
unsigned int age_time_coeff;
unsigned long long flags;
+ const struct mv88e6xxx_ops *ops;
};
struct mv88e6xxx_atu_entry {
u8 mac[ETH_ALEN];
};
-struct mv88e6xxx_vtu_stu_entry {
- /* VTU only */
+struct mv88e6xxx_vtu_entry {
u16 vid;
u16 fid;
-
- /* VTU and STU */
u8 sid;
bool valid;
u8 data[DSA_MAX_PORTS];
};
-struct mv88e6xxx_ops;
+struct mv88e6xxx_bus_ops;
struct mv88e6xxx_priv_port {
struct net_device *bridge_dev;
/* The MII bus and the address on the bus that is used to
* communication with the switch
*/
- const struct mv88e6xxx_ops *smi_ops;
+ const struct mv88e6xxx_bus_ops *smi_ops;
struct mii_bus *bus;
int sw_addr;
/* Handles automatic disabling and re-enabling of the PHY
* polling unit.
*/
- const struct mv88e6xxx_ops *phy_ops;
+ const struct mv88e6xxx_bus_ops *phy_ops;
struct mutex ppu_mutex;
int ppu_disabled;
struct work_struct ppu_work;
struct mii_bus *mdio_bus;
};
-struct mv88e6xxx_ops {
+struct mv88e6xxx_bus_ops {
int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
};
+struct mv88e6xxx_ops {
+ int (*get_eeprom)(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+ int (*set_eeprom)(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+ int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr);
+
+ int (*phy_read)(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 *val);
+ int (*phy_write)(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 val);
+};
+
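/* Illustrative: a NULL function pointer in this table means the model
 * lacks the feature, so callers probe before dispatching, e.g.:
 *
 *	if (!chip->info->ops->get_eeprom)
 *		return -EOPNOTSUPP;
 *	err = chip->info->ops->get_eeprom(chip, eeprom, data);
 */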
enum stat_type {
BANK0,
BANK1,
return (chip->info->flags & flags) == flags;
}
+static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_databases;
+}
+
+static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_ports;
+}
+
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct device *dev;
- int ret;
dev = &ar_ahb->pdev->dev;
- ar_ahb->cmd_clk = clk_get(dev, "wifi_wcss_cmd");
+ ar_ahb->cmd_clk = devm_clk_get(dev, "wifi_wcss_cmd");
if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) {
ath10k_err(ar, "failed to get cmd clk: %ld\n",
PTR_ERR(ar_ahb->cmd_clk));
- ret = ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV;
- goto out;
+ return ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV;
}
- ar_ahb->ref_clk = clk_get(dev, "wifi_wcss_ref");
+ ar_ahb->ref_clk = devm_clk_get(dev, "wifi_wcss_ref");
if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) {
ath10k_err(ar, "failed to get ref clk: %ld\n",
PTR_ERR(ar_ahb->ref_clk));
- ret = ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV;
- goto err_cmd_clk_put;
+ return ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV;
}
- ar_ahb->rtc_clk = clk_get(dev, "wifi_wcss_rtc");
+ ar_ahb->rtc_clk = devm_clk_get(dev, "wifi_wcss_rtc");
if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
ath10k_err(ar, "failed to get rtc clk: %ld\n",
PTR_ERR(ar_ahb->rtc_clk));
- ret = ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV;
- goto err_ref_clk_put;
+ return ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV;
}
return 0;
-
-err_ref_clk_put:
- clk_put(ar_ahb->ref_clk);
-
-err_cmd_clk_put:
- clk_put(ar_ahb->cmd_clk);
-
-out:
- return ret;
}
static void ath10k_ahb_clock_deinit(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
- if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk))
- clk_put(ar_ahb->cmd_clk);
-
- if (!IS_ERR_OR_NULL(ar_ahb->ref_clk))
- clk_put(ar_ahb->ref_clk);
-
- if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk))
- clk_put(ar_ahb->rtc_clk);
-
ar_ahb->cmd_clk = NULL;
ar_ahb->ref_clk = NULL;
ar_ahb->rtc_clk = NULL;
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct device *dev;
- int ret;
dev = &ar_ahb->pdev->dev;
- ar_ahb->core_cold_rst = reset_control_get(dev, "wifi_core_cold");
- if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst)) {
+ ar_ahb->core_cold_rst = devm_reset_control_get(dev, "wifi_core_cold");
+ if (IS_ERR(ar_ahb->core_cold_rst)) {
ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n",
PTR_ERR(ar_ahb->core_cold_rst));
- ret = ar_ahb->core_cold_rst ?
- PTR_ERR(ar_ahb->core_cold_rst) : -ENODEV;
- goto out;
+ return PTR_ERR(ar_ahb->core_cold_rst);
}
- ar_ahb->radio_cold_rst = reset_control_get(dev, "wifi_radio_cold");
- if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst)) {
+ ar_ahb->radio_cold_rst = devm_reset_control_get(dev, "wifi_radio_cold");
+ if (IS_ERR(ar_ahb->radio_cold_rst)) {
ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_cold_rst));
- ret = ar_ahb->radio_cold_rst ?
- PTR_ERR(ar_ahb->radio_cold_rst) : -ENODEV;
- goto err_core_cold_rst_put;
+ return PTR_ERR(ar_ahb->radio_cold_rst);
}
- ar_ahb->radio_warm_rst = reset_control_get(dev, "wifi_radio_warm");
- if (IS_ERR_OR_NULL(ar_ahb->radio_warm_rst)) {
+ ar_ahb->radio_warm_rst = devm_reset_control_get(dev, "wifi_radio_warm");
+ if (IS_ERR(ar_ahb->radio_warm_rst)) {
ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_warm_rst));
- ret = ar_ahb->radio_warm_rst ?
- PTR_ERR(ar_ahb->radio_warm_rst) : -ENODEV;
- goto err_radio_cold_rst_put;
+ return PTR_ERR(ar_ahb->radio_warm_rst);
}
- ar_ahb->radio_srif_rst = reset_control_get(dev, "wifi_radio_srif");
- if (IS_ERR_OR_NULL(ar_ahb->radio_srif_rst)) {
+ ar_ahb->radio_srif_rst = devm_reset_control_get(dev, "wifi_radio_srif");
+ if (IS_ERR(ar_ahb->radio_srif_rst)) {
ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_srif_rst));
- ret = ar_ahb->radio_srif_rst ?
- PTR_ERR(ar_ahb->radio_srif_rst) : -ENODEV;
- goto err_radio_warm_rst_put;
+ return PTR_ERR(ar_ahb->radio_srif_rst);
}
- ar_ahb->cpu_init_rst = reset_control_get(dev, "wifi_cpu_init");
- if (IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ar_ahb->cpu_init_rst = devm_reset_control_get(dev, "wifi_cpu_init");
+ if (IS_ERR(ar_ahb->cpu_init_rst)) {
ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n",
PTR_ERR(ar_ahb->cpu_init_rst));
- ret = ar_ahb->cpu_init_rst ?
- PTR_ERR(ar_ahb->cpu_init_rst) : -ENODEV;
- goto err_radio_srif_rst_put;
+ return PTR_ERR(ar_ahb->cpu_init_rst);
}
return 0;
-
-err_radio_srif_rst_put:
- reset_control_put(ar_ahb->radio_srif_rst);
-
-err_radio_warm_rst_put:
- reset_control_put(ar_ahb->radio_warm_rst);
-
-err_radio_cold_rst_put:
- reset_control_put(ar_ahb->radio_cold_rst);
-
-err_core_cold_rst_put:
- reset_control_put(ar_ahb->core_cold_rst);
-
-out:
- return ret;
}
static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
- if (!IS_ERR_OR_NULL(ar_ahb->core_cold_rst))
- reset_control_put(ar_ahb->core_cold_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->radio_cold_rst))
- reset_control_put(ar_ahb->radio_cold_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->radio_warm_rst))
- reset_control_put(ar_ahb->radio_warm_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->radio_srif_rst))
- reset_control_put(ar_ahb->radio_srif_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->cpu_init_rst))
- reset_control_put(ar_ahb->cpu_init_rst);
-
ar_ahb->core_cold_rst = NULL;
ar_ahb->radio_cold_rst = NULL;
ar_ahb->radio_warm_rst = NULL;
ar_ahb->irq = platform_get_irq_byname(pdev, "legacy");
if (ar_ahb->irq < 0) {
ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq);
+ ret = ar_ahb->irq;
goto err_clock_deinit;
}
chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
+ ret = -ENODEV;
goto err_halt_device;
}
* chooses what to send (buffer address, length). The destination
* side keeps a supply of "anonymous receive buffers" available and
* it handles incoming data as it arrives (when the destination
- * recieves an interrupt).
+ * receives an interrupt).
*
* The sender may send a simple buffer (address/length) or it may
* send a small list of buffers. When a small list is sent, hardware
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int write_index = dest_ring->write_index;
u32 ctrl_addr = pipe->ctrl_addr;
+ u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+
+ /* Prevent CE ring stuck issue that will occur when ring is full.
+ * Make sure that write index is 1 less than read index.
+ */
+ if ((cur_write_idx + nentries) == dest_ring->sw_index)
+ nentries -= 1;
write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
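The guard above exists because a circular buffer cannot distinguish
"empty" from "full" once the write index catches up with the read
index, so the producer must always stop one slot short. A minimal
sketch of the same arithmetic, assuming a power-of-two ring with
masked indices (the struct and helper are illustrative, not ath10k's):

    struct ring {
    	unsigned int nentries_mask;	/* nentries - 1, power of two */
    	unsigned int write_index;	/* producer position */
    	unsigned int sw_index;		/* software (consumer) position */
    };

    static unsigned int ring_post(struct ring *r, unsigned int nentries)
    {
    	/* Posting nentries would make write == read, which reads as
    	 * "empty"; post one fewer so a full ring stays detectable.
    	 */
    	if (((r->write_index + nentries) & r->nentries_mask) == r->sw_index)
    		nentries -= 1;

    	r->write_index = (r->write_index + nentries) & r->nentries_mask;
    	return r->write_index;
    }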
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA9887_HW_1_0_VERSION,
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_2_1_VERSION,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_2_1_VERSION,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_3_0_VERSION,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_3_2_VERSION,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
};
goto err_hif_stop;
}
- ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
+ if (ar->max_num_vdevs >= 64)
+ ar->free_vdev_map = 0xFFFFFFFFFFFFFFFFLL;
+ else
+ ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
INIT_LIST_HEAD(&ar->arvifs);
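The special case is needed because shifting a 64-bit integer by 64 or
more positions is undefined behaviour in C, so "(1LL << 64) - 1" is not
guaranteed to produce an all-ones bitmap on targets reporting 64 or
more vdevs. A hedged sketch of the general pattern (the helper name is
illustrative):

    #include <stdint.h>

    /* Mask with the low n bits set, for 0 < n <= 64, avoiding the
     * undefined 64-bit shift by 64.
     */
    static uint64_t low_bits_mask(unsigned int n)
    {
    	if (n >= 64)
    		return ~0ULL;		/* every slot free */
    	return (1ULL << n) - 1;		/* e.g. n = 16 -> 0xffff */
    }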
/* PDEV stats */
s32 ch_noise_floor;
- u32 tx_frame_count;
- u32 rx_frame_count;
- u32 rx_clear_count;
- u32 cycle_count;
+ u32 tx_frame_count; /* Cycles spent transmitting frames */
+ u32 rx_frame_count; /* Cycles spent receiving frames */
+ u32 rx_clear_count; /* Total channel busy time, evidently */
+ u32 cycle_count; /* Total on-channel time */
u32 phy_err_count;
u32 chan_tx_power;
u32 ack_rx_bad;
/* only accept EAPOL frames */
HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
- /* Non-data in promiscous mode */
+ /* Non-data in promiscuous mode */
HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
* Purpose: indicate how many 32-bit integers follow the message header
* - NUM_CHARS
* Bits 31:16
- * Purpose: indicate how many 8-bit charaters follow the series of integers
+ * Purpose: indicate how many 8-bit characters follow the series of integers
*/
struct htt_rx_test {
u8 num_ints;
/* illegal rate phy errors */
__le32 illgl_rate_phy_err;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_tx_timeout;
/* wal pdev resets */
size_t hdr_len, crypto_len;
void *rfc1042;
bool is_first, is_last, is_amsdu;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
rxd = (void *)msdu->data - sizeof(*rxd);
hdr = (void *)rxd->rx_hdr_status;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
- rfc1042 += round_up(hdr_len, 4) +
- round_up(crypto_len, 4);
+ rfc1042 += round_up(hdr_len, bytes_aligned) +
+ round_up(crypto_len, bytes_aligned);
}
if (is_amsdu)
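round_up() rounds to the next multiple of a power-of-two alignment, so
the rfc1042 offset now follows each chip family's descriptor padding
instead of a hard-coded 4. A small worked example (the header and
crypto lengths are illustrative):

    #include <stdio.h>

    #define round_up(x, a)	(((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
    	size_t hdr_len = 26, crypto_len = 8;	/* e.g. QoS hdr + CCMP */

    	/* 4-byte aligned families (QCA988x etc.): 28 + 8 = 36 */
    	printf("%zu\n", round_up(hdr_len, 4) + round_up(crypto_len, 4));

    	/* packed families (QCA99X0 etc., 1-byte): 26 + 8 = 34 */
    	printf("%zu\n", round_up(hdr_len, 1) + round_up(crypto_len, 1));
    	return 0;
    }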
.ce7_base_address = 0x0004bc00,
/* Note: qca99x0 supports up to 12 Copy Engines. Other than address of
* CE0 and CE1 no other copy engine is directly referred in the code.
- * It is not really neccessary to assign address for newly supported
+ * It is not really necessary to assign address for newly supported
* CEs in this address table.
* Copy Engine Address
* CE8 0x0004c000
#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
-/* Known pecularities:
+/* Known peculiarities:
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
* - raw have FCS, nwifi doesn't
* - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
bool sw_decrypt_mcast_mgmt;
const struct ath10k_hw_ops *hw_ops;
+
+ /* Number of bytes used for alignment in rx_hdr_status of rx desc. */
+ int decap_align_bytes;
};
struct htt_rx_desc;
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret)
- ath10k_warn(ar, "faield to down vdev %i: %d\n",
+ ath10k_warn(ar, "failed to down vdev %i: %d\n",
arvif->vdev_id, ret);
arvif->def_wep_key_idx = -1;
* 1. target firmware would check magic number and if it's a match, firmware
* would consider the bits[0:15] valid and use them to calculate
* the end of DRAM. Early allocation would be located at that area and
- * may be reclaimed when necesary
+ * may be reclaimed when necessary
* 2. if no magic number is found, early allocation would happen at "_end"
* symbol of ROM which is located before the app-data and might NOT be
* re-claimable. If this is adopted, link script should keep this in
continue;
}
+ /* mac80211 would have already asked us to stop beaconing and
+ * bring the vdev down, so continue in that case
+ */
+ if (!arvif->is_up)
+ continue;
+
/* There are no completions for beacons so wait for next SWBA
* before telling mac80211 to decrement CSA counter
*
* type.
*
* 6. Comment each parameter part of the WMI command/event structure by
- * using the 2 stars at the begining of C comment instead of one star to
+ * using the 2 stars at the beginning of C comment instead of one star to
* enable HTML document generation using Doxygen.
*
*/
* In offload mode target supports features like WOW, chatter and
* other protocol offloads. In order to support them some
* functionalities like reorder buffering, PN checking need to be
- * done in target. This determines maximum number of peers suported
+ * done in target. This determines maximum number of peers supported
* by target in offload mode
*/
__le32 num_offload_peers;
* Max. number of Tx fragments per MSDU
* This parameter controls the max number of Tx fragments per MSDU.
* This is sent by the target as part of the WMI_SERVICE_READY event
- * and is overriden by the OS shim as required.
+ * and is overridden by the OS shim as required.
*/
__le32 max_frag_entries;
} __packed;
* Max. number of Tx fragments per MSDU
* This parameter controls the max number of Tx fragments per MSDU.
* This is sent by the target as part of the WMI_SERVICE_READY event
- * and is overriden by the OS shim as required.
+ * and is overridden by the OS shim as required.
*/
__le32 max_frag_entries;
} __packed;
struct wmi_host_mem_chunks mem_chunks;
} __packed;
-/* _10x stucture is from 10.X FW API */
+/* _10x structure is from 10.X FW API */
struct wmi_init_cmd_10x {
struct wmi_resource_config_10x resource_config;
struct wmi_host_mem_chunks mem_chunks;
/* illegal rate phy errors */
__le32 illgl_rate_phy_err;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
/* wal pdev continuous xretry */
*/
struct wmi_pdev_stats_base {
__le32 chan_nf;
- __le32 tx_frame_count;
- __le32 rx_frame_count;
- __le32 rx_clear_count;
- __le32 cycle_count;
+ __le32 tx_frame_count; /* Cycles spent transmitting frames */
+ __le32 rx_frame_count; /* Cycles spent receiving frames */
+ __le32 rx_clear_count; /* Total channel busy time, evidently */
+ __le32 cycle_count; /* Total on-channel time */
__le32 phy_err_count;
__le32 chan_tx_pwr;
} __packed;
__le32 flags;
/* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
struct wmi_ssid ssid;
- /* beacon/probe reponse xmit rate. Applicable for SoftAP. */
+ /* beacon/probe response xmit rate. Applicable for SoftAP. */
__le32 bcn_tx_rate;
- /* beacon/probe reponse xmit power. Applicable for SoftAP. */
+ /* beacon/probe response xmit power. Applicable for SoftAP. */
__le32 bcn_tx_power;
/* number of p2p NOA descriptor(s) from scan entry */
__le32 num_noa_descriptors;
WMI_VDEV_PARAM_BEACON_INTERVAL,
/* Listen interval in TUs */
WMI_VDEV_PARAM_LISTEN_INTERVAL,
- /* muticast rate in Mbps */
+ /* multicast rate in Mbps */
WMI_VDEV_PARAM_MULTICAST_RATE,
/* management frame rate in Mbps */
WMI_VDEV_PARAM_MGMT_TX_RATE,
WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
/* Listen interval in TUs */
WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
- /* muticast rate in Mbps */
+ /* multicast rate in Mbps */
WMI_10X_VDEV_PARAM_MULTICAST_RATE,
/* management frame rate in Mbps */
WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
} __packed;
/* VDEV start response status codes */
-/* VDEV succesfully started */
+/* VDEV successfully started */
#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0
/* requested VDEV not found */
#define WMI_UAPSD_AC_TYPE_TRIG 1
#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
- ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
+ (type == WMI_UAPSD_AC_TYPE_DELI ? 1 << (ac << 1) : 1 << ((ac << 1) + 1))
enum wmi_sta_ps_param_uapsd {
WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
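The reworked macro interleaves the two bit types per access category:
the delivery-enable bit for AC n sits at position 2n and the trigger
bit at 2n + 1. A standalone check of the expansion (macro copied from
the hunk above; the DELI value of 0 is assumed from context):

    #include <assert.h>

    #define WMI_UAPSD_AC_TYPE_DELI	0	/* assumed */
    #define WMI_UAPSD_AC_TYPE_TRIG	1
    #define WMI_UAPSD_AC_BIT_MASK(ac, type) \
    	(type == WMI_UAPSD_AC_TYPE_DELI ? 1 << (ac << 1) : 1 << ((ac << 1) + 1))

    int main(void)
    {
    	assert(WMI_UAPSD_AC_BIT_MASK(0, WMI_UAPSD_AC_TYPE_DELI) == 0x01);
    	assert(WMI_UAPSD_AC_BIT_MASK(0, WMI_UAPSD_AC_TYPE_TRIG) == 0x02);
    	assert(WMI_UAPSD_AC_BIT_MASK(3, WMI_UAPSD_AC_TYPE_DELI) == 0x40);
    	assert(WMI_UAPSD_AC_BIT_MASK(3, WMI_UAPSD_AC_TYPE_TRIG) == 0x80);
    	return 0;
    }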
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID,
NO_SYNC_WMIFLAG);
- return 0;
+ return ret;
}
int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
config ATH9K_HWRNG
bool "Random number generator support"
depends on ATH9K && (HW_RANDOM = y || HW_RANDOM = ATH9K)
- default y
+ default n
---help---
This option incorporates the ADC register output as a source of
randomness into Linux entropy pool (/dev/urandom and /dev/random)
}
/* Check info buffer */
- info = (void *)&msg[1];
+ info = (void *)&bcdc->buf[0];
/* Copy info buffer */
if (buf) {
u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
- u32 data;
+ u32 data = 0;
int retval;
brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
val = 1;
brcmf_dbg(CONN, "shared key\n");
break;
- case NL80211_AUTHTYPE_AUTOMATIC:
- val = 2;
- brcmf_dbg(CONN, "automatic\n");
- break;
- case NL80211_AUTHTYPE_NETWORK_EAP:
- brcmf_dbg(CONN, "network eap\n");
default:
val = 2;
- brcmf_err("invalid auth type (%d)\n", sme->auth_type);
+ brcmf_dbg(CONN, "automatic, auth type (%d)\n", sme->auth_type);
break;
}
WL_BSS_INFO_MAX);
if (err) {
brcmf_err("Failed to get bss info (%d)\n", err);
- return;
+ goto out_kfree;
}
si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period);
si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
if (capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
+
+out_kfree:
+ kfree(buf);
}
static s32
struct cfg80211_wowlan *wowl)
{
u32 wowl_config;
+ struct brcmf_wowl_wakeind_le wowl_wakeind;
u32 i;
brcmf_dbg(TRACE, "Suspend, wowl config.\n");
if (!test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
wowl_config |= BRCMF_WOWL_UNASSOC;
- brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear",
- sizeof(struct brcmf_wowl_wakeind_le));
+ memcpy(&wowl_wakeind, "clear", 6);
+ brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", &wowl_wakeind,
+ sizeof(wowl_wakeind));
brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config);
brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1);
brcmf_bus_wowl_config(cfg->pub->bus_if, true);
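The old call handed the 6-byte string literal "clear" to the iovar
while declaring sizeof(struct brcmf_wowl_wakeind_le), so the transport
read well past the end of the literal. The fix stages the command in a
buffer of the declared size first. A minimal illustration of the
pattern (the 24-byte size and send_iovar() are stand-ins, not the
driver's API):

    #include <string.h>

    struct wakeind { char buf[24]; };	/* size assumed for the example */

    extern void send_iovar(const char *name, const void *data, size_t len);

    static void wakeind_clear(void)
    {
    	struct wakeind w;

    	memcpy(&w, "clear", 6);			/* "clear" plus NUL */
    	send_iovar("wowl_wakeind", &w, sizeof(w));	/* fully backed */
    }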
u16 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
bool mbss;
int is_11d;
+ bool supports_11d;
brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
settings->chandef.chan->hw_value,
mbss = ifp->vif->mbss;
/* store current 11d setting */
- brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, &ifp->vif->is_11d);
- country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
- settings->beacon.tail_len,
- WLAN_EID_COUNTRY);
- is_11d = country_ie ? 1 : 0;
+ if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
+ &ifp->vif->is_11d)) {
+ supports_11d = false;
+ } else {
+ country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
+ settings->beacon.tail_len,
+ WLAN_EID_COUNTRY);
+ is_11d = country_ie ? 1 : 0;
+ supports_11d = true;
+ }
memset(&ssid_le, 0, sizeof(ssid_le));
if (settings->ssid == NULL || settings->ssid_len == 0) {
/* Parameters shared by all radio interfaces */
if (!mbss) {
- if (is_11d != ifp->vif->is_11d) {
+ if ((supports_11d) && (is_11d != ifp->vif->is_11d)) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
is_11d);
if (err < 0) {
brcmf_err("SET INFRA error %d\n", err);
goto exit;
}
- } else if (WARN_ON(is_11d != ifp->vif->is_11d)) {
+ } else if (WARN_ON(supports_11d && (is_11d != ifp->vif->is_11d))) {
/* Multiple-BSS should use same 11d configuration */
err = -EINVAL;
goto exit;
brcmf_err("setting INFRA mode failed %d\n", err);
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
brcmf_fil_iovar_int_set(ifp, "mbss", 0);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
- ifp->vif->is_11d);
- if (err < 0)
- brcmf_err("restoring REGULATORY setting failed %d\n",
- err);
+ brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
+ ifp->vif->is_11d);
/* Bring device back up so it can be used again */
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0)
err);
}
-static void
-_brcmf_set_mac_address(struct work_struct *work)
-{
- struct brcmf_if *ifp;
- s32 err;
-
- ifp = container_of(work, struct brcmf_if, setmacaddr_work);
-
- brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
-
- err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
- ETH_ALEN);
- if (err < 0) {
- brcmf_err("Setting cur_etheraddr failed, %d\n", err);
- } else {
- brcmf_dbg(TRACE, "MAC address updated to %pM\n",
- ifp->mac_addr);
- memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
- }
-}
-
#if IS_ENABLED(CONFIG_IPV6)
static void _brcmf_update_ndtable(struct work_struct *work)
{
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct sockaddr *sa = (struct sockaddr *)addr;
+ int err;
- memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
- schedule_work(&ifp->setmacaddr_work);
- return 0;
+ brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
+
+ err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", sa->sa_data,
+ ETH_ALEN);
+ if (err < 0) {
+ brcmf_err("Setting cur_etheraddr failed, %d\n", err);
+ } else {
+ brcmf_dbg(TRACE, "updated to %pM\n", sa->sa_data);
+ memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN);
+ memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+ }
+ return err;
}
static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
ndev->needed_headroom += drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
- drvr->rxsz = ndev->mtu + ndev->hard_header_len +
- drvr->hdrlen;
-
/* set the mac address */
memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
- INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
INIT_WORK(&ifp->ndoffload_work, _brcmf_update_ndtable);
}
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
- cancel_work_sync(&ifp->setmacaddr_work);
cancel_work_sync(&ifp->multicast_work);
cancel_work_sync(&ifp->ndoffload_work);
}
}
break;
case NETDEV_DOWN:
- if (i < NDOL_MAX_ENTRIES)
- for (; i < ifp->ipv6addr_idx; i++)
+ if (i < NDOL_MAX_ENTRIES) {
+ for (; i < ifp->ipv6addr_idx - 1; i++)
table[i] = table[i + 1];
+ memset(&table[i], 0, sizeof(table[i]));
+ ifp->ipv6addr_idx--;
+ }
break;
default:
break;
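The original NETDEV_DOWN loop ran i all the way up to ipv6addr_idx
while copying table[i + 1], reading one slot past the last valid
entry, and it never decremented the count. A hedged sketch of the
corrected delete-by-shift (types and names are illustrative):

    #include <string.h>

    struct addr { unsigned char b[16]; };	/* stand-in for in6_addr */

    static void table_del(struct addr *table, unsigned int *count,
    			  unsigned int i)
    {
    	if (i >= *count)
    		return;
    	/* shift the tail left over the deleted slot ... */
    	for (; i < *count - 1; i++)
    		table[i] = table[i + 1];
    	/* ... then clear the vacated slot and shrink the count */
    	memset(&table[i], 0, sizeof(table[i]));
    	(*count)--;
    }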
brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr);
}
- if (ifp)
- brcmf_net_detach(ifp->ndev, false);
+ brcmf_net_detach(ifp->ndev, false);
if (p2p_ifp)
brcmf_net_detach(p2p_ifp->ndev, false);
drvr->iflist[0] = NULL;
!brcmf_get_pend_8021x_cnt(ifp),
MAX_WAIT_FOR_8021X_TX);
- WARN_ON(!err);
+ if (!err)
+ brcmf_err("Timed out waiting for no pending 802.1x packets\n");
return !err;
}
/* Internal brcmf items */
uint hdrlen; /* Total BRCMF header length (proto + bus) */
- uint rxsz; /* Rx buffer size bus module should use */
/* Dongle media info */
char fwver[BRCMF_DRIVER_FIRMWARE_VERSION_LEN];
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
- /* Multicast data packets sent to dongle */
- unsigned long tx_multicast;
-
struct mac_address addresses[BRCMF_MAX_IFS];
struct brcmf_if *iflist[BRCMF_MAX_IFS];
* @vif: points to cfg80211 specific interface information.
* @ndev: associated network device.
* @stats: interface specific network statistics.
- * @setmacaddr_work: worker object for setting mac address.
* @multicast_work: worker object for multicast provisioning.
* @ndoffload_work: worker object for neighbor discovery offload configuration.
* @fws_desc: interface specific firmware-signalling descriptor.
struct brcmf_cfg80211_vif *vif;
struct net_device *ndev;
struct net_device_stats stats;
- struct work_struct setmacaddr_work;
struct work_struct multicast_work;
struct work_struct ndoffload_work;
struct brcmf_fws_mac_descriptor *fws_desc;
void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
{
+ struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
struct brcmf_flowring_ring *ring;
+ struct brcmf_if *ifp;
u16 hash_idx;
+ u8 ifidx;
struct sk_buff *skb;
ring = flow->rings[flowid];
if (!ring)
return;
+
+ ifidx = brcmf_flowring_ifidx_get(flow, flowid);
+ ifp = brcmf_get_ifp(bus_if->drvr, ifidx);
+
brcmf_flowring_block(flow, flowid, false);
hash_idx = ring->hash_id;
flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
skb = skb_dequeue(&ring->skblist);
while (skb) {
- brcmu_pkt_buf_free_skb(skb);
+ brcmf_txfinalize(ifp, skb, false);
skb = skb_dequeue(&ring->skblist);
}
} else {
search = flow->tdls_entry;
if (memcmp(search->mac, peer, ETH_ALEN) == 0)
- return;
+ goto free_entry;
while (search->next) {
search = search->next;
if (memcmp(search->mac, peer, ETH_ALEN) == 0)
- return;
+ goto free_entry;
}
search->next = tdls_entry;
}
flow->tdls_active = true;
+ return;
+
+free_entry:
+ kfree(tdls_entry);
}
if ((skb->priority == 0) || (skb->priority > 7))
skb->priority = cfg80211_classify8021d(skb, NULL);
- drvr->tx_multicast += !!multicast;
-
if (fws->avoid_queueing) {
rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
if (rc < 0)
*
******************************************************************************/
-static inline const struct fw_img *
-iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
-{
- if (ucode_type >= IWL_UCODE_TYPE_MAX)
- return NULL;
-
- return &priv->fw->img[ucode_type];
-}
-
/*
* Calibration
*/
enum iwl_ucode_type old_type;
static const u16 alive_cmd[] = { REPLY_ALIVE };
- fw = iwl_get_ucode_image(priv, ucode_type);
+ fw = iwl_get_ucode_image(priv->fw, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17
-#define IWL7265D_UCODE_API_MAX 24
-#define IWL3168_UCODE_API_MAX 24
+#define IWL7265D_UCODE_API_MAX 26
+#define IWL3168_UCODE_API_MAX 26
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 16
-#define IWL7265_UCODE_API_MIN 16
-#define IWL7265D_UCODE_API_MIN 16
+#define IWL7260_UCODE_API_MIN 17
+#define IWL7265_UCODE_API_MIN 17
+#define IWL7265D_UCODE_API_MIN 17
#define IWL3168_UCODE_API_MIN 20
/* NVM versions */
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 24
-#define IWL8265_UCODE_API_MAX 24
+#define IWL8000_UCODE_API_MAX 26
+#define IWL8265_UCODE_API_MAX 26
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 16
+#define IWL8000_UCODE_API_MIN 17
#define IWL8265_UCODE_API_MIN 20
/* NVM versions */
.vht_mu_mimo_supported = true,
};
+const struct iwl_cfg iwl8275_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 8275",
+ .fw_name_pre = IWL8265_FW_PRE,
+ IWL_DEVICE_8265,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .vht_mu_mimo_supported = true,
+};
+
const struct iwl_cfg iwl4165_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 4165",
.fw_name_pre = IWL8000_FW_PRE,
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 24
+#define IWL9000_UCODE_API_MAX 26
/* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN 16
+#define IWL9000_UCODE_API_MIN 17
/* NVM versions */
#define IWL9000_NVM_VERSION 0x0a1d
.integrated = true,
};
+const struct iwl_cfg iwl9560_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9560",
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+};
+
/*
* TODO the struct below is for internal testing only this should be
* removed by EO 2016~
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 24
+#define IWL_A000_UCODE_API_MAX 26
/* Lowest firmware API version supported */
#define IWL_A000_UCODE_API_MIN 24
high_temp:1,
mac_addr_from_csr:1,
lp_xtal_workaround:1,
- no_power_up_nic_in_init:1,
disable_dummy_notification:1,
apmg_not_supported:1,
mq_rx_supported:1,
extern const struct iwl_cfg iwl8260_2n_cfg;
extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
+extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg;
#endif /* CONFIG_IWLMVM */
* Causes for the FH register interrupts
*/
enum msix_fh_int_causes {
+ MSIX_FH_INT_CAUSES_Q0 = BIT(0),
+ MSIX_FH_INT_CAUSES_Q1 = BIT(1),
MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16),
MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17),
MSIX_FH_INT_CAUSES_S2D = BIT(19),
#define CREATE_TRACE_POINTS
#include "iwl-devtrace.h"
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
+#define IWL_TFH_NUM_TBS 25
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
} __packed;
/**
- * struct iwl_tfd
+ * struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor
*
- * Transmit Frame Descriptor (TFD)
- *
- * @ __reserved1[3] reserved
- * @ num_tbs 0-4 number of active tbs
- * 5 reserved
- * 6-7 padding (not used)
- * @ tbs[20] transmit frame buffer descriptors
- * @ __pad padding
+ * This structure contains the DMA address and length of a transmit buffer
+ *
+ * @tb_len: length of the tx buffer
+ * @addr: 64-bit DMA address
+ */
+struct iwl_tfh_tb {
+ __le16 tb_len;
+ __le64 addr;
+} __packed;
+
+/**
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
- * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ * contiguous 256 TFDs.
+ * For pre a000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For a000 HW and on it is 256 x 256 bytes-per-TFD = 64 KBytes
*
* Driver must indicate the physical address of the base of each
* circular buffer via the FH_MEM_CBBC_QUEUE registers.
*
- * Each TFD contains pointer/size information for up to 20 data buffers
+ * Each TFD contains pointer/size information for up to 20 / 25 data buffers
* in host DRAM. These buffers collectively contain the (one) frame described
* by the TFD. Each buffer must be a single contiguous block of memory within
* itself, but buffers may be scattered in host DRAM. Each buffer has max size
*
* A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
*/
+
+/**
+ * struct iwl_tfd - Transmit Frame Descriptor (TFD)
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @ tbs[20] transmit frame buffer descriptors
+ * @ __pad padding
+ */
struct iwl_tfd {
u8 __reserved1[3];
u8 num_tbs;
__le32 __pad;
} __packed;
+/**
+ * struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
+ * @ num_tbs 0-4 number of active tbs
+ * 5 -15 reserved
+ * @ tbs[25] transmit frame buffer descriptors
+ * @ __pad padding
+ */
+struct iwl_tfh_tfd {
+ __le16 num_tbs;
+ struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS];
+ __le32 __pad;
+} __packed;
+
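The 128- and 256-byte figures quoted above fall straight out of the
two layouts; worked here for reference (the 6-byte pre-a000 iwl_tfd_tb
is assumed from older headers, since it is not shown in this hunk):

    /* pre-a000: sizeof(struct iwl_tfd)
     *   = 3 (reserved) + 1 (num_tbs) + 20 * 6 (tbs, assumed) + 4 (pad)
     *   = 128 bytes  ->  256 TFDs * 128 = 32 KiB per queue
     *
     * a000+:    sizeof(struct iwl_tfh_tfd)
     *   = 2 (num_tbs) + 25 * 10 (packed iwl_tfh_tb) + 4 (pad)
     *   = 256 bytes  ->  256 TFDs * 256 = 64 KiB per queue
     */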
/* Keep Warm Size */
#define IWL_KW_SIZE 0x1000 /* 4k */
/**
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
+ * For devices up to a000:
+ * @tfd_offset 0-12 - tx command byte count
+ * 12-16 - station index
+ * For a000 and on:
* @tfd_offset 0-12 - tx command byte count
- * 12-16 - station index
+ * 12-13 - number of 64 byte chunks
+ * 14-16 - reserved
*/
struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
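Each tfd_offset entry is a single 16-bit word carved up as the comment
describes; a hedged sketch of the packing for both generations (field
widths inferred from the stated bit ranges):

    #include <stdint.h>

    /* up to a000: bits 0-11 byte count, bits 12-15 station index */
    static uint16_t bc_entry_pre_a000(uint16_t byte_cnt, uint8_t sta_id)
    {
    	return (byte_cnt & 0xfff) | ((uint16_t)(sta_id & 0xf) << 12);
    }

    /* a000 and on: bits 0-11 byte count, 12-13 number of 64-byte chunks */
    static uint16_t bc_entry_a000(uint16_t byte_cnt, uint8_t num_chunks)
    {
    	return (byte_cnt & 0xfff) | ((uint16_t)(num_chunks & 0x3) << 12);
    }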
* @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID,
* treats good CRC threshold as a boolean
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
- * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
- * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
* @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
* offload profile config command.
* from the probe request template.
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
- * P2P client interfaces simultaneously if they are in different bindings.
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
- * P2P client interfaces simultaneously if they are in same bindings.
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
- * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
- IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
- IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
- IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
- IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
* @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
- * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
* @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
- * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
- * instead of 3.
- * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
- * (command version 3) that supports per-chain limits
+ * @IWL_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan
+ * iteration complete notification, and the timestamp reported for RX
+ * received during scan, are reported in TSF of the mac specified in the
+ * scan request.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
- IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14,
IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
- IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
- IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
- IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27,
+ IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
+ IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
return conf_tlv->usniffer;
}
+static inline const struct fw_img *
+iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
+{
+ if (ucode_type >= IWL_UCODE_TYPE_MAX)
+ return NULL;
+
+ return &fw->img[ucode_type];
+}
+
#endif /* __iwl_fw_h__ */
IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i);
IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i);
IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i);
- };
+ }
switch (cmd) {
IWL_CMD(RFH_RXF_DMA_CFG);
continue;
for (i = 0; i < w->n_cmds; i++) {
- if (w->cmds[i] ==
- WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+ u16 rec_id = WIDE_ID(pkt->hdr.group_id,
+ pkt->hdr.cmd);
+
+ if (w->cmds[i] == rec_id ||
+ (!iwl_cmd_groupid(w->cmds[i]) &&
+ DEF_ID(w->cmds[i]) == rec_id)) {
found = true;
break;
}
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
- /* If OEM did not fuse address - get it from OTP */
- if (!mac_addr0 && !mac_addr1) {
- mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
- mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
- }
+ iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+ /*
+ * If the OEM fused a valid address, use it instead of the one in the
+ * OTP
+ */
+ if (is_valid_ether_addr(data->hw_addr))
+ return;
+
+ mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
+ mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
return regd;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
+
+#ifdef CONFIG_ACPI
+#define WRDD_METHOD "WRDD"
+#define WRDD_WIFI (0x07)
+#define WRDD_WIGIG (0x10)
+
+static u32 iwl_wrdd_get_mcc(struct device *dev, union acpi_object *wrdd)
+{
+ union acpi_object *mcc_pkg, *domain_type, *mcc_value;
+ u32 i;
+
+ if (wrdd->type != ACPI_TYPE_PACKAGE ||
+ wrdd->package.count < 2 ||
+ wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ wrdd->package.elements[0].integer.value != 0) {
+ IWL_DEBUG_EEPROM(dev, "Unsupported wrdd structure\n");
+ return 0;
+ }
+
+ for (i = 1 ; i < wrdd->package.count ; ++i) {
+ mcc_pkg = &wrdd->package.elements[i];
+
+ if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
+ mcc_pkg->package.count < 2 ||
+ mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ mcc_pkg = NULL;
+ continue;
+ }
+
+ domain_type = &mcc_pkg->package.elements[0];
+ if (domain_type->integer.value == WRDD_WIFI)
+ break;
+
+ mcc_pkg = NULL;
+ }
+
+ if (mcc_pkg) {
+ mcc_value = &mcc_pkg->package.elements[1];
+ return mcc_value->integer.value;
+ }
+
+ return 0;
+}
+
+int iwl_get_bios_mcc(struct device *dev, char *mcc)
+{
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ u32 mcc_val;
+
+ root_handle = ACPI_HANDLE(dev);
+ if (!root_handle) {
+ IWL_DEBUG_EEPROM(dev,
+ "Could not retrieve root port ACPI handle\n");
+ return -ENOENT;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(root_handle, (acpi_string)WRDD_METHOD,
+ &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_EEPROM(dev, "WRD method not found\n");
+ return -ENOENT;
+ }
+
+ /* Call WRDD with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_EEPROM(dev, "WRDC invocation failed (0x%x)\n",
+ status);
+ return -ENOENT;
+ }
+
+ mcc_val = iwl_wrdd_get_mcc(dev, wrdd.pointer);
+ kfree(wrdd.pointer);
+ if (!mcc_val)
+ return -ENOENT;
+
+ mcc[0] = (mcc_val >> 8) & 0xff;
+ mcc[1] = mcc_val & 0xff;
+ mcc[2] = '\0';
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_get_bios_mcc);
+#endif
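The WRDD value packs the two MCC characters into its low 16 bits, high
byte first, so for instance 0x5553 decodes to "US". A minimal usage
sketch (the fallback path and the apply_mcc()/nvm_mcc names are
hypothetical):

    char mcc[3];

    /* Prefer the BIOS/ACPI regulatory hint when present; otherwise
     * keep whatever MCC the NVM provided.
     */
    if (!iwl_get_bios_mcc(dev, mcc))
    	apply_mcc(mcc);		/* e.g. 0x5553 -> "US" */
    else
    	apply_mcc(nvm_mcc);	/* hypothetical fallback */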
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc);
+#ifdef CONFIG_ACPI
+/**
+ * iwl_get_bios_mcc - read MCC from BIOS, if available
+ *
+ * @dev: the struct device
+ * @mcc: output buffer (3 bytes) that will get the MCC
+ *
+ * This function tries to read the current MCC from ACPI if available.
+ */
+int iwl_get_bios_mcc(struct device *dev, char *mcc);
+#else
+static inline int iwl_get_bios_mcc(struct device *dev, char *mcc)
+{
+ return -ENOENT;
+}
+#endif
+
#endif /* __iwl_nvm_parse_h__ */
IWL_PHY_DB_MAX
};
-#define PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
+#define PHY_DB_CMD 0x6c
/*
* phy db - configure operational ucode
#include "iwl-trans.h"
#include "iwl-drv.h"
+#include "iwl-fh.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
static struct lock_class_key __key;
#endif
- trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
+ trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
if (!trans)
return NULL;
SLAB_HWCACHE_ALIGN,
NULL);
if (!trans->dev_cmd_pool)
- goto free;
+ return NULL;
return trans;
- free:
- kfree(trans);
- return NULL;
}
void iwl_trans_free(struct iwl_trans *trans)
{
kmem_cache_destroy(trans->dev_cmd_pool);
- kfree(trans);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
+ if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id))
+ cmd->id = DEF_ID(cmd->id);
+
ret = trans->ops->send_cmd(trans, cmd);
if (!(cmd->flags & CMD_ASYNC))
/* make u16 wide id out of u8 group and opcode */
#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
+#define DEF_ID(opcode) ((1 << 8) | (opcode))
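Both macros share one 16-bit id space: the group lives in the high
byte and the opcode in the low byte, with DEF_ID() parking legacy
8-bit opcodes in group 1 so they cannot collide with genuine group-0
ids. A standalone check (macros copied from the hunk above, example
values illustrative):

    #include <assert.h>

    #define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
    #define DEF_ID(opcode) ((1 << 8) | (opcode))

    int main(void)
    {
    	assert(WIDE_ID(0x5, 0x88) == 0x0588);	/* group 5, opcode 0x88 */
    	assert(DEF_ID(0xc0) == 0x01c0);		/* legacy -> group 1 */
    	return 0;
    }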
/* due to the conversion, this group is special; new groups
* should be defined in the appropriate fw-api header files
* (i.e. mark it as non-idle).
* @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
* called after this command completes. Valid only with CMD_ASYNC.
- * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
- * check that we leave enough room for the TBs bitmap which needs 20 bits.
*/
enum CMD_MODE {
CMD_ASYNC = BIT(0),
CMD_MAKE_TRANS_IDLE = BIT(5),
CMD_WAKE_UP_TRANS = BIT(6),
CMD_WANT_ASYNC_CALLBACK = BIT(7),
-
- CMD_TB_BITMAP_POS = 11,
};
#define DEF_CMD_PAYLOAD_SIZE 320
* @bc_table_dword: set to true if the BC table expects the byte count to be
* in DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
- * @wide_cmd_header: firmware supports wide host command header
* @sw_csum_tx: transport should compute the TCP checksum
* @command_groups: array of command groups, each member is an array of the
* commands in the group; for debugging only
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
bool scd_set_active;
- bool wide_cmd_header;
bool sw_csum_tx;
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
+ dma_addr_t (*get_txq_byte_table)(struct iwl_trans *trans, int txq_id);
+
int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
+ * @wide_cmd_header: true when ucode supports wide command header format
* @num_rx_queues: number of RX queues allocated by the transport;
* the transport must set this before calling iwl_drv_start()
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
+ bool wide_cmd_header;
u8 num_rx_queues;
trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
+static inline dma_addr_t iwl_trans_get_txq_byte_table(struct iwl_trans *trans,
+ int queue)
+{
+ /* we should never be called if the trans doesn't support it */
+ BUG_ON(!trans->ops->get_txq_byte_table);
+
+ return trans->ops->get_txq_byte_table(trans, queue);
+}
+
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
int frame_limit, u16 ssn,
return !strncmp(name, buf, len) ? buf + len : NULL;
}
+static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 curr_gp2;
+ u64 curr_os;
+ s64 diff;
+ char buf[64];
+ const size_t bufsz = sizeof(buf);
+ int pos = 0;
+
+ iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+ do_div(curr_os, NSEC_PER_USEC);
+ diff = curr_os - curr_gp2;
+ pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
char *buf,
size_t count, loff_t *ppos)
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
+MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
+
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
- (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
- mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
+ (vif->type == NL80211_IFTYPE_STATION && vif->p2p)))
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff,
+ mvmvif->dbgfs_dir, S_IRUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
return ret ?: count;
}
+static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_rx_cmd_buffer rxb = {
+ ._rx_page_order = 0,
+ .truesize = 0, /* not used */
+ ._offset = 0,
+ };
+ struct iwl_rx_packet *pkt;
+ struct iwl_rx_mpdu_desc *desc;
+ int bin_len = count / 2;
+ int ret = -EINVAL;
+
+ /* supporting only 9000 descriptor */
+ if (!mvm->trans->cfg->mq_rx_supported)
+ return -ENOTSUPP;
+
+ rxb._page = alloc_pages(GFP_ATOMIC, 0);
+ if (!rxb._page)
+ return -ENOMEM;
+ pkt = rxb_addr(&rxb);
+
+ ret = hex2bin(page_address(rxb._page), buf, bin_len);
+ if (ret)
+ goto out;
+
+ /* avoid invalid memory access */
+ if (bin_len < sizeof(*pkt) + sizeof(*desc))
+ goto out;
+
+ /* check this is RX packet */
+ if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) !=
+ WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))
+ goto out;
+
+ /* check the length in metadata matches actual received length */
+ desc = (void *)pkt->data;
+ if (le16_to_cpu(desc->mpdu_len) !=
+ (bin_len - sizeof(*desc) - sizeof(*pkt)))
+ goto out;
+
+ local_bh_disable();
+ iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0);
+ local_bh_enable();
+ ret = 0;
+
+out:
+ iwl_free_rxb(&rxb);
+
+ return ret ?: count;
+}
+
static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
+MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
#endif
+static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_dbg_mem_access_cmd cmd = {};
+ struct iwl_dbg_mem_access_rsp *rsp;
+ struct iwl_host_cmd hcmd = {
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &cmd, },
+ .len = { sizeof(cmd) },
+ };
+ size_t delta, len;
+ ssize_t ret;
+
+ hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+ DEBUG_GROUP, 0);
+ cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
+
+ /* Take care of alignment of both the position and the length */
+ delta = *ppos & 0x3;
+ cmd.addr = cpu_to_le32(*ppos - delta);
+ cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4,
+ (size_t)DEBUG_MEM_MAX_SIZE_DWORDS));
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ len = min((size_t)le32_to_cpu(rsp->len) << 2,
+ iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp));
+ len = min(len - delta, count);
+ if (len < 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+ *ppos += ret;
+
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
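The read path widens any (*ppos, count) request to a dword-aligned
window, asks the firmware for whole dwords, and copies back only the
slice the caller asked for. The arithmetic, worked on illustrative
numbers:

    /* *ppos = 0x1001, count = 5
     *
     *   delta    = 0x1001 & 0x3          = 1
     *   cmd.addr = 0x1001 - 1            = 0x1000   (aligned down)
     *   cmd.len  = ALIGN(5 + 1, 4) / 4   = 2 dwords (8 bytes)
     *
     * The response covers 0x1000..0x1007; copy_to_user() starts at
     * rsp->data + delta, so the caller sees exactly 0x1001..0x1005.
     */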
+static ssize_t iwl_dbgfs_mem_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_dbg_mem_access_cmd *cmd;
+ struct iwl_dbg_mem_access_rsp *rsp;
+ struct iwl_host_cmd hcmd = {};
+ size_t cmd_size;
+ size_t data_size;
+ u32 op, len;
+ ssize_t ret;
+
+ hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+ DEBUG_GROUP, 0);
+
+ if (*ppos & 0x3 || count < 4) {
+ op = DEBUG_MEM_OP_WRITE_BYTES;
+ len = min(count, (size_t)(4 - (*ppos & 0x3)));
+ data_size = len;
+ } else {
+ op = DEBUG_MEM_OP_WRITE;
+ len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS);
+ data_size = len << 2;
+ }
+
+ cmd_size = sizeof(*cmd) + ALIGN(data_size, 4);
+ cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->op = cpu_to_le32(op);
+ cmd->len = cpu_to_le32(len);
+ cmd->addr = cpu_to_le32(*ppos);
+ if (copy_from_user((void *)cmd->data, user_buf, data_size)) {
+ kfree(cmd);
+ return -EFAULT;
+ }
+
+ hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ hcmd.data[0] = (void *)cmd;
+ hcmd.len[0] = cmd_size;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+
+ kfree(cmd);
+
+ if (ret < 0)
+ return ret;
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = data_size;
+ *ppos += ret;
+
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
+static const struct file_operations iwl_dbgfs_mem_ops = {
+ .read = iwl_dbgfs_mem_read,
+ .write = iwl_dbgfs_mem_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
struct dentry *bcast_dir __maybe_unused;
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, S_IWUSR);
if (!debugfs_create_bool("enable_scan_iteration_notif",
S_IRUSR | S_IWUSR,
mvm->debugfs_dir,
mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
goto err;
+ debugfs_create_file("mem", S_IRUSR | S_IWUSR, dbgfs_dir, mvm,
+ &iwl_dbgfs_mem_ops);
+
/*
* Create a symlink with mac80211. It will be removed when mac80211
* exists (before the opmode exists which removes the target.)
IWL_TX_POWER_MODE_SET_ACK = 3,
}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */;
+#define IWL_NUM_CHAIN_LIMITS 2
+#define IWL_NUM_SUB_BANDS 5
+
/**
- * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
* @set_mode: see &enum iwl_dev_tx_power_cmd_mode
* @mac_context_id: id of the mac ctx for which we are reducing TX power.
* @pwr_restriction: TX power restriction in 1/8 dBms.
* @dev_24: device TX power restriction in 1/8 dBms
* @dev_52_low: device TX power restriction upper band - low
* @dev_52_high: device TX power restriction upper band - high
+ * @per_chain_restriction: per chain restrictions
*/
-struct iwl_dev_tx_power_cmd_v2 {
+struct iwl_dev_tx_power_cmd_v3 {
__le32 set_mode;
__le32 mac_context_id;
__le16 pwr_restriction;
__le16 dev_24;
__le16 dev_52_low;
__le16 dev_52_high;
-} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
-
-#define IWL_NUM_CHAIN_LIMITS 2
-#define IWL_NUM_SUB_BANDS 5
-
-/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * @v2: version 2 of the command, embedded here for easier software handling
- * @per_chain_restriction: per chain restrictions
- */
-struct iwl_dev_tx_power_cmd_v3 {
- /* v3 is just an extension of v2 - keep this here */
- struct iwl_dev_tx_power_cmd_v2 v2;
__le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @general_flags: &enum iwl_umac_scan_general_flags
+ * @reserved2: for future use and alignment
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
* @extended_dwell: dwell time for channels 1, 6 and 11
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
__le32 flags;
__le32 uid;
__le32 ooc_priority;
- /* SCAN_GENERAL_PARAMS_API_S_VER_1 */
- __le32 general_flags;
+ /* SCAN_GENERAL_PARAMS_API_S_VER_4 */
+ __le16 general_flags;
+ u8 reserved2;
+ u8 scan_start_mac_id;
u8 extended_dwell;
u8 active_dwell;
u8 passive_dwell;
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
u8 channel_flags;
u8 n_channels;
__le16 reserved;
* @status: one of SCAN_COMP_STATUS_*
* @bt_status: BT on/off status
* @last_channel: last channel that was scanned
- * @tsf_low: TSF timer (lower half) in usecs
- * @tsf_high: TSF timer (higher half) in usecs
+ * @start_tsf: TSF timer in usecs of the scan start time for the mac specified
+ * in &struct iwl_scan_req_umac.
* @results: array of scan results, only "scanned_channels" of them are valid
*/
struct iwl_umac_scan_iter_complete_notif {
u8 status;
u8 bt_status;
u8 last_channel;
- __le32 tsf_low;
- __le32 tsf_high;
+ __le64 start_tsf;
struct iwl_scan_results_notif results[];
-} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */
#endif
* @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
* Should be set for 26/30 length MAC headers
* @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
- * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
* @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
* @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
* @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18),
TX_CMD_FLG_MH_PAD = BIT(20),
TX_CMD_FLG_RESP_TO_DRV = BIT(21),
- TX_CMD_FLG_CCMP_AGG = BIT(22),
TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
TX_CMD_FLG_DUR = BIT(25),
TX_CMD_FLG_FW_DROP = BIT(26),
* @TX_CMD_SEC_EXT: extended cipher algorithm.
* @TX_CMD_SEC_GCMP: GCMP encryption algorithm.
* @TX_CMD_SEC_KEY128: set for 104 bits WEP key.
- * @TC_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
+ * @TX_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
* from the table instead of from the TX command.
* If the key is taken from the key table its index should be given by the
* first byte of the TX command key field.
TX_CMD_SEC_EXT = 0x04,
TX_CMD_SEC_GCMP = 0x05,
TX_CMD_SEC_KEY128 = 0x08,
- TC_CMD_SEC_KEY_FROM_TABLE = 0x08,
+ TX_CMD_SEC_KEY_FROM_TABLE = 0x08,
};
/* TODO: how does these values are OK with only 16 bit variable??? */
u8 reserved1;
} __packed;
+/**
+ * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
+ * @q_num: TFD queue number
+ * @tfd_index: Index of first un-acked frame in the TFD queue
+ */
+struct iwl_mvm_compressed_ba_tfd {
+ u8 q_num;
+ u8 reserved;
+ __le16 tfd_index;
+} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue
+ * @q_num: RA TID queue number
+ * @tid: TID of the queue
+ * @ssn: BA window current SSN
+ */
+struct iwl_mvm_compressed_ba_ratid {
+ u8 q_num;
+ u8 tid;
+ __le16 ssn;
+} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */
+
+/*
+ * enum iwl_mvm_ba_resp_flags - TX aggregation status
+ * @IWL_MVM_BA_RESP_TX_AGG: generated due to BA
+ * @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR
+ * @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA
+ * @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun
+ * @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill
+ * @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the
+ * expected time
+ */
+enum iwl_mvm_ba_resp_flags {
+ IWL_MVM_BA_RESP_TX_AGG,
+ IWL_MVM_BA_RESP_TX_BAR,
+ IWL_MVM_BA_RESP_TX_AGG_FAIL,
+ IWL_MVM_BA_RESP_TX_UNDERRUN,
+ IWL_MVM_BA_RESP_TX_BT_KILL,
+ IWL_MVM_BA_RESP_TX_DSP_TIMEOUT
+};
+
+/**
+ * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @flags: status flag, see the &iwl_mvm_ba_resp_flags
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ *	not a copy from the LQ command. Thus, if the first rate was not
+ *	used for the Tx, the FW will set this value to 0.
+ * @initial_rate: TLC rate info, initial rate index, TLC table color
+ * @retry_cnt: retry count
+ * @query_byte_cnt: SCD query byte count
+ * @query_frame_cnt: SCD query frame count
+ * @txed: number of frames sent in the aggregation (all-TIDs)
+ * @done: number of frames that were Acked by the BA (all-TIDs)
+ * @wireless_time: Wireless-media time
+ * @tx_rate: the rate the aggregation was sent at
+ * @tfd_cnt: number of TFD-Q elements
+ * @ra_tid_cnt: number of RATID-Q elements
+ */
+struct iwl_mvm_compressed_ba_notif {
+ __le32 flags;
+ u8 sta_id;
+ u8 reduced_txp;
+ u8 initial_rate;
+ u8 retry_cnt;
+ __le32 query_byte_cnt;
+ __le16 query_frame_cnt;
+ __le16 txed;
+ __le16 done;
+ __le32 wireless_time;
+ __le32 tx_rate;
+ __le16 tfd_cnt;
+ __le16 ra_tid_cnt;
+ struct iwl_mvm_compressed_ba_tfd tfd[1];
+ struct iwl_mvm_compressed_ba_ratid ra_tid[0];
+} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
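The trailing tfd[] and ra_tid[] arrays are variable-length, bounded by tfd_cnt and ra_tid_cnt. The reclaim path added later in this series only supports a single TFD-queue entry for now, so a hedged sketch of extracting the reclaim point is:

	struct iwl_mvm_compressed_ba_notif *ba_res = (void *)pkt->data;
	int txq, index;

	/* multi-TID aggregation is not handled yet */
	if (WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1))
		return;

	/* first un-acked frame on that queue; reclaim everything before it */
	txq = ba_res->tfd[0].q_num;
	index = le16_to_cpu(ba_res->tfd[0].tfd_index);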
+
/**
* struct iwl_mac_beacon_cmd_v6 - beacon template command
* @tx: the tx commands associated with the beacon frame
/* Phy */
PHY_CONFIGURATION_CMD = 0x6a,
CALIB_RES_NOTIF_PHY_DB = 0x6b,
- /* PHY_DB_CMD = 0x6c, */
+ PHY_DB_CMD = 0x6c,
/* ToF - 802.11mc FTM */
TOF_CMD = 0x10,
STORED_BEACON_NTF = 0xFF,
};
+enum iwl_fmac_debug_cmds {
+ LMAC_RD_WR = 0x0,
+ UMAC_RD_WR = 0x1,
+};
+
/* command groups */
enum {
LEGACY_GROUP = 0x0,
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
PROT_OFFLOAD_GROUP = 0xb,
+ DEBUG_GROUP = 0xf,
};
/**
struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
-#define TX_FIFO_MAX_NUM 8
-#define RX_FIFO_MAX_NUM 2
+#define TX_FIFO_MAX_NUM_9000 8
+#define TX_FIFO_MAX_NUM 15
+#define RX_FIFO_MAX_NUM 2
#define TX_FIFO_INTERNAL_MAX_NUM 6
/**
 * NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
* set, the last 3 members don't exist.
*/
+struct iwl_shared_mem_cfg_v1 {
+ __le32 shared_mem_addr;
+ __le32 shared_mem_size;
+ __le32 sample_buff_addr;
+ __le32 sample_buff_size;
+ __le32 txfifo_addr;
+ __le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
+ __le32 rxfifo_size[RX_FIFO_MAX_NUM];
+ __le32 page_buff_addr;
+ __le32 page_buff_size;
+ __le32 rxfifo_addr;
+ __le32 internal_txfifo_addr;
+ __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
__le32 shared_mem_size;
__le32 rxfifo_addr;
__le32 internal_txfifo_addr;
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
-} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
/**
* VHT MU-MIMO group configuration
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
+/* Operation types for the debug mem access */
+enum {
+ DEBUG_MEM_OP_READ = 0,
+ DEBUG_MEM_OP_WRITE = 1,
+ DEBUG_MEM_OP_WRITE_BYTES = 2,
+};
+
+#define DEBUG_MEM_MAX_SIZE_DWORDS 32
+
+/**
+ * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory
+ * @op: DEBUG_MEM_OP_*
+ * @addr: address to read/write from/to
+ * @len: in dwords, to read/write
+ * @data: for write operations, contains the source buffer
+ */
+struct iwl_dbg_mem_access_cmd {
+ __le32 op;
+ __le32 addr;
+ __le32 len;
+ __le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */
+
+/* Status responses for the debug mem access */
+enum {
+ DEBUG_MEM_STATUS_SUCCESS = 0x0,
+ DEBUG_MEM_STATUS_FAILED = 0x1,
+ DEBUG_MEM_STATUS_LOCKED = 0x2,
+ DEBUG_MEM_STATUS_HIDDEN = 0x3,
+ DEBUG_MEM_STATUS_LENGTH = 0x4,
+};
+
+/**
+ * struct iwl_dbg_mem_access_rsp - Response to debug mem commands
+ * @status: DEBUG_MEM_STATUS_*
+ * @len: read dwords (0 for write operations)
+ * @data: contains the read DWs
+ */
+struct iwl_dbg_mem_access_rsp {
+ __le32 status;
+ __le32 len;
+ __le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
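A sketch of driving this API from the host: build the wide command id from the new DEBUG_GROUP and LMAC_RD_WR values and send it with CMD_WANT_SKB so the response packet carries the iwl_dbg_mem_access_rsp. The helper name below is illustrative, not part of this series:

static int iwl_dbg_mem_read(struct iwl_mvm *mvm, u32 addr, u32 len_dwords)
{
	struct iwl_dbg_mem_access_cmd cmd = {
		.op = cpu_to_le32(DEBUG_MEM_OP_READ),
		.addr = cpu_to_le32(addr),
		.len = cpu_to_le32(len_dwords),
	};
	struct iwl_host_cmd hcmd = {
		.id = iwl_cmd_id(LMAC_RD_WR, DEBUG_GROUP, 0),
		.flags = CMD_WANT_SKB,
		.data = { &cmd, },
		.len = { sizeof(cmd), },
	};

	if (len_dwords > DEBUG_MEM_MAX_SIZE_DWORDS)
		return -EINVAL;

	/* on success, struct iwl_dbg_mem_access_rsp is at hcmd.resp_pkt->data */
	return iwl_mvm_send_cmd(mvm, &hcmd);
}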
{ .start = 0x00a04560, .end = 0x00a0457c },
{ .start = 0x00a04590, .end = 0x00a04598 },
{ .start = 0x00a045c0, .end = 0x00a045f4 },
- { .start = 0x00a44000, .end = 0x00a7bf80 },
};
static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
{ .start = 0x00a05c00, .end = 0x00a05c18 },
{ .start = 0x00a05400, .end = 0x00a056e8 },
{ .start = 0x00a08000, .end = 0x00a098bc },
- { .start = 0x00adfc00, .end = 0x00adfd1c },
{ .start = 0x00a02400, .end = 0x00a02758 },
};
sizeof(struct iwl_fw_error_dump_fifo);
}
- for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
+ for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
if (!mem_cfg->txfifo_size[i])
continue;
u32 scd_base_addr;
};
-static inline const struct fw_img *
-iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
-{
- if (ucode_type >= IWL_UCODE_TYPE_MAX)
- return NULL;
-
- return &mvm->fw->img[ucode_type];
-}
-
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
!(fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
- fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
+ fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
else
- fw = iwl_get_ucode_image(mvm, ucode_type);
+ fw = iwl_get_ucode_image(mvm->fw, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
mvm->cur_ucode = ucode_type;
return ret;
}
-static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
+static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
{
- struct iwl_host_cmd cmd = {
- .flags = CMD_WANT_SKB,
- .data = { NULL, },
- .len = { 0, },
- };
- struct iwl_shared_mem_cfg *mem_cfg;
- struct iwl_rx_packet *pkt;
- u32 i;
+ struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
+ int i;
- lockdep_assert_held(&mvm->mutex);
+ mvm->shared_mem_cfg.num_txfifo_entries =
+ ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
+ mvm->shared_mem_cfg.txfifo_size[i] =
+ le32_to_cpu(mem_cfg->txfifo_size[i]);
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
+ mvm->shared_mem_cfg.rxfifo_size[i] =
+ le32_to_cpu(mem_cfg->rxfifo_size[i]);
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
- cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
- else
- cmd.id = SHARED_MEM_CFG;
+ BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+ sizeof(mem_cfg->internal_txfifo_size));
- if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
- return;
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+ i++)
+ mvm->shared_mem_cfg.internal_txfifo_size[i] =
+ le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+}
- pkt = cmd.resp_pkt;
- mem_cfg = (void *)pkt->data;
-
- mvm->shared_mem_cfg.shared_mem_addr =
- le32_to_cpu(mem_cfg->shared_mem_addr);
- mvm->shared_mem_cfg.shared_mem_size =
- le32_to_cpu(mem_cfg->shared_mem_size);
- mvm->shared_mem_cfg.sample_buff_addr =
- le32_to_cpu(mem_cfg->sample_buff_addr);
- mvm->shared_mem_cfg.sample_buff_size =
- le32_to_cpu(mem_cfg->sample_buff_size);
- mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
+static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
+ int i;
+
+ mvm->shared_mem_cfg.num_txfifo_entries =
+ ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
mvm->shared_mem_cfg.txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
mvm->shared_mem_cfg.rxfifo_size[i] =
le32_to_cpu(mem_cfg->rxfifo_size[i]);
- mvm->shared_mem_cfg.page_buff_addr =
- le32_to_cpu(mem_cfg->page_buff_addr);
- mvm->shared_mem_cfg.page_buff_size =
- le32_to_cpu(mem_cfg->page_buff_size);
- /* new API has more data */
+	/* new API has more data, from the rxfifo_addr field onwards */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
- mvm->shared_mem_cfg.rxfifo_addr =
- le32_to_cpu(mem_cfg->rxfifo_addr);
- mvm->shared_mem_cfg.internal_txfifo_addr =
- le32_to_cpu(mem_cfg->internal_txfifo_addr);
-
BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
mvm->shared_mem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
+}
+
+static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
+{
+ struct iwl_host_cmd cmd = {
+ .flags = CMD_WANT_SKB,
+ .data = { NULL, },
+ .len = { 0, },
+ };
+ struct iwl_rx_packet *pkt;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+ cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+ else
+ cmd.id = SHARED_MEM_CFG;
+
+ if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
+ return;
+
+ pkt = cmd.resp_pkt;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_parse_shared_mem_a000(mvm, pkt);
+ else
+ iwl_mvm_parse_shared_mem(mvm, pkt);
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
{
struct iwl_mvm_sar_table sar_table;
struct iwl_dev_tx_power_cmd cmd = {
- .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
int ret, i, j, idx;
int len = sizeof(cmd);
- /* we can't do anything with the table if the FW doesn't support it */
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_TX_POWER_CHAIN)) {
- IWL_DEBUG_RADIO(mvm,
- "FW doesn't support per-chain TX power settings.\n");
- return 0;
- }
-
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
* (for example, if we were in RFKILL)
*/
ret = iwl_run_init_mvm_ucode(mvm, false);
- if (ret && !iwlmvm_mod_params.init_dbg) {
+
+ if (iwlmvm_mod_params.init_dbg)
+ return 0;
+
+ if (ret) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
/* this can't happen */
if (WARN_ON(ret > 0))
ret = -ERFKILL;
goto error;
}
- if (!iwlmvm_mod_params.init_dbg) {
- /*
- * Stop and start the transport without entering low power
- * mode. This will save the state of other components on the
- * device that are triggered by the INIT firwmare (MFUART).
- */
- _iwl_trans_stop_device(mvm->trans, false);
- ret = _iwl_trans_start_hw(mvm->trans, false);
- if (ret)
- goto error;
- }
- if (iwlmvm_mod_params.init_dbg)
- return 0;
+ /*
+ * Stop and start the transport without entering low power
+ * mode. This will save the state of other components on the
+	 * device that are triggered by the INIT firmware (MFUART).
+ */
+ _iwl_trans_stop_device(mvm->trans, false);
+ ret = _iwl_trans_start_hw(mvm->trans, false);
+ if (ret)
+ goto error;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret) {
}
/* TODO: read the budget from BIOS / Platform NVM */
- if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
+ if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
mvm->cooling_dev.cur_state);
+ if (ret)
+ goto error;
+ }
#else
/* Initialize tx backoffs to the minimal possible */
iwl_mvm_tt_tx_backoff(mvm, 0);
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MAX_TID_COUNT, 0);
+ else
+ iwl_mvm_disable_txq(mvm,
+ IWL_MVM_DQA_P2P_DEVICE_QUEUE,
+ vif->hw_queue[0], IWL_MAX_TID_COUNT,
+ 0);
break;
case NL80211_IFTYPE_AP:
cmd->ac[txf].fifos_mask = BIT(txf);
}
- if (vif->type == NL80211_IFTYPE_AP) {
- /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
- cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |=
- BIT(IWL_MVM_TX_FIFO_MCAST);
-
- /*
- * in AP mode, pass probe requests and beacons from other APs
- * (needed for ht protection); when there're no any associated
- * station don't ask FW to pass beacons to prevent unnecessary
- * wake-ups.
- */
- cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
- if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
- cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
- IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
- } else {
- IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
- }
- }
-
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
*/
static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd,
struct iwl_mac_data_ap *ctxt_ap,
bool add)
{
.beacon_device_ts = 0
};
+ /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
+ cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+
+ /*
+ * in AP mode, pass probe requests and beacons from other APs
+	 * (needed for HT protection); when there are no associated
+	 * stations, don't ask the FW to pass beacons, to prevent
+	 * unnecessary wake-ups.
+ */
+ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+ if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
+ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+ IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
+ } else {
+ IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
+ }
+
ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
ctxt_ap->bi_reciprocal =
cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
/* Fill the data specific for ap mode */
- iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap,
action == FW_CTXT_ACTION_ADD);
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
/* Fill the data specific for GO mode */
- iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap,
action == FW_CTXT_ACTION_ADD);
cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow &
hw->wiphy->n_cipher_suites++;
}
- /*
- * Enable 11w if advertised by firmware and software crypto
- * is not enabled (as the firmware will interpret some mgmt
- * packets, so enabling it with software crypto isn't safe)
+ /* Enable 11w if software crypto is not enabled (as the
+ * firmware will interpret some mgmt packets, so enabling it
+ * with software crypto isn't safe).
*/
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
- !iwlwifi_mod_params.sw_crypto) {
+ if (!iwlwifi_mod_params.sw_crypto) {
ieee80211_hw_set(hw, MFP_CAPABLE);
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_AES_CMAC;
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
- hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
-
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SCAN_START_TIME);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BSS_PARENT_TSF);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ }
+
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
#ifdef CONFIG_PM_SLEEP
if (ret)
iwl_mvm_leds_exit(mvm);
+ if (mvm->cfg->vht_mu_mimo_supported)
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+
return ret;
}
s16 tx_power)
{
struct iwl_dev_tx_power_cmd cmd = {
- .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
- .v3.v2.mac_context_id =
+ .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+ .v3.mac_context_id =
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
- .v3.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
+ .v3.pwr_restriction = cpu_to_le16(8 * tx_power),
};
int len = sizeof(cmd);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
- cmd.v3.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+ cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
- len = sizeof(cmd.v3.v2);
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
case NL80211_IFTYPE_ADHOC:
iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
break;
+ case NL80211_IFTYPE_MONITOR:
+ if (changes & BSS_CHANGED_MU_GROUPS)
+ iwl_mvm_update_mu_groups(mvm, vif);
+ break;
default:
/* shouldn't happen */
WARN_ON_ONCE(1);
};
struct iwl_mvm_shared_mem_cfg {
- u32 shared_mem_addr;
- u32 shared_mem_size;
- u32 sample_buff_addr;
- u32 sample_buff_size;
- u32 txfifo_addr;
+ int num_txfifo_entries;
u32 txfifo_size[TX_FIFO_MAX_NUM];
u32 rxfifo_size[RX_FIFO_MAX_NUM];
- u32 page_buff_addr;
- u32 page_buff_size;
- u32 rxfifo_addr;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
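The new num_txfifo_entries field replaces a hard-coded ARRAY_SIZE() bound in the FIFO dump loop, since the two parse paths fill differently sized firmware layouts. The dump code earlier in this series uses exactly this pattern:

	for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
		if (!mem_cfg->txfifo_size[i])
			continue;
		/* account for / dump this TX FIFO */
	}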
/* UMAC scan tracking */
u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
+ /* start time of last scan in TSF of the mac that requested the scan */
+ u64 scan_start;
+
+ /* the vif that requested the current scan */
+ struct iwl_mvm_vif *scan_vif;
+
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
}
static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
*****************************************************************************/
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
-#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "mvm.h"
return resp_cp;
}
-#ifdef CONFIG_ACPI
-#define WRD_METHOD "WRDD"
-#define WRDD_WIFI (0x07)
-#define WRDD_WIGIG (0x10)
-
-static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd)
-{
- union acpi_object *mcc_pkg, *domain_type, *mcc_value;
- u32 i;
-
- if (wrdd->type != ACPI_TYPE_PACKAGE ||
- wrdd->package.count < 2 ||
- wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
- wrdd->package.elements[0].integer.value != 0) {
- IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n");
- return 0;
- }
-
- for (i = 1 ; i < wrdd->package.count ; ++i) {
- mcc_pkg = &wrdd->package.elements[i];
-
- if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
- mcc_pkg->package.count < 2 ||
- mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
- mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
- mcc_pkg = NULL;
- continue;
- }
-
- domain_type = &mcc_pkg->package.elements[0];
- if (domain_type->integer.value == WRDD_WIFI)
- break;
-
- mcc_pkg = NULL;
- }
-
- if (mcc_pkg) {
- mcc_value = &mcc_pkg->package.elements[1];
- return mcc_value->integer.value;
- }
-
- return 0;
-}
-
-static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
-{
- acpi_handle root_handle;
- acpi_handle handle;
- struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
- u32 mcc_val;
-
- root_handle = ACPI_HANDLE(mvm->dev);
- if (!root_handle) {
- IWL_DEBUG_LAR(mvm,
- "Could not retrieve root port ACPI handle\n");
- return -ENOENT;
- }
-
- /* Get the method's handle */
- status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_LAR(mvm, "WRD method not found\n");
- return -ENOENT;
- }
-
- /* Call WRDD with no arguments */
- status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status);
- return -ENOENT;
- }
-
- mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer);
- kfree(wrdd.pointer);
- if (!mcc_val)
- return -ENOENT;
-
- mcc[0] = (mcc_val >> 8) & 0xff;
- mcc[1] = mcc_val & 0xff;
- mcc[2] = '\0';
- return 0;
-}
-#else /* CONFIG_ACPI */
-static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
-{
- return -ENOENT;
-}
-#endif
-
int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
{
bool tlv_lar;
return -EIO;
if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
- !iwl_mvm_get_bios_mcc(mvm, mcc)) {
+ !iwl_get_bios_mcc(mvm->dev, mcc)) {
kfree(regd);
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
MCC_SOURCE_BIOS, NULL);
HCMD_NAME(BT_COEX_CI),
HCMD_NAME(PHY_CONFIGURATION_CMD),
HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
+ HCMD_NAME(PHY_DB_CMD),
HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
HCMD_NAME(SCAN_OFFLOAD_CONFIG_CMD),
/* the hardware splits the A-MSDU */
if (mvm->cfg->mq_rx_supported)
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_WIDE_CMD_HDR);
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
- trans_cfg.bc_table_dword = true;
+ trans->wide_cmd_header = true;
+ trans_cfg.bc_table_dword = true;
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
IWL_DEBUG_EEPROM(mvm->trans->dev,
"working without external nvm file\n");
- if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
- "not allowing power-up and not having nvm_file\n"))
+ err = iwl_trans_start_hw(mvm->trans);
+ if (err)
goto out_free;
- /*
- * Even if nvm exists in the nvm_file driver should read again the nvm
- * from the nic because there might be entries that exist in the OTP
- * and not in the file.
- * for nics with no_power_up_nic_in_init: rely completley on nvm_file
- */
- if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
- err = iwl_nvm_init(mvm, false);
- if (err)
- goto out_free;
- } else {
- err = iwl_trans_start_hw(mvm->trans);
- if (err)
- goto out_free;
-
- mutex_lock(&mvm->mutex);
- iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
- err = iwl_run_init_mvm_ucode(mvm, true);
- if (!err || !iwlmvm_mod_params.init_dbg)
- iwl_mvm_stop_device(mvm);
- iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
- mutex_unlock(&mvm->mutex);
- /* returns 0 if successful, 1 if success but in rfkill */
- if (err < 0 && !iwlmvm_mod_params.init_dbg) {
- IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
- goto out_free;
- }
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
+ err = iwl_run_init_mvm_ucode(mvm, true);
+ if (!err || !iwlmvm_mod_params.init_dbg)
+ iwl_mvm_stop_device(mvm);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
+ mutex_unlock(&mvm->mutex);
+ /* returns 0 if successful, 1 if success but in rfkill */
+ if (err < 0 && !iwlmvm_mod_params.init_dbg) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+ goto out_free;
}
scan_size = iwl_mvm_scan_size(mvm);
flush_delayed_work(&mvm->fw_dump_wk);
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
- if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
- iwl_trans_op_mode_leave(trans);
+ iwl_trans_op_mode_leave(trans);
+
ieee80211_free_hw(mvm->hw);
return NULL;
}
struct iwl_mvm *mvm =
container_of(wk, struct iwl_mvm, async_handlers_wk);
struct iwl_async_handler_entry *entry, *tmp;
- struct list_head local_list;
-
- INIT_LIST_HEAD(&local_list);
+ LIST_HEAD(local_list);
/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+ if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
- else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
+ else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+ if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
- else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP &&
- pkt->hdr.cmd == RX_QUEUES_NOTIFICATION))
+ else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
iwl_mvm_rx_queue_notif(mvm, rxb, 0);
- else if (pkt->hdr.cmd == FRAME_RELEASE)
+ else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
+ if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
- else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
- pkt->hdr.group_id == DATA_PATH_GROUP))
+ else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
iwl_mvm_rx_queue_notif(mvm, rxb, queue);
- else
+ else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}
/* enable PM on p2p if p2p stand alone */
if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
- p2p_mvmvif->pm_enabled = true;
+ p2p_mvmvif->pm_enabled = true;
return;
}
ap_mvmvif->phy_ctxt->id);
/* clients are not stand alone: enable PM if DCM */
- if (!(client_same_channel || ap_same_channel) &&
- (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
+ if (!(client_same_channel || ap_same_channel)) {
if (vifs->bss_active)
bss_mvmvif->pm_enabled = true;
- if (vifs->p2p_active &&
- (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM))
+ if (vifs->p2p_active)
p2p_mvmvif->pm_enabled = true;
return;
}
* There is only one channel in the system and there are only
* bss and p2p clients that share it
*/
- if (client_same_channel && !vifs->ap_active &&
- (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
+ if (client_same_channel && !vifs->ap_active) {
/* share same channel*/
bss_mvmvif->pm_enabled = true;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
- p2p_mvmvif->pm_enabled = true;
+ p2p_mvmvif->pm_enabled = true;
}
}
ssn = ieee80211_sn_inc(ssn);
- /* holes are valid since nssn indicates frames were received. */
- if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
- continue;
- /* Empty the list. Will have more than one frame for A-MSDU */
+ /*
+	 * Empty the list. It will have more than one frame for an A-MSDU.
+	 * An empty list is valid as well, since the nssn indicates frames
+	 * were received.
+ */
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
if (reorder_buf->num_stored && !reorder_buf->removed) {
u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
- while (!skb_peek_tail(&reorder_buf->entries[index]))
+ while (skb_queue_empty(&reorder_buf->entries[index]))
index = (index + 1) % reorder_buf->buf_size;
/* modify timer to match next frame's expiration time */
mod_timer(&reorder_buf->reorder_timer,
u16 sn = 0, index = 0;
bool expired = false;
- spin_lock_bh(&buf->lock);
+ spin_lock(&buf->lock);
if (!buf->num_stored || buf->removed) {
- spin_unlock_bh(&buf->lock);
+ spin_unlock(&buf->lock);
return;
}
for (i = 0; i < buf->buf_size ; i++) {
index = (buf->head_sn + i) % buf->buf_size;
- if (!skb_peek_tail(&buf->entries[index]))
+ if (skb_queue_empty(&buf->entries[index]))
continue;
if (!time_after(jiffies, buf->reorder_time[index] +
RX_REORDER_BUF_TIMEOUT_MQ))
buf->reorder_time[index] +
1 + RX_REORDER_BUF_TIMEOUT_MQ);
}
- spin_unlock_bh(&buf->lock);
+ spin_unlock(&buf->lock);
}
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
struct iwl_mvm_reorder_buffer *reorder_buf;
u8 baid = data->baid;
- if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
+ if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
return;
rcu_read_lock();
baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT;
+ /*
+ * This also covers the case of receiving a Block Ack Request
+ * outside a BA session; we'll pass it to mac80211 and that
+ * then sends a delBA action frame.
+ */
if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
return false;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- /* not a data packet */
- if (!ieee80211_is_data_qos(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1))
+	/* not a data packet or a BAR */
+ if (!ieee80211_is_back_req(hdr->frame_control) &&
+ (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1)))
return false;
if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
spin_lock_bh(&buffer->lock);
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ goto drop;
+ }
+
/*
* If there was a significant jump in the nssn - adjust.
* If the SN is smaller than the NSSN it might need to first go into
struct cfg80211_match_set *match_sets;
int n_scan_plans;
struct cfg80211_sched_scan_plan *scan_plans;
+ u32 measurement_dwell;
};
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
return IWL_SCAN_TYPE_WILD;
}
+static int
+iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm,
+ struct cfg80211_scan_request *req,
+ struct iwl_mvm_scan_params *params)
+{
+ if (!req->duration)
+ return 0;
+
+ if (req->duration_mandatory &&
+ req->duration > scan_timing[params->type].max_out_time) {
+ IWL_DEBUG_SCAN(mvm,
+ "Measurement scan - too long dwell %hu (max out time %u)\n",
+ req->duration,
+ scan_timing[params->type].max_out_time);
+ return -EOPNOTSUPP;
+ }
+
+ return min_t(u32, (u32)req->duration,
+ scan_timing[params->type].max_out_time);
+}
+
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
{
/* require rrm scan whenever the fw supports it */
params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
}
-static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
- enum iwl_scan_priority_ext prio)
-{
- if (fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
- return cpu_to_le32(prio);
-
- if (prio <= IWL_SCAN_PRIORITY_EXT_2)
- return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
-
- if (prio <= IWL_SCAN_PRIORITY_EXT_4)
- return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
-
- return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
-}
-
static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_lmac *cmd,
struct iwl_mvm_scan_params *params)
cmd->extended_dwell = scan_timing[params->type].dwell_extended;
cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
- cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+ cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
struct iwl_scan_req_umac *cmd,
struct iwl_mvm_scan_params *params)
{
- cmd->extended_dwell = scan_timing[params->type].dwell_extended;
- cmd->active_dwell = scan_timing[params->type].dwell_active;
- cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+ if (params->measurement_dwell) {
+ cmd->active_dwell = params->measurement_dwell;
+ cmd->passive_dwell = params->measurement_dwell;
+ cmd->extended_dwell = params->measurement_dwell;
+ } else {
+ cmd->active_dwell = scan_timing[params->type].dwell_active;
+ cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+ cmd->extended_dwell = scan_timing[params->type].dwell_extended;
+ }
cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
- cmd->scan_priority =
- iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+ cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
if (iwl_mvm_is_regular_scan(params))
- cmd->ooc_priority =
- iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
else
- cmd->ooc_priority =
- iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
static void
}
}
-static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
+static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
{
- int flags = 0;
+ u16 flags = 0;
if (params->n_ssids == 0)
flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
if (!iwl_mvm_is_regular_scan(params))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+ if (params->measurement_dwell)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvm->scan_iter_notif_enabled)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
mvm->fw->ucode_capa.n_scan_channels;
int uid, i;
u32 ssid_bitmap = 0;
+ struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
mvm->scan_uid_status[uid] = type;
cmd->uid = cpu_to_le32(uid);
- cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params,
+ cmd->general_flags = cpu_to_le16(iwl_mvm_scan_umac_flags(mvm, params,
vif));
+ cmd->scan_start_mac_id = scan_vif->id;
if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
iwl_mvm_get_scan_type(mvm,
vif->type == NL80211_IFTYPE_P2P_DEVICE);
+	ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
+ if (ret < 0)
+ return ret;
+
+ params.measurement_dwell = ret;
+
	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
+ mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
queue_delayed_work(system_wq, &mvm->scan_timeout_dwork,
if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
struct cfg80211_scan_info info = {
.aborted = aborted,
+ .scan_start_tsf = mvm->scan_start,
};
+ memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN);
ieee80211_scan_completed(mvm->hw, &info);
+ mvm->scan_vif = NULL;
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
cancel_delayed_work(&mvm->scan_timeout_dwork);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
u8 buf[256];
+ mvm->scan_start = le64_to_cpu(notif->start_tsf);
+
IWL_DEBUG_SCAN(mvm,
"UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
notif->status, notif->scanned_channels,
ieee80211_sched_scan_results(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
}
+
+ IWL_DEBUG_SCAN(mvm,
+ "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
+ mvm->scan_start);
}
static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
ret);
/* Make sure the SCD wrptr is correctly set before reconfiguring */
- iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
- cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
- ssn, wdg_timeout);
+ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
/* Update the TID "owner" of the queue */
spin_lock_bh(&mvm->queue_info_lock);
.scd_queue = queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
- u8 ac;
+ u8 txq_curr_ac;
disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
spin_lock_bh(&mvm->queue_info_lock);
- ac = mvm->queue_info[queue].mac80211_ac;
+ txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
- cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock);
/* If aggs should be turned back on - do it */
if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
- struct iwl_mvm_add_sta_cmd cmd;
+ struct iwl_mvm_add_sta_cmd cmd = {0};
mvmsta->tid_disable_agg &= ~BIT(tid);
return ret;
}
-int iwl_mvm_update_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
-}
-
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
/* If DQA is supported - the queues can be disabled now */
- if (iwl_mvm_is_dqa_supported(mvm))
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ u8 reserved_txq = mvm_sta->reserved_queue;
+ enum iwl_mvm_queue_status *status;
+
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+ /*
+ * If no traffic has gone through the reserved TXQ - it
+ * is still marked as IWL_MVM_QUEUE_RESERVED, and
+ * should be manually marked as free again
+ */
+ spin_lock_bh(&mvm->queue_info_lock);
+ status = &mvm->queue_info[reserved_txq].status;
+ if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+ (*status != IWL_MVM_QUEUE_FREE),
+ "sta_id %d reserved txq %d status %d",
+ mvm_sta->sta_id, reserved_txq, *status)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return -EINVAL;
+ }
+
+ *status = IWL_MVM_QUEUE_FREE;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ }
+
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == mvm_sta->sta_id) {
/* if associated - we can't remove the AP STA now */
baid_data->baid = baid;
baid_data->timeout = timeout;
baid_data->last_rx = jiffies;
- init_timer(&baid_data->session_timer);
- baid_data->session_timer.function =
- iwl_mvm_rx_agg_session_expired;
- baid_data->session_timer.data =
- (unsigned long)&mvm->baid_map[baid];
+ setup_timer(&baid_data->session_timer,
+ iwl_mvm_rx_agg_session_expired,
+ (unsigned long)&mvm->baid_map[baid]);
baid_data->mvm = mvm;
baid_data->tid = tid;
baid_data->sta_id = mvm_sta->sta_id;
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int iwl_mvm_update_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+
+static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
+}
+
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
};
u32 cmdid;
- if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
- cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
- PHY_OPS_GROUP, 0);
- else
- cmdid = CMD_DTS_MEASUREMENT_TRIGGER;
+ cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+ PHY_OPS_GROUP, 0);
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
DTS_MEASUREMENT_NOTIF_WIDE) };
int ret;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
- temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;
-
lockdep_assert_held(&mvm->mutex);
iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
rate_idx = info->control.rates[0].idx;
/* if the rate isn't a well known legacy rate, take the lowest one */
- if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY)
+ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
rate_idx = rate_lowest_index(
&mvm->nvm_data->bands[info->band], sta);
* one.
* Need to handle this.
*/
- tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TC_CMD_SEC_KEY_FROM_TABLE;
+ tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
tx_cmd->key[0] = keyconf->hw_key_idx;
iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
break;
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info, __le16 fc)
{
- if (iwl_mvm_is_dqa_supported(mvm)) {
- if (info->control.vif->type == NL80211_IFTYPE_AP &&
- ieee80211_is_probe_resp(fc))
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ return info->hw_queue;
+
+ switch (info->control.vif->type) {
+ case NL80211_IFTYPE_AP:
+ /*
+		 * handle legacy hostapd as well, where a station may be
+		 * added only after association.
+ */
+ if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
- else if (ieee80211_is_mgmt(fc) &&
- info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ if (info->hw_queue == info->control.vif->cab_queue)
+ return info->hw_queue;
+
+ WARN_ON_ONCE(1);
+ return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ if (ieee80211_is_mgmt(fc))
return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
- }
+ if (info->hw_queue == info->control.vif->cab_queue)
+ return info->hw_queue;
- return info->hw_queue;
+ WARN_ON_ONCE(1);
+ return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ default:
+ WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
+ return -1;
+ }
}
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
sta_id = mvmvif->bcast_sta.sta_id;
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
hdr->frame_control);
+ if (queue < 0)
+ return -1;
+
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
tid = IWL_MAX_TID_COUNT;
}
- if (iwl_mvm_is_dqa_supported(mvm))
+ if (iwl_mvm_is_dqa_supported(mvm)) {
txq_id = mvmsta->tid_data[tid].txq_id;
+ if (ieee80211_is_mgmt(fc))
+ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+ }
+
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
IWL_DEBUG_TX_QUEUES(mvm,
"Can continue DELBA flow ssn = next_recl = %d\n",
tid_data->next_reclaimed);
- iwl_mvm_disable_txq(mvm, tid_data->txq_id,
- vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
- CMD_ASYNC);
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ u8 mac80211_ac = tid_to_mac80211_ac[tid];
+
+ iwl_mvm_disable_txq(mvm, tid_data->txq_id,
+ vif->hw_queue[mac80211_ac], tid,
+ CMD_ASYNC);
+ }
tid_data->state = IWL_AGG_OFF;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
}
-static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
- struct iwl_mvm_ba_notif *ba_notif,
- struct iwl_mvm_tid_data *tid_data)
-{
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_len = ba_notif->txed_2_done;
- info->status.ampdu_len = ba_notif->txed;
- iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
- info);
- /* TODO: not accounted if the whole A-MPDU failed */
- info->status.tx_time = tid_data->tx_time;
- info->status.status_driver_data[0] =
- (void *)(uintptr_t)ba_notif->reduced_txp;
- info->status.status_driver_data[1] =
- (void *)(uintptr_t)tid_data->rate_n_flags;
-}
-
-void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ int txq, int index,
+ struct ieee80211_tx_info *ba_info, u32 rate)
{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
struct sk_buff_head reclaimed_skbs;
struct iwl_mvm_tid_data *tid_data;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
struct sk_buff *skb;
- int sta_id, tid, freed;
- /* "flow" corresponds to Tx queue */
- u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
- /* "ssn" is start of block-ack Tx window, corresponds to index
- * (in Tx queue's circular buffer) of first TFD/frame in window */
- u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
-
- sta_id = ba_notif->sta_id;
- tid = ba_notif->tid;
+ int freed;
if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
tid >= IWL_MAX_TID_COUNT,
mvmsta = iwl_mvm_sta_from_mac80211(sta);
tid_data = &mvmsta->tid_data[tid];
- if (tid_data->txq_id != scd_flow) {
+ if (tid_data->txq_id != txq) {
IWL_ERR(mvm,
- "invalid BA notification: Q %d, tid %d, flow %d\n",
- tid_data->txq_id, tid, scd_flow);
+ "invalid BA notification: Q %d, tid %d\n",
+ tid_data->txq_id, tid);
rcu_read_unlock();
return;
}
* block-ack window (we assume that they've been successfully
* transmitted ... if not, it's too late anyway).
*/
- iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
- &reclaimed_skbs);
+ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
- IWL_DEBUG_TX_REPLY(mvm,
- "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
- (u8 *)&ba_notif->sta_addr_lo32,
- ba_notif->sta_id);
- IWL_DEBUG_TX_REPLY(mvm,
- "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
- ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
- (unsigned long long)le64_to_cpu(ba_notif->bitmap),
- scd_flow, ba_resp_scd_ssn, ba_notif->txed,
- ba_notif->txed_2_done);
-
- IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
- ba_notif->reduced_txp);
- tid_data->next_reclaimed = ba_resp_scd_ssn;
+ tid_data->next_reclaimed = index;
iwl_mvm_check_ratid_empty(mvm, sta, tid);
freed = 0;
+ ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
skb_queue_walk(&reclaimed_skbs, skb) {
struct ieee80211_hdr *hdr = (void *)skb->data;
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
- if (freed == 1)
- iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data);
+ if (freed == 1) {
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ memcpy(&info->status, &ba_info->status,
+ sizeof(ba_info->status));
+ iwl_mvm_hwrate_to_tx_status(rate, info);
+ }
}
spin_unlock_bh(&mvmsta->lock);
* Still it's important to update RS about sent vs. acked.
*/
if (skb_queue_empty(&reclaimed_skbs)) {
- struct ieee80211_tx_info ba_info = {};
struct ieee80211_chanctx_conf *chanctx_conf = NULL;
if (mvmsta->vif)
if (WARN_ON_ONCE(!chanctx_conf))
goto out;
- ba_info.band = chanctx_conf->def.chan->band;
- iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
+ ba_info->band = chanctx_conf->def.chan->band;
+ iwl_mvm_hwrate_to_tx_status(rate, ba_info);
IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
- iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info, false);
+ iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
}
out:
}
}
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int sta_id, tid, txq, index;
+ struct ieee80211_tx_info ba_info = {};
+ struct iwl_mvm_ba_notif *ba_notif;
+ struct iwl_mvm_tid_data *tid_data;
+ struct iwl_mvm_sta *mvmsta;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_mvm_compressed_ba_notif *ba_res =
+ (void *)pkt->data;
+
+ sta_id = ba_res->sta_id;
+ ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
+ ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
+ ba_info.status.tx_time =
+ (u16)le32_to_cpu(ba_res->wireless_time);
+ ba_info.status.status_driver_data[0] =
+ (void *)(uintptr_t)ba_res->reduced_txp;
+
+ /*
+ * TODO:
+ * When supporting multi TID aggregations - we need to move
+ * next_reclaimed to be per TXQ and not per TID or handle it
+ * in a different way.
+ * This will go together with SN and AddBA offload and cannot
+ * be handled properly for now.
+ */
+ WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
+ iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
+ (int)ba_res->tfd[0].q_num,
+ le16_to_cpu(ba_res->tfd[0].tfd_index),
+ &ba_info, le32_to_cpu(ba_res->tx_rate));
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+ return;
+ }
+
+ ba_notif = (void *)pkt->data;
+ sta_id = ba_notif->sta_id;
+ tid = ba_notif->tid;
+ /* "flow" corresponds to Tx queue */
+ txq = le16_to_cpu(ba_notif->scd_flow);
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ index = le16_to_cpu(ba_notif->scd_ssn);
+
+ rcu_read_lock();
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ if (WARN_ON_ONCE(!mvmsta)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ tid_data = &mvmsta->tid_data[tid];
+
+ ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
+ ba_info.status.ampdu_len = ba_notif->txed;
+ ba_info.status.tx_time = tid_data->tx_time;
+ ba_info.status.status_driver_data[0] =
+ (void *)(uintptr_t)ba_notif->reduced_txp;
+
+ rcu_read_unlock();
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+ tid_data->rate_n_flags);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
+ (u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+ ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+ le64_to_cpu(ba_notif->bitmap), txq, index,
+ ba_notif->txed, ba_notif->txed_2_done);
+
+ IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
+ ba_notif->reduced_txp);
+}
+
/*
* Note that there are transports that buffer frames before they reach
* the firmware. This means that after flush_tx_path is called, the
base = mvm->fw->inst_errlog_ptr;
}
- if (base < 0x800000) {
+ if (base < 0x400000) {
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
rcu_read_unlock();
}
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
+{
+ bool ps_disabled;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Disable power save when reading GP2 */
+ ps_disabled = mvm->ps_disabled;
+ if (!ps_disabled) {
+ mvm->ps_disabled = true;
+ iwl_mvm_power_update_device(mvm);
+ }
+
+ *gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+ *boottime = ktime_get_boot_ns();
+
+ if (!ps_disabled) {
+ mvm->ps_disabled = ps_disabled;
+ iwl_mvm_power_update_device(mvm);
+ }
+}
+
int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
enum iwl_lqm_cmd_operatrions operation,
u32 duration, u32 timeout)
{IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
/* 9000 Series */
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
+#include <linux/cpu.h>
#include "iwl-fh.h"
#include "iwl-csr.h"
* be needed for potential data in the SKB's head. The remaining ones can
* be used for frags.
*/
-#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
/*
* RX related structures and functions
/* only for SYNC commands, iff the reply skb is wanted */
struct iwl_host_cmd *source;
u32 flags;
+ u32 tbs;
};
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues.
- *
- * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
- * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
- * the software buffers (in the variables @meta, @txb in struct
- * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
- * the same struct) have 256.
- * This means that we end up with the following:
- * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- * SW entries: | 0 | ... | 31 |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
- */
-struct iwl_queue {
- int write_ptr; /* 1-st empty entry (index) host_w*/
- int read_ptr; /* last used entry (index) host_r*/
- /* use for monitoring and recovering the stuck queue */
- dma_addr_t dma_addr; /* physical addr for BD's */
- int n_window; /* safe queue window */
- u32 id;
- int low_mark; /* low watermark, resume queue if free
- * space more than this */
- int high_mark; /* high watermark, stop queue if free
- * space less than this */
-};
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @write_ptr: 1-st empty entry (index) host_w
+ * @read_ptr: last used entry (index) host_r
+ * @dma_addr: physical addr for BD's
+ * @n_window: safe queue window
+ * @id: queue id
+ * @low_mark: low watermark, resume queue if free space more than this
+ * @high_mark: high watermark, stop queue if free space less than this
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
+ *
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
+ * there might be HW changes in the future). For the normal TX
+ * queues, n_window, which is the size of the software queue data
+ * is also 256; however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
+ * This means that we end up with the following:
+ * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ * SW entries: | 0 | ... | 31 |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlayed over the HW queue.
*/
struct iwl_txq {
- struct iwl_queue q;
- struct iwl_tfd *tfds;
+ void *tfds;
struct iwl_pcie_first_tb_buf *first_tb_bufs;
dma_addr_t first_tb_dma;
struct iwl_pcie_txq_entry *entries;
bool block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
+
+ int write_ptr;
+ int read_ptr;
+ dma_addr_t dma_addr;
+ int n_window;
+ u32 id;
+ int low_mark;
+ int high_mark;
};
static inline dma_addr_t
u8 *pos;
};
+/**
+ * enum iwl_shared_irq_flags - level of sharing for irq
+ * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non-RX causes.
+ * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves the first RSS queue.
+ */
+enum iwl_shared_irq_flags {
+ IWL_SHARED_IRQ_NON_RX = BIT(0),
+ IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
+};
+
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_buf_size: Rx buffer size
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
- * @wide_cmd_header: true when ucode supports wide command header format
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
* @rx_page_order: page order for receive buffer size
* @fw_mon_size: size of the buffer for the firmware monitor
* @msix_entries: array of MSI-X entries
* @msix_enabled: true if managed to enable MSI-X
- * @allocated_vector: the number of interrupt vector allocated by the OS
- * @default_irq_num: default irq for non rx interrupt
+ * @shared_vec_mask: the type of causes the shared vector handles
+ * (see iwl_shared_irq_flags).
+ * @alloc_vecs: the number of interrupt vectors allocated by the OS
+ * @def_irq: default irq for non rx causes
* @fh_init_mask: initial unmasked fh causes
* @hw_init_mask: initial unmasked hw causes
* @fh_mask: current unmasked fh causes
unsigned int cmd_q_wdg_timeout;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
+ u8 max_tbs;
+ u16 tfd_size;
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
bool scd_set_active;
- bool wide_cmd_header;
bool sw_csum_tx;
u32 rx_page_order;
struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
bool msix_enabled;
- u32 allocated_vector;
- u32 default_irq_num;
+ u8 shared_vec_mask;
+ u32 alloc_vecs;
+ u32 def_irq;
u32 fh_init_mask;
u32 hw_init_mask;
u32 fh_mask;
u32 hw_mask;
+ cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
};
static inline struct iwl_trans_pcie *
bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode);
+dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
+ u8 idx)
{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->tb_len);
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- return le16_to_cpu(tb->hi_n_len) >> 4;
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+ }
}
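/*
* A worked example of the legacy packing read back above (a sketch, not
* driver code): for hi_n_len = (0x012 << 4) | 0x3, the TB length is
* hi_n_len >> 4 == 0x012 bytes, and DMA address bits [35:32] are
* hi_n_len & 0xF == 0x3. The TFH format instead keeps the full 16-bit
* length in tb->tb_len and a 64-bit address in tb->addr.
*/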
/*****************************************************
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
+ if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
}
}
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
+ if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else
IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->q.id);
+ txq->id);
}
-static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
+static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
}
-static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
+static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
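/*
* A worked example (assumed values): on the command queue n_window is 32,
* so a HW write pointer of 200 maps to SW entry 200 & 31 == 8; the
* 256-entry HW ring is folded onto the 32-entry SW window described in
* the iwl_txq comment above.
*/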
while (pending) {
int i;
- struct list_head local_allocated;
+ LIST_HEAD(local_allocated);
gfp_t gfp_mask = GFP_KERNEL;
/* Do not post a warning if there are only a few requests */
if (pending < RX_PENDING_WATERMARK)
gfp_mask |= __GFP_NOWARN;
- INIT_LIST_HEAD(&local_allocated);
-
for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
struct iwl_rx_mem_buffer *rxb;
struct page *page;
FH_RSCSR_RXQ_POS != rxq->id);
IWL_DEBUG_RX(trans,
- "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
+ "cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
rxcb._offset,
iwl_get_cmd_string(trans,
iwl_cmd_id(pkt->hdr.cmd,
pkt->hdr.group_id,
0)),
- pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
+ pkt->hdr.group_id, pkt->hdr.cmd,
+ le16_to_cpu(pkt->hdr.sequence));
len = iwl_rx_packet_len(pkt);
len += sizeof(u32); /* account for status word */
sequence = le16_to_cpu(pkt->hdr.sequence);
index = SEQ_TO_INDEX(sequence);
- cmd_index = get_cmd_index(&txq->q, index);
+ cmd_index = get_cmd_index(txq, index);
if (rxq->id == 0)
iwl_op_mode_rx(trans->op_mode, &rxq->napi,
inta_fh,
iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
+ if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
+ inta_fh & MSIX_FH_INT_CAUSES_Q0) {
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans, 0);
+ local_bh_enable();
+ }
+
+ if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
+ inta_fh & MSIX_FH_INT_CAUSES_Q1) {
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans, 1);
+ local_bh_enable();
+ }
+
/* This "Tx" DMA channel is used only for loading uCode */
if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
if (trans_pcie->msix_enabled) {
int i;
- for (i = 0; i < trans_pcie->allocated_vector; i++)
+ for (i = 0; i < trans_pcie->alloc_vecs; i++)
synchronize_irq(trans_pcie->msix_entries[i].vector);
} else {
synchronize_irq(trans_pcie->pci_dev->irq);
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
+static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
+ int i;
+
+ /*
+ * Access all non-RX causes and map them to the default irq.
+ * In case we are missing at least one interrupt vector,
+ * the first interrupt vector will serve non-RX and FBQ causes.
+ */
+ for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
+ iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
+ iwl_clear_bit(trans, causes_list[i].mask_reg,
+ causes_list[i].cause_num);
+ }
+}
+
+static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 offset =
+ trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+ u32 val, idx;
+
+ /*
+ * The first RX queue - the fallback queue, which is designated for
+ * management frames, command responses, etc. - is always mapped to
+ * the first interrupt vector. The other RX queues are mapped to
+ * the other (N - 2) interrupt vectors.
+ */
+ val = BIT(MSIX_FH_INT_CAUSES_Q(0));
+ for (idx = 1; idx < trans->num_rx_queues; idx++) {
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
+ MSIX_FH_INT_CAUSES_Q(idx - offset));
+ val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
+ }
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
+
+ val = MSIX_FH_INT_CAUSES_Q(0);
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
+ val |= MSIX_NON_AUTO_CLEAR_CAUSE;
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
+}
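/*
* A worked example (assumed allocation) of the mapping above: with 5 RX
* queues and both sharing flags set, RX queues 0 and 1 are bound to
* vector 0 (marked MSIX_NON_AUTO_CLEAR_CAUSE, since non-RX causes land
* there too), while queues 2..4 get vectors 1..3.
*/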
+
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
- u32 val, max_rx_vector, i;
struct iwl_trans *trans = trans_pcie->trans;
- max_rx_vector = trans_pcie->allocated_vector - 1;
-
if (!trans_pcie->msix_enabled) {
if (trans->cfg->mq_rx_supported)
iwl_write_prph(trans, UREG_CHICK,
iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
/*
- * Each cause from the list above and the RX causes is represented as
- * a byte in the IVAR table. We access the first (N - 1) bytes and map
- * them to the (N - 1) vectors so these vectors will be used as rx
- * vectors. Then access all non rx causes and map them to the
- * default queue (N'th queue).
+ * Each cause from the causes list above and the RX causes is
+ * represented as a byte in the IVAR table. The first nibble holds
+ * the interrupt vector bound to the cause; the second holds the
+ * no-auto-clear flag, which is set when the cause's vector is
+ * bound to serve other causes as well.
*/
- for (i = 0; i < max_rx_vector; i++) {
- iwl_write8(trans, CSR_MSIX_RX_IVAR(i), MSIX_FH_INT_CAUSES_Q(i));
- iwl_clear_bit(trans, CSR_MSIX_FH_INT_MASK_AD,
- BIT(MSIX_FH_INT_CAUSES_Q(i)));
- }
+ iwl_pcie_map_rx_causes(trans);
+
+ iwl_pcie_map_non_rx_causes(trans);
- for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
- val = trans_pcie->default_irq_num |
- MSIX_NON_AUTO_CLEAR_CAUSE;
- iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
- iwl_clear_bit(trans, causes_list[i].mask_reg,
- causes_list[i].cause_num);
- }
trans_pcie->fh_init_mask =
~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
trans_pcie->fh_mask = trans_pcie->fh_init_mask;
struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int max_irqs, num_irqs, i, ret, nr_online_cpus;
u16 pci_cmd;
- int max_vector;
- int ret, i;
-
- if (trans->cfg->mq_rx_supported) {
- max_vector = min_t(u32, (num_possible_cpus() + 2),
- IWL_MAX_RX_HW_QUEUES);
- for (i = 0; i < max_vector; i++)
- trans_pcie->msix_entries[i].entry = i;
-
- ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
- MSIX_MIN_INTERRUPT_VECTORS,
- max_vector);
- if (ret > 1) {
- IWL_DEBUG_INFO(trans,
- "Enable MSI-X allocate %d interrupt vector\n",
- ret);
- trans_pcie->allocated_vector = ret;
- trans_pcie->default_irq_num =
- trans_pcie->allocated_vector - 1;
- trans_pcie->trans->num_rx_queues =
- trans_pcie->allocated_vector - 1;
- trans_pcie->msix_enabled = true;
-
- return;
- }
+
+ if (!trans->cfg->mq_rx_supported)
+ goto enable_msi;
+
+ nr_online_cpus = num_online_cpus();
+ max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
+ for (i = 0; i < max_irqs; i++)
+ trans_pcie->msix_entries[i].entry = i;
+
+ num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
+ MSIX_MIN_INTERRUPT_VECTORS,
+ max_irqs);
+ if (num_irqs < 0) {
IWL_DEBUG_INFO(trans,
- "ret = %d %s move to msi mode\n", ret,
- (ret == 1) ?
- "can't allocate more than 1 interrupt vector" :
- "failed to enable msi-x mode");
- pci_disable_msix(pdev);
+ "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
+ num_irqs);
+ goto enable_msi;
+ }
+ trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
+
+ IWL_DEBUG_INFO(trans,
+ "MSI-X enabled. %d interrupt vectors were allocated\n",
+ num_irqs);
+
+ /*
+ * In case the OS provides fewer interrupts than requested, different
+ * causes will share the same interrupt vector as follows:
+ * One interrupt less: non-RX causes shared with FBQ.
+ * Two interrupts less: non-RX causes shared with FBQ and RSS.
+ * More than two interrupts less: we will use fewer RSS queues.
+ */
+ if (num_irqs <= nr_online_cpus) {
+ trans_pcie->trans->num_rx_queues = num_irqs + 1;
+ trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
+ IWL_SHARED_IRQ_FIRST_RSS;
+ } else if (num_irqs == nr_online_cpus + 1) {
+ trans_pcie->trans->num_rx_queues = num_irqs;
+ trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
+ } else {
+ trans_pcie->trans->num_rx_queues = num_irqs - 1;
}
+ trans_pcie->alloc_vecs = num_irqs;
+ trans_pcie->msix_enabled = true;
+ return;
+
+enable_msi:
ret = pci_enable_msi(pdev);
if (ret) {
dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
}
}
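/*
* A standalone sketch of the fallback policy above (CPU count assumed,
* not from the patch): with 4 online CPUs, max_irqs is
* min(4 + 2, IWL_MAX_RX_HW_QUEUES) == 6.
* granted 6 -> 5 RX queues, vector 5 dedicated to non-RX causes
* granted 5 -> 5 RX queues, vector 0 shared with non-RX causes
* granted 4 -> 5 RX queues, vector 0 shared with non-RX causes and
* the first RSS queue
*/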
+static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
+{
+ int iter_rx_q, i, ret, cpu, offset;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
+ iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
+ offset = 1 + i;
+ for (; i < iter_rx_q; i++) {
+ /*
+ * cpumask_next() returns the first online CPU strictly after its
+ * first argument, so start the search one slot below the CPU we
+ * want (the result will be > i - offset).
+ */
+ cpu = cpumask_next(i - offset, cpu_online_mask);
+ cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
+ ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
+ &trans_pcie->affinity_mask[i]);
+ if (ret)
+ IWL_ERR(trans_pcie->trans,
+ "Failed to set affinity mask for IRQ %d\n",
+ i);
+ }
+}
+
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie)
{
- int i, last_vector;
-
- last_vector = trans_pcie->trans->num_rx_queues;
+ int i;
- for (i = 0; i < trans_pcie->allocated_vector; i++) {
+ for (i = 0; i < trans_pcie->alloc_vecs; i++) {
int ret;
-
- ret = request_threaded_irq(trans_pcie->msix_entries[i].vector,
- iwl_pcie_msix_isr,
- (i == last_vector) ?
- iwl_pcie_irq_msix_handler :
- iwl_pcie_irq_rx_msix_handler,
- IRQF_SHARED,
- DRV_NAME,
- &trans_pcie->msix_entries[i]);
+ struct msix_entry *msix_entry;
+
+ msix_entry = &trans_pcie->msix_entries[i];
+ ret = devm_request_threaded_irq(&pdev->dev,
+ msix_entry->vector,
+ iwl_pcie_msix_isr,
+ (i == trans_pcie->def_irq) ?
+ iwl_pcie_irq_msix_handler :
+ iwl_pcie_irq_rx_msix_handler,
+ IRQF_SHARED,
+ DRV_NAME,
+ msix_entry);
if (ret) {
- int j;
-
IWL_ERR(trans_pcie->trans,
"Error allocating IRQ %d\n", i);
- for (j = 0; j < i; j++)
- free_irq(trans_pcie->msix_entries[j].vector,
- &trans_pcie->msix_entries[j]);
- pci_disable_msix(pdev);
+
return ret;
}
}
+ iwl_pcie_irq_set_affinity(trans_pcie->trans);
return 0;
}
trans_pcie->rx_page_order =
iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
- trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
iwl_pcie_rx_free(trans);
if (trans_pcie->msix_enabled) {
- for (i = 0; i < trans_pcie->allocated_vector; i++)
- free_irq(trans_pcie->msix_entries[i].vector,
- &trans_pcie->msix_entries[i]);
+ for (i = 0; i < trans_pcie->alloc_vecs; i++) {
+ irq_set_affinity_hint(
+ trans_pcie->msix_entries[i].vector,
+ NULL);
+ }
- pci_disable_msix(trans_pcie->pci_dev);
trans_pcie->msix_enabled = false;
} else {
- free_irq(trans_pcie->pci_dev->irq, trans);
-
iwl_pcie_free_ict(trans);
-
- pci_disable_msi(trans_pcie->pci_dev);
}
- iounmap(trans_pcie->hw_base);
- pci_release_regions(trans_pcie->pci_dev);
- pci_disable_device(trans_pcie->pci_dev);
iwl_pcie_free_fw_monitor(trans);
txq->frozen = freeze;
- if (txq->q.read_ptr == txq->q.write_ptr)
+ if (txq->read_ptr == txq->write_ptr)
goto next_queue;
if (freeze) {
txq->block--;
if (!txq->block) {
iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (i << 8));
+ txq->write_ptr | (i << 8));
}
} else if (block) {
txq->block++;
int cnt;
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
- txq->q.read_ptr, txq->q.write_ptr);
+ txq->read_ptr, txq->write_ptr);
if (trans->cfg->use_tfh)
/* TODO: access new SCD registers and dump them */
return;
scd_sram_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ SCD_TX_STTS_QUEUE_OFFSET(txq->id);
iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
- struct iwl_queue *q;
int cnt;
unsigned long now = jiffies;
int ret = 0;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
txq = &trans_pcie->txq[cnt];
- q = &txq->q;
- wr_ptr = ACCESS_ONCE(q->write_ptr);
+ wr_ptr = ACCESS_ONCE(txq->write_ptr);
- while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
!time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
- u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+ u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
if (WARN_ONCE(wr_ptr != write_ptr,
"WR pointer moved while flushing %d -> %d\n",
usleep_range(1000, 2000);
}
- if (q->read_ptr != q->write_ptr) {
+ if (txq->read_ptr != txq->write_ptr) {
IWL_ERR(trans,
"fail to flush all tx fifo queues Q %d\n", cnt);
ret = -ETIMEDOUT;
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
- struct iwl_queue *q;
char *buf;
int pos = 0;
int cnt;
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
txq = &trans_pcie->txq[cnt];
- q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
- cnt, q->read_ptr, q->write_ptr,
+ cnt, txq->read_ptr, txq->write_ptr,
!!test_bit(cnt, trans_pcie->queue_used),
!!test_bit(cnt, trans_pcie->queue_stopped),
txq->need_update, txq->frozen,
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
-static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 cmdlen = 0;
int i;
- for (i = 0; i < IWL_NUM_OF_TBS; i++)
- cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+ for (i = 0; i < trans_pcie->max_tbs; i++)
+ cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
return cmdlen;
}
/* host commands */
len += sizeof(*data) +
- cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+ cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
/* FW monitor */
if (trans_pcie->fw_mon_page) {
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
txcmd = (void *)data->data;
spin_lock_bh(&cmdq->lock);
- ptr = cmdq->q.write_ptr;
- for (i = 0; i < cmdq->q.n_window; i++) {
- u8 idx = get_cmd_index(&cmdq->q, ptr);
+ ptr = cmdq->write_ptr;
+ for (i = 0; i < cmdq->n_window; i++) {
+ u8 idx = get_cmd_index(cmdq, ptr);
u32 caplen, cmdlen;
- cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+ cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
+ trans_pcie->tfd_size * ptr);
caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
if (cmdlen) {
.txq_disable = iwl_trans_pcie_txq_disable,
.txq_enable = iwl_trans_pcie_txq_enable,
+ .get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table,
+
.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
struct iwl_trans *trans;
int ret, addr_size;
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ERR_PTR(ret);
+
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, cfg, &trans_ops_pcie, 0);
if (!trans)
return ERR_PTR(-ENOMEM);
- trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
-
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->trans = trans;
goto out_no_pci;
}
- ret = pci_enable_device(pdev);
- if (ret)
- goto out_no_pci;
if (!cfg->base_params->pcie_l1_allowed) {
/*
else
addr_size = 36;
+ if (cfg->use_tfh) {
+ trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
+ trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
+
+ } else {
+ trans_pcie->max_tbs = IWL_NUM_OF_TBS;
+ trans_pcie->tfd_size = sizeof(struct iwl_tfd);
+ }
+ trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
+
pci_set_master(pdev);
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
/* both attempts failed: */
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
- goto out_pci_disable_device;
+ goto out_no_pci;
}
}
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
if (ret) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
- goto out_pci_disable_device;
+ dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
+ goto out_no_pci;
}
- trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
+ trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
if (!trans_pcie->hw_base) {
- dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+ dev_err(&pdev->dev, "pcim_iomap_table failed\n");
ret = -ENODEV;
- goto out_pci_release_regions;
+ goto out_no_pci;
}
/* We disable the RETRY_TIMEOUT register (0x41) to keep
ret = iwl_pcie_prepare_card_hw(trans);
if (ret) {
IWL_WARN(trans, "Exit HW not ready\n");
- goto out_pci_disable_msi;
+ goto out_no_pci;
}
/*
25000);
if (ret < 0) {
IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
- goto out_pci_disable_msi;
+ goto out_no_pci;
}
if (iwl_trans_grab_nic_access(trans, &flags)) {
if (trans_pcie->msix_enabled) {
if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
- goto out_pci_release_regions;
+ goto out_no_pci;
} else {
ret = iwl_pcie_alloc_ict(trans);
if (ret)
- goto out_pci_disable_msi;
+ goto out_no_pci;
- ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
- iwl_pcie_irq_handler,
- IRQF_SHARED, DRV_NAME, trans);
+ ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ iwl_pcie_isr,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans);
if (ret) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
out_free_ict:
iwl_pcie_free_ict(trans);
-out_pci_disable_msi:
- pci_disable_msi(pdev);
-out_pci_release_regions:
- pci_release_regions(pdev);
-out_pci_disable_device:
- pci_disable_device(pdev);
out_no_pci:
free_percpu(trans_pcie->tso_hdr_page);
iwl_trans_free(trans);
*
***************************************************/
-static int iwl_queue_space(const struct iwl_queue *q)
+static int iwl_queue_space(const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
/*
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
{
q->n_window = slots_num;
q->id = id;
spin_lock(&txq->lock);
/* check if triggered erroneously */
- if (txq->q.read_ptr == txq->q.write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
spin_unlock(&txq->lock);
return;
}
spin_unlock(&txq->lock);
- IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
jiffies_to_msecs(txq->wd_timeout));
iwl_trans_pcie_log_scd_error(trans, txq);
* iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt)
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int write_ptr = txq->q.write_ptr;
- int txq_id = txq->q.id;
+ int write_ptr = txq->write_ptr;
+ int txq_id = txq->id;
u8 sec_ctl = 0;
- u8 sta_id = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
- (void *) txq->entries[txq->q.write_ptr].cmd->payload;
+ (void *)txq->entries[txq->write_ptr].cmd->payload;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- sta_id = tx_cmd->sta_id;
sec_ctl = tx_cmd->sec_ctl;
switch (sec_ctl & TX_CMD_SEC_MSK) {
len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
break;
}
-
if (trans_pcie->bc_table_dword)
len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
return;
- bc_ent = cpu_to_le16(len | (sta_id << 12));
+ if (trans->cfg->use_tfh) {
+ u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+ * filled_tfd_size contains the number of filled bytes in the
+ * TFD.
+ * Dividing it by 64 gives the number of chunks to fetch
+ * to SRAM: 0 for one chunk, 1 for two, and so on.
+ * If, for example, the TFD contains only 3 TBs, then 32 bytes
+ * of the TFD are used, and only one 64-byte chunk should
+ * be fetched.
+ */
+ u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ } else {
+ u8 sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
+ }
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
+ int txq_id = txq->id;
+ int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->q.read_ptr].cmd->payload;
+ (void *)txq->entries[read_ptr].cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
- int txq_id = txq->q.id;
+ int txq_id = txq->id;
lockdep_assert_held(&txq->lock);
* if not in power-save mode, uCode will never sleep when we're
* trying to tx (during RFKILL, we're not trying to tx).
*/
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
if (!txq->block)
iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
+ txq->write_ptr | (txq_id << 8));
}
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
}
}
-static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, int idx)
{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ return txq->tfds + trans_pcie->tfd_size * idx;
+}
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- addr |=
- ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
+{
- return addr;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return (dma_addr_t)(le64_to_cpu(tb->addr));
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
+ dma_addr_t hi_len;
+
+ if (sizeof(dma_addr_t) <= sizeof(u32))
+ return addr;
+
+ hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+ /*
+ * shift by 16 twice to avoid warnings on 32-bit
+ * (where this code never runs anyway due to the
+ * if statement above)
+ */
+ return addr | ((hi_len << 16) << 16);
+ }
}
-static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
+ u8 idx, dma_addr_t addr, u16 len)
{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+ struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
+
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
+
+ tfd_fh->num_tbs = cpu_to_le16(idx + 1);
+ } else {
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+ struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+ u16 hi_n_len = len << 4;
- tb->hi_n_len = cpu_to_le16(hi_n_len);
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
- tfd->num_tbs = idx + 1;
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd_fh->num_tbs = idx + 1;
+ }
}
-static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
{
- return tfd->num_tbs & 0x1f;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+
+ return tfd->num_tbs & 0x1f;
+ }
}
static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd)
+ struct iwl_txq *txq, int index)
{
- int i;
- int num_tbs;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+ void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
/* Sanity check on number of chunks */
- num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
- if (num_tbs >= IWL_NUM_OF_TBS) {
+ if (num_tbs >= trans_pcie->max_tbs) {
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
/* @todo issue fatal error, it is quite a serious situation */
return;
/* first TB is never freed - it's the bidirectional DMA data */
for (i = 1; i < num_tbs; i++) {
- if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+ if (meta->tbs & BIT(i))
dma_unmap_page(trans->dev,
- iwl_pcie_tfd_tb_get_addr(tfd, i),
- iwl_pcie_tfd_tb_get_len(tfd, i),
+ iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
+ iwl_pcie_tfd_tb_get_len(trans, tfd, i),
DMA_TO_DEVICE);
else
dma_unmap_single(trans->dev,
- iwl_pcie_tfd_tb_get_addr(tfd, i),
- iwl_pcie_tfd_tb_get_len(tfd, i),
+ iwl_pcie_tfd_tb_get_addr(trans, tfd,
+ i),
+ iwl_pcie_tfd_tb_get_len(trans, tfd,
+ i),
DMA_TO_DEVICE);
}
- tfd->num_tbs = 0;
+
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ } else {
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ }
+
}
/*
*/
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
- struct iwl_tfd *tfd_tmp = txq->tfds;
-
/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
* idx is bounded by n_window
*/
- int rd_ptr = txq->q.read_ptr;
- int idx = get_cmd_index(&txq->q, rd_ptr);
+ int rd_ptr = txq->read_ptr;
+ int idx = get_cmd_index(txq, rd_ptr);
lockdep_assert_held(&txq->lock);
/* We have only txq->n_window entries in txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
- iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
+ iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
/* free SKB */
if (txq->entries) {
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
dma_addr_t addr, u16 len, bool reset)
{
- struct iwl_queue *q;
- struct iwl_tfd *tfd, *tfd_tmp;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ void *tfd;
u32 num_tbs;
- q = &txq->q;
- tfd_tmp = txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
+ tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
if (reset)
- memset(tfd, 0, sizeof(*tfd));
+ memset(tfd, 0, trans_pcie->tfd_size);
- num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
- /* Each TFD can point to a maximum 20 Tx buffers */
- if (num_tbs >= IWL_NUM_OF_TBS) {
+ /* Each TFD can point to a maximum of max_tbs Tx buffers */
+ if (num_tbs >= trans_pcie->max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
+ trans_pcie->max_tbs);
return -EINVAL;
}
"Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
- iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
+ iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);
return num_tbs;
}
u32 txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+ size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
size_t tb0_buf_sz;
int i;
(unsigned long)txq);
txq->trans_pcie = trans_pcie;
- txq->q.n_window = slots_num;
+ txq->n_window = slots_num;
txq->entries = kcalloc(slots_num,
sizeof(struct iwl_pcie_txq_entry),
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->q.dma_addr, GFP_KERNEL);
+ &txq->dma_addr, GFP_KERNEL);
if (!txq->tfds)
goto error;
if (!txq->first_tb_bufs)
goto err_free_tfds;
- txq->q.id = txq_id;
+ txq->id = txq_id;
return 0;
err_free_tfds:
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
if (txq->entries && txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++)
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
/* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(&txq->q, slots_num, txq_id);
+ ret = iwl_queue_init(txq, slots_num, txq_id);
if (ret)
return ret;
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr);
+ txq->dma_addr);
else
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr >> 8);
+ txq->dma_addr >> 8);
return 0;
}
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
spin_lock_bh(&txq->lock);
- while (q->write_ptr != q->read_ptr) {
+ while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, q->read_ptr);
+ txq_id, txq->read_ptr);
if (txq_id != trans_pcie->cmd_queue) {
- struct sk_buff *skb = txq->entries[q->read_ptr].skb;
+ struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
if (txq_id != trans_pcie->cmd_queue) {
IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
- q->id);
+ txq->id);
iwl_trans_unref(trans);
} else {
iwl_pcie_clear_cmd_in_flight(trans);
/* De-alloc array of command/tx buffers */
if (txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < txq->q.n_window; i++) {
+ for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
}
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
- txq->tfds, txq->q.dma_addr);
- txq->q.dma_addr = 0;
+ trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+ txq->tfds, txq->dma_addr);
+ txq->dma_addr = 0;
txq->tfds = NULL;
dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->q.n_window,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
txq->first_tb_bufs, txq->first_tb_dma);
}
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr);
+ txq->dma_addr);
else
iwl_write_direct32(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr >> 8);
+ txq->dma_addr >> 8);
iwl_pcie_txq_unmap(trans, txq_id);
- txq->q.read_ptr = 0;
- txq->q.write_ptr = 0;
+ txq->read_ptr = 0;
+ txq->write_ptr = 0;
}
/* Tell NIC where to find the "keep warm" buffer */
* if empty delete timer, otherwise move timer forward
* since we're making progress on this queue
*/
- if (txq->q.read_ptr == txq->q.write_ptr)
+ if (txq->read_ptr == txq->write_ptr)
del_timer(&txq->stuck_timer);
else
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
- struct iwl_queue *q = &txq->q;
int last_to_free;
/* This function is not meant to release cmd queue*/
goto out;
}
- if (txq->q.read_ptr == tfd_num)
+ if (txq->read_ptr == tfd_num)
goto out;
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->q.read_ptr, tfd_num, ssn);
+ txq_id, txq->read_ptr, tfd_num, ssn);
/* Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used. */
last_to_free = iwl_queue_dec_wrap(tfd_num);
- if (!iwl_queue_used(q, last_to_free)) {
+ if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
- q->write_ptr, q->read_ptr);
+ txq->write_ptr, txq->read_ptr);
goto out;
}
goto out;
for (;
- q->read_ptr != tfd_num;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
- struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb;
+ txq->read_ptr != tfd_num;
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+ struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
__skb_queue_tail(skbs, skb);
- txq->entries[txq->q.read_ptr].skb = NULL;
+ txq->entries[txq->read_ptr].skb = NULL;
- iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+ if (!trans->cfg->use_tfh)
+ iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
iwl_pcie_txq_free_tfd(trans, txq);
}
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+ if (iwl_queue_space(txq) > txq->low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
}
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ if (iwl_queue_space(txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
}
- if (q->read_ptr == q->write_ptr) {
- IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
+ if (txq->read_ptr == txq->write_ptr) {
+ IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
iwl_trans_unref(trans);
}
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
unsigned long flags;
int nfreed = 0;
lockdep_assert_held(&txq->lock);
- if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
+ if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
- q->write_ptr, q->read_ptr);
+ txq->write_ptr, txq->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
+ for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, q->write_ptr, q->read_ptr);
+ idx, txq->write_ptr, txq->read_ptr);
iwl_force_nmi(trans);
}
}
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
*/
iwl_scd_txq_disable_agg(trans, txq_id);
- ssn = txq->q.read_ptr;
+ ssn = txq->read_ptr;
}
}
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
- txq->q.read_ptr = (ssn & 0xff);
- txq->q.write_ptr = (ssn & 0xff);
+ txq->read_ptr = (ssn & 0xff);
+ txq->write_ptr = (ssn & 0xff);
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(ssn & 0xff) | (txq_id << 8));
txq->ampdu = !shared_mode;
}
+dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return trans_pcie->scd_bc_tbls.dma +
+ txq * sizeof(struct iwlagn_scd_bc_tbl);
+}
+
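/*
* The per-queue byte count tables sit back to back in the scd_bc_tbls
* DMA region, so (for example) queue 3's table starts at
* scd_bc_tbls.dma + 3 * sizeof(struct iwlagn_scd_bc_tbl).
*/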
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
bool configure_scd)
{
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- if (WARN(!trans_pcie->wide_cmd_header &&
+ if (WARN(!trans->wide_cmd_header &&
group_id > IWL_ALWAYS_LONG_GROUP,
"unsupported wide command %#x\n", cmd->id))
return -EINVAL;
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
goto free_dup_buf;
}
- idx = get_cmd_index(q, q->write_ptr);
+ idx = get_cmd_index(txq, txq->write_ptr);
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
copy_size = sizeof(struct iwl_cmd_header_wide);
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
cmd_pos = sizeof(struct iwl_cmd_header);
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
+ cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
phys_addr = dma_map_single(trans->dev, (void *)data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
}
- BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
- sizeof(out_meta->flags) * BITS_PER_BYTE);
+ BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
out_meta->flags = cmd->flags;
if (WARN_ON_ONCE(txq->entries[idx].free_buf))
kzfree(txq->entries[idx].free_buf);
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr && txq->wd_timeout)
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
}
/* Increment and update queue's write index */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
txq_id, trans_pcie->cmd_queue, sequence,
- trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
- trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
+ trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
+ trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
spin_lock_bh(&txq->lock);
- cmd_index = get_cmd_index(&txq->q, index);
+ cmd_index = get_cmd_index(txq, index);
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
- iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
+ iwl_pcie_tfd_unmap(trans, meta, txq, index);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
HOST_COMPLETE_TIMEOUT);
if (!ret) {
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
iwl_get_cmd_string(trans, cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
- q->read_ptr, q->write_ptr);
+ txq->read_ptr, txq->write_ptr);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
struct iwl_cmd_meta *out_meta,
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
- struct iwl_queue *q = &txq->q;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 tb2_len;
int i;
skb->data + hdr_len,
tb2_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
return -EINVAL;
}
iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
return -EINVAL;
}
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
- out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+ out_meta->tbs |= BIT(tb_idx);
}
trace_iwlwifi_dev_tx(trans->dev, skb,
- &txq->tfds[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
skb->data + hdr_len, tb2_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
- struct iwl_queue *q = &txq->q;
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
IEEE80211_CCMP_HDR_LEN : 0;
trace_iwlwifi_dev_tx(trans->dev, skb,
- &txq->tfds[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
NULL, 0);
return 0;
out_unmap:
- iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
return ret;
}
#else /* CONFIG_INET */
struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq;
- struct iwl_queue *q;
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
void *tb1_addr;
+ void *tfd;
u16 len, tb1_len;
bool wait_write_ptr;
__le16 fc;
bool amsdu;
txq = &trans_pcie->txq[txq_id];
- q = &txq->q;
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))
}
if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+ skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
__skb_linearize(skb))
return -ENOMEM;
spin_lock(&txq->lock);
- if (iwl_queue_space(q) < q->high_mark) {
+ if (iwl_queue_space(txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(q) < 3)) {
+ if (unlikely(iwl_queue_space(txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
*/
wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
WARN_ONCE(txq->ampdu &&
- (wifi_seq & 0xff) != q->write_ptr,
+ (wifi_seq & 0xff) != txq->write_ptr,
"Q: %d WiFi Seq %d tfdNum %d",
- txq_id, wifi_seq, q->write_ptr);
+ txq_id, wifi_seq, txq->write_ptr);
/* Set up driver data for this TFD */
- txq->entries[q->write_ptr].skb = skb;
- txq->entries[q->write_ptr].cmd = dev_cmd;
+ txq->entries[txq->write_ptr].skb = skb;
+ txq->entries[txq->write_ptr].cmd = dev_cmd;
dev_cmd->hdr.sequence =
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
+ INDEX_TO_SEQ(txq->write_ptr)));
- tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr);
+ tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
/* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[q->write_ptr].meta;
+ out_meta = &txq->entries[txq->write_ptr].meta;
out_meta->flags = 0;
/*
}
/* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr,
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
IWL_FIRST_TB_SIZE, true);
goto out_err;
}
+ tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
+ iwl_pcie_tfd_get_num_tbs(trans, tfd));
wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
if (txq->wd_timeout) {
/*
* If the TXQ is active, then set the timer, if not,
else
txq->frozen_expiry_remainder = txq->wd_timeout;
}
- IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
+ IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
iwl_trans_ref(trans);
}
/* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
return 0;
}
+/*
+ * CFG802.11 operation handler to set default mgmt key.
+ */
+static int
+mwifiex_cfg80211_set_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *netdev,
+ u8 key_index)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
+ struct mwifiex_ds_encrypt_key encrypt_key;
+
+ wiphy_dbg(wiphy, "set default mgmt key, key index=%d\n", key_index);
+
+ memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
+ encrypt_key.key_len = WLAN_KEY_LEN_CCMP;
+ encrypt_key.key_index = key_index;
+ encrypt_key.is_igtk_def_key = true;
+ eth_broadcast_addr(encrypt_key.mac_addr);
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET, true, &encrypt_key, true);
+}
+
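/*
* Note: cfg80211 reaches this handler through the NL80211_CMD_SET_KEY
* path, and per IEEE 802.11w the IGTK uses key indexes 4 and 5, so
* key_index is expected to be one of those values.
*/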
/*
* This function sends domain information to the firmware.
*
.leave_ibss = mwifiex_cfg80211_leave_ibss,
.add_key = mwifiex_cfg80211_add_key,
.del_key = mwifiex_cfg80211_del_key,
+ .set_default_mgmt_key = mwifiex_cfg80211_set_default_mgmt_key,
.mgmt_tx = mwifiex_cfg80211_mgmt_tx,
.mgmt_frame_register = mwifiex_cfg80211_mgmt_frame_register,
.remain_on_channel = mwifiex_cfg80211_remain_on_channel,
KEY_TYPE_ID_AES,
KEY_TYPE_ID_WAPI,
KEY_TYPE_ID_AES_CMAC,
+ KEY_TYPE_ID_AES_CMAC_DEF,
};
#define WPA_PN_SIZE 8
u8 is_igtk_key;
u8 is_current_wep_key;
u8 is_rx_seq_valid;
+ u8 is_igtk_def_key;
};
struct mwifiex_power_cfg {
struct mwifiex_private *priv;
int i;
+ if (!adapter)
+ goto exit_return;
+
if (down_interruptible(sem))
goto exit_sem_err;
- if (!adapter)
- goto exit_remove;
-
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
mwifiex_deauthenticate(priv, NULL);
rtnl_unlock();
}
-exit_remove:
up(sem);
exit_sem_err:
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+exit_return:
return 0;
}
memcpy(km->key_param_set.key_params.cmac_aes.key,
enc_key->key_material, enc_key->key_len);
len += sizeof(struct mwifiex_cmac_aes_param);
+ } else if (enc_key->is_igtk_def_key) {
+ mwifiex_dbg(adapter, INFO,
+ "%s: Set CMAC default Key index\n", __func__);
+ km->key_param_set.key_type = KEY_TYPE_ID_AES_CMAC_DEF;
+ km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
} else {
mwifiex_dbg(adapter, INFO,
"%s: Set AES Key\n", __func__);
enum nl80211_band band;
chan = *buf++;
- if (!chan)
+ if (!chan) {
+ kfree(regd);
return NULL;
+ }
chflags = *buf++;
band = (chan <= 14) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
freq = ieee80211_channel_to_frequency(chan, band);
u16 action = le16_to_cpu(reg->action);
u16 tlv, tlv_buf_len, tlv_buf_left;
struct mwifiex_ie_types_header *head;
+ struct ieee80211_regdomain *regd;
u8 *tlv_buf;
if (action != HostCmd_ACT_GEN_GET)
mwifiex_dbg_dump(priv->adapter, CMD_D, "CHAN:",
(u8 *)head + sizeof(*head),
tlv_buf_len);
- priv->adapter->regd =
- mwifiex_create_custom_regdomain(priv,
- (u8 *)head +
- sizeof(*head), tlv_buf_len);
+ regd = mwifiex_create_custom_regdomain(priv,
+ (u8 *)head + sizeof(*head), tlv_buf_len);
+ if (!IS_ERR(regd))
+ priv->adapter->regd = regd;
break;
}
} else {
mwifiex_dbg(adapter, DATA,
"%s: DATA\n", __func__);
+ mwifiex_write_data_complete(adapter, context->skb, 0,
+ urb->status ? -1 : 0);
for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
port = &card->port[i];
if (context->ep == port->tx_data_ep) {
}
}
adapter->data_sent = false;
- mwifiex_write_data_complete(adapter, context->skb, 0,
- urb->status ? -1 : 0);
}
if (card->mc_resync_flag)
struct usb_tx_data_port *port = NULL;
u8 *data = (u8 *)skb->data;
struct urb *tx_urb;
- int idx, ret;
+ int idx, ret = -EINPROGRESS;
if (adapter->is_suspended) {
mwifiex_dbg(adapter, ERROR,
if (atomic_read(&port->tx_data_urb_pending)
>= MWIFIEX_TX_DATA_URB) {
port->block_status = true;
- ret = -EBUSY;
- goto done;
+ adapter->data_sent =
+ mwifiex_usb_data_sent(adapter);
+ return -EBUSY;
}
if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB)
port->tx_data_ix = 0;
else
atomic_inc(&port->tx_data_urb_pending);
+ if (ep != card->tx_cmd_ep &&
+ atomic_read(&port->tx_data_urb_pending) ==
+ MWIFIEX_TX_DATA_URB) {
+ port->block_status = true;
+ adapter->data_sent = mwifiex_usb_data_sent(adapter);
+ ret = -ENOSR;
+ }
+
if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
mwifiex_dbg(adapter, ERROR,
"%s: usb_submit_urb failed\n", __func__);
} else {
atomic_dec(&port->tx_data_urb_pending);
port->block_status = false;
+ adapter->data_sent = false;
if (port->tx_data_ix)
port->tx_data_ix--;
else
port->tx_data_ix = MWIFIEX_TX_DATA_URB;
}
-
- return -1;
- } else {
- if (ep != card->tx_cmd_ep &&
- atomic_read(&port->tx_data_urb_pending) ==
- MWIFIEX_TX_DATA_URB) {
- port->block_status = true;
- ret = -ENOSR;
- goto done;
- }
+ ret = -1;
}
- return -EINPROGRESS;
-
-done:
- if (ep != card->tx_cmd_ep)
- adapter->data_sent = mwifiex_usb_data_sent(adapter);
-
return ret;
}
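/*
* A note on the return codes above: -EINPROGRESS reports that the URB
* was queued; -ENOSR reports that the URB was queued and the port just
* became full, so the caller stops scheduling TX data until a
* completion frees a slot; -EBUSY means nothing was queued.
*/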
#define RTL8XXXU_DEBUG_H2C 0x800
#define RTL8XXXU_DEBUG_ACTION 0x1000
#define RTL8XXXU_DEBUG_EFUSE 0x2000
+#define RTL8XXXU_DEBUG_INTERRUPT 0x4000
#define RTW_USB_CONTROL_MSG_TIMEOUT 500
#define RTL8XXXU_MAX_REG_POLL 500
int (*power_on) (struct rtl8xxxu_priv *priv);
void (*power_off) (struct rtl8xxxu_priv *priv);
void (*reset_8051) (struct rtl8xxxu_priv *priv);
- int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page);
+ int (*llt_init) (struct rtl8xxxu_priv *priv);
void (*init_phy_bb) (struct rtl8xxxu_priv *priv);
int (*init_phy_rf) (struct rtl8xxxu_priv *priv);
void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv);
void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv);
void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv);
void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv);
-int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page);
+int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start);
int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv);
int rtl8xxxu_gen2_h2c_cmd(struct rtl8xxxu_priv *priv,
struct h2c_cmd *h2c, int len);
int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv);
void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv);
-int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page);
+int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv,
return ret;
}
+static int rtl8192eu_active_to_lps(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int retry, retval;
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+ retry = 100;
+ retval = -EBUSY;
+ /*
+ * Poll the 32-bit wide register at 0x05f8 (REG_SCH_TX_CMD) until it
+ * reads 0x00000000, to ensure no TX is pending.
+ */
+ do {
+ val32 = rtl8xxxu_read32(priv, REG_SCH_TX_CMD);
+ if (!val32) {
+ retval = 0;
+ break;
+ }
+ } while (retry--);
+
+ /*
+ * Test retval rather than retry: the post-decrement leaves retry at
+ * -1 when the poll loop is exhausted, so !retry would miss the
+ * failure case.
+ */
+ if (retval) {
+ dev_warn(dev, "Failed to flush TX queue\n");
+ goto out;
+ }
+
+ /* Disable CCK and OFDM, clock gated */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BBRSTB;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ udelay(2);
+
+ /* Reset whole BB */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BB_GLB_RSTN;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ /* Reset MAC TRX */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 &= 0xff00;
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 &= ~CR_SECURITY_ENABLE;
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST);
+ val8 |= DUAL_TSF_TX_OK;
+ rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8);
+
+out:
+ return retval;
+}
+
+static int rtl8192eu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ int count, ret = 0;
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+
+ /* Switch DPDT_SEL_P output from register 0x65[2] */
+ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+ val8 &= ~LEDCFG2_DPDT_SELECT;
+ rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+ /* 0x0005[1] = 1 to turn off MAC by HW state machine */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ if ((val8 & BIT(1)) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (!count) {
+ dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
+ __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int rtl8192eu_emu_to_disabled(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ /* 0x04[12:11] = 01 enable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ val8 |= BIT(3);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ return 0;
+}
+
static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
{
u16 val16;
return ret;
}
+void rtl8192eu_power_off(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ rtl8xxxu_flush_fifo(priv);
+
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00);
+
+ rtl8192eu_active_to_lps(priv);
+
+ /* Reset Firmware if running in RAM */
+ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+ rtl8xxxu_firmware_self_reset(priv);
+
+ /* Reset MCU */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* Reset MCU ready status */
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+ rtl8xxxu_reset_8051(priv);
+
+ rtl8192eu_active_to_emu(priv);
+ rtl8192eu_emu_to_disabled(priv);
+}
+
static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
{
u32 val32;
.parse_efuse = rtl8192eu_parse_efuse,
.load_firmware = rtl8192eu_load_firmware,
.power_on = rtl8192eu_power_on,
- .power_off = rtl8xxxu_power_off,
+ .power_off = rtl8192eu_power_off,
.reset_8051 = rtl8xxxu_reset_8051,
.llt_init = rtl8xxxu_auto_llt_table,
.init_phy_bb = rtl8192eu_init_phy_bb,
return ret;
}
-int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv)
{
int ret;
int i;
+ u8 last_tx_page;
+
+ last_tx_page = priv->fops->total_page_num;
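+
+ /* Chain each TX buffer page to the next one; this builds the link
+ * list table (LLT) through which the MAC tracks TX buffer pages.
+ */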
for (i = 0; i < last_tx_page; i++) {
ret = rtl8xxxu_llt_write(priv, i, i + 1);
return ret;
}
-int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv)
{
u32 val32;
int ret = 0;
val32 = (nq << RQPN_NPQ_SHIFT) | (eq << RQPN_EPQ_SHIFT);
rtl8xxxu_write32(priv, REG_RQPN_NPQ, val32);
- pubq = fops->total_page_num - hq - lq - nq;
+ pubq = fops->total_page_num - hq - lq - nq - 1;
val32 = RQPN_LOAD;
val32 |= (hq << RQPN_HI_PQ_SHIFT);
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_fileops *fops = priv->fops;
bool macpower;
int ret;
u8 val8;
else
macpower = true;
- ret = priv->fops->power_on(priv);
+ ret = fops->power_on(priv);
if (ret < 0) {
dev_warn(dev, "%s: Failed power on\n", __func__);
goto exit;
/*
* Set RX page boundary
*/
- rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, priv->fops->trxff_boundary);
+ rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary);
ret = rtl8xxxu_download_firmware(priv);
dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
if (ret)
goto exit;
- if (priv->fops->phy_init_antenna_selection)
- priv->fops->phy_init_antenna_selection(priv);
+ if (fops->phy_init_antenna_selection)
+ fops->phy_init_antenna_selection(priv);
ret = rtl8xxxu_init_mac(priv);
if (ret)
goto exit;
- ret = priv->fops->init_phy_rf(priv);
+ ret = fops->init_phy_rf(priv);
if (ret)
goto exit;
/*
* Set TX buffer boundary
*/
- val8 = priv->fops->total_page_num + 1;
+ val8 = fops->total_page_num + 1;
rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
* The vendor drivers set PBP for all devices, except 8192e.
* There is no explanation for this in any of the sources.
*/
- val8 = (priv->fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) |
- (priv->fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT);
+ val8 = (fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) |
+ (fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT);
if (priv->rtl_chip != RTL8192E)
rtl8xxxu_write8(priv, REG_PBP, val8);
dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
if (!macpower) {
- ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM);
+ ret = fops->llt_init(priv);
if (ret) {
dev_warn(dev, "%s: LLT table init failed\n", __func__);
goto exit;
/*
* Chip specific quirks
*/
- priv->fops->usb_quirks(priv);
+ fops->usb_quirks(priv);
/*
* Enable TX report and TX report timer for 8723bu/8188eu/...
*/
- if (priv->fops->has_tx_report) {
+ if (fops->has_tx_report) {
val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
}
- if (priv->fops->init_aggregation)
- priv->fops->init_aggregation(priv);
+ if (fops->init_aggregation)
+ fops->init_aggregation(priv);
/*
* Enable CCK and OFDM block
/*
* Start out with default power levels for channel 6, 20MHz
*/
- priv->fops->set_tx_power(priv, 1, false);
+ fops->set_tx_power(priv, 1, false);
/* Let the 8051 take control of antenna setting */
if (priv->rtl_chip != RTL8192E) {
rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
- if (priv->fops->init_statistics)
- priv->fops->init_statistics(priv);
+ if (fops->init_statistics)
+ fops->init_statistics(priv);
if (priv->rtl_chip == RTL8192E) {
/*
rtl8723a_phy_lc_calibrate(priv);
- priv->fops->phy_iq_calibrate(priv);
+ fops->phy_iq_calibrate(priv);
/*
* This should enable thermal meter
*/
- if (priv->fops->gen2_thermal_meter)
+ if (fops->gen2_thermal_meter)
rtl8xxxu_write_rfreg(priv,
RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
else
struct device *dev = &priv->udev->dev;
int ret;
- dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_INTERRUPT)
+ dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
if (urb->status == 0) {
usb_anchor_urb(urb, &priv->int_anchor);
ret = usb_submit_urb(urb, GFP_ATOMIC);
#define REG_SCH_TXCMD 0x05d0
/* define REG_FW_TSF_SYNC_CNT 0x04a0 */
+#define REG_SCH_TX_CMD 0x05f8
#define REG_FW_RESET_TSF_CNT_1 0x05fc
#define REG_FW_RESET_TSF_CNT_0 0x05fd
#define REG_FW_BCN_DIS_CNT 0x05fe
mac->bw_40 = false;
mac->bw_80 = false;
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ channel_type);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ rtlpci->const_support_pciaspm);
break;
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", state_toset);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
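All of the RT_TRACE fixups that follow apply the same recipe: replace the bare "switch case not process(ed)" string with one that prints the offending value via %#x. The format specifier alone carries the improvement, as this trivial sketch shows:

    #include <stdio.h>

    int main(void)
    {
        unsigned int boxnum = 4;

        /* %#x adds the 0x prefix, so the log now identifies the case:
         * "switch case 0x4 not processed"
         */
        printf("switch case %#x not processed\n", boxnum);
        return 0;
    }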
*((u64 *)(val)) = tsf;
break; }
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
break;
}
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpath);
break;
default:
break;
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpath);
break;
default:
break;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
bwrite_success = true;
case HW_VAR_INT_AC:
*((bool *)(val)) = rtlpriv->dm.disable_tx_int;
break;
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
break;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
*((u64 *)(val)) = tsf;
}
break;
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
*((bool *)(val)) = rtlpriv->dm.current_mrc_switch;
break;
}
+ case HAL_DEF_WOWLAN:
+ break;
default: {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
break;
}
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
*((u64 *)(val)) = tsf;
}
break;
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n",
- variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
break;
case RF90_PATH_B:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
- break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
};
enum hw_variables {
- HW_VAR_ETHER_ADDR,
- HW_VAR_MULTICAST_REG,
- HW_VAR_BASIC_RATE,
- HW_VAR_BSSID,
- HW_VAR_MEDIA_STATUS,
- HW_VAR_SECURITY_CONF,
- HW_VAR_BEACON_INTERVAL,
- HW_VAR_ATIM_WINDOW,
- HW_VAR_LISTEN_INTERVAL,
- HW_VAR_CS_COUNTER,
- HW_VAR_DEFAULTKEY0,
- HW_VAR_DEFAULTKEY1,
- HW_VAR_DEFAULTKEY2,
- HW_VAR_DEFAULTKEY3,
- HW_VAR_SIFS,
- HW_VAR_R2T_SIFS,
- HW_VAR_DIFS,
- HW_VAR_EIFS,
- HW_VAR_SLOT_TIME,
- HW_VAR_ACK_PREAMBLE,
- HW_VAR_CW_CONFIG,
- HW_VAR_CW_VALUES,
- HW_VAR_RATE_FALLBACK_CONTROL,
- HW_VAR_CONTENTION_WINDOW,
- HW_VAR_RETRY_COUNT,
- HW_VAR_TR_SWITCH,
- HW_VAR_COMMAND,
- HW_VAR_WPA_CONFIG,
- HW_VAR_AMPDU_MIN_SPACE,
- HW_VAR_SHORTGI_DENSITY,
- HW_VAR_AMPDU_FACTOR,
- HW_VAR_MCS_RATE_AVAILABLE,
- HW_VAR_AC_PARAM,
- HW_VAR_ACM_CTRL,
- HW_VAR_DIS_Req_Qsize,
- HW_VAR_CCX_CHNL_LOAD,
- HW_VAR_CCX_NOISE_HISTOGRAM,
- HW_VAR_CCX_CLM_NHM,
- HW_VAR_TxOPLimit,
- HW_VAR_TURBO_MODE,
- HW_VAR_RF_STATE,
- HW_VAR_RF_OFF_BY_HW,
- HW_VAR_BUS_SPEED,
- HW_VAR_SET_DEV_POWER,
-
- HW_VAR_RCR,
- HW_VAR_RATR_0,
- HW_VAR_RRSR,
- HW_VAR_CPU_RST,
- HW_VAR_CHECK_BSSID,
- HW_VAR_LBK_MODE,
- HW_VAR_AES_11N_FIX,
- HW_VAR_USB_RX_AGGR,
- HW_VAR_USER_CONTROL_TURBO_MODE,
- HW_VAR_RETRY_LIMIT,
- HW_VAR_INIT_TX_RATE,
- HW_VAR_TX_RATE_REG,
- HW_VAR_EFUSE_USAGE,
- HW_VAR_EFUSE_BYTES,
- HW_VAR_AUTOLOAD_STATUS,
- HW_VAR_RF_2R_DISABLE,
- HW_VAR_SET_RPWM,
- HW_VAR_H2C_FW_PWRMODE,
- HW_VAR_H2C_FW_JOINBSSRPT,
- HW_VAR_H2C_FW_MEDIASTATUSRPT,
- HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- HW_VAR_FW_PSMODE_STATUS,
- HW_VAR_INIT_RTS_RATE,
- HW_VAR_RESUME_CLK_ON,
- HW_VAR_FW_LPS_ACTION,
- HW_VAR_1X1_RECV_COMBINE,
- HW_VAR_STOP_SEND_BEACON,
- HW_VAR_TSF_TIMER,
- HW_VAR_IO_CMD,
-
- HW_VAR_RF_RECOVERY,
- HW_VAR_H2C_FW_UPDATE_GTK,
- HW_VAR_WF_MASK,
- HW_VAR_WF_CRC,
- HW_VAR_WF_IS_MAC_ADDR,
- HW_VAR_H2C_FW_OFFLOAD,
- HW_VAR_RESET_WFCRC,
-
- HW_VAR_HANDLE_FW_C2H,
- HW_VAR_DL_FW_RSVD_PAGE,
- HW_VAR_AID,
- HW_VAR_HW_SEQ_ENABLE,
- HW_VAR_CORRECT_TSF,
- HW_VAR_BCN_VALID,
- HW_VAR_FWLPS_RF_ON,
- HW_VAR_DUAL_TSF_RST,
- HW_VAR_SWITCH_EPHY_WoWLAN,
- HW_VAR_INT_MIGRATION,
- HW_VAR_INT_AC,
- HW_VAR_RF_TIMING,
-
- HAL_DEF_WOWLAN,
- HW_VAR_MRC,
- HW_VAR_KEEP_ALIVE,
- HW_VAR_NAV_UPPER,
-
- HW_VAR_MGT_FILTER,
- HW_VAR_CTRL_FILTER,
- HW_VAR_DATA_FILTER,
+ HW_VAR_ETHER_ADDR = 0x0,
+ HW_VAR_MULTICAST_REG = 0x1,
+ HW_VAR_BASIC_RATE = 0x2,
+ HW_VAR_BSSID = 0x3,
+ HW_VAR_MEDIA_STATUS = 0x4,
+ HW_VAR_SECURITY_CONF = 0x5,
+ HW_VAR_BEACON_INTERVAL = 0x6,
+ HW_VAR_ATIM_WINDOW = 0x7,
+ HW_VAR_LISTEN_INTERVAL = 0x8,
+ HW_VAR_CS_COUNTER = 0x9,
+ HW_VAR_DEFAULTKEY0 = 0xa,
+ HW_VAR_DEFAULTKEY1 = 0xb,
+ HW_VAR_DEFAULTKEY2 = 0xc,
+ HW_VAR_DEFAULTKEY3 = 0xd,
+ HW_VAR_SIFS = 0xe,
+ HW_VAR_R2T_SIFS = 0xf,
+ HW_VAR_DIFS = 0x10,
+ HW_VAR_EIFS = 0x11,
+ HW_VAR_SLOT_TIME = 0x12,
+ HW_VAR_ACK_PREAMBLE = 0x13,
+ HW_VAR_CW_CONFIG = 0x14,
+ HW_VAR_CW_VALUES = 0x15,
+ HW_VAR_RATE_FALLBACK_CONTROL = 0x16,
+ HW_VAR_CONTENTION_WINDOW = 0x17,
+ HW_VAR_RETRY_COUNT = 0x18,
+ HW_VAR_TR_SWITCH = 0x19,
+ HW_VAR_COMMAND = 0x1a,
+ HW_VAR_WPA_CONFIG = 0x1b,
+ HW_VAR_AMPDU_MIN_SPACE = 0x1c,
+ HW_VAR_SHORTGI_DENSITY = 0x1d,
+ HW_VAR_AMPDU_FACTOR = 0x1e,
+ HW_VAR_MCS_RATE_AVAILABLE = 0x1f,
+ HW_VAR_AC_PARAM = 0x20,
+ HW_VAR_ACM_CTRL = 0x21,
+ HW_VAR_DIS_Req_Qsize = 0x22,
+ HW_VAR_CCX_CHNL_LOAD = 0x23,
+ HW_VAR_CCX_NOISE_HISTOGRAM = 0x24,
+ HW_VAR_CCX_CLM_NHM = 0x25,
+ HW_VAR_TxOPLimit = 0x26,
+ HW_VAR_TURBO_MODE = 0x27,
+ HW_VAR_RF_STATE = 0x28,
+ HW_VAR_RF_OFF_BY_HW = 0x29,
+ HW_VAR_BUS_SPEED = 0x2a,
+ HW_VAR_SET_DEV_POWER = 0x2b,
+
+ HW_VAR_RCR = 0x2c,
+ HW_VAR_RATR_0 = 0x2d,
+ HW_VAR_RRSR = 0x2e,
+ HW_VAR_CPU_RST = 0x2f,
+ HW_VAR_CHECK_BSSID = 0x30,
+ HW_VAR_LBK_MODE = 0x31,
+ HW_VAR_AES_11N_FIX = 0x32,
+ HW_VAR_USB_RX_AGGR = 0x33,
+ HW_VAR_USER_CONTROL_TURBO_MODE = 0x34,
+ HW_VAR_RETRY_LIMIT = 0x35,
+ HW_VAR_INIT_TX_RATE = 0x36,
+ HW_VAR_TX_RATE_REG = 0x37,
+ HW_VAR_EFUSE_USAGE = 0x38,
+ HW_VAR_EFUSE_BYTES = 0x39,
+ HW_VAR_AUTOLOAD_STATUS = 0x3a,
+ HW_VAR_RF_2R_DISABLE = 0x3b,
+ HW_VAR_SET_RPWM = 0x3c,
+ HW_VAR_H2C_FW_PWRMODE = 0x3d,
+ HW_VAR_H2C_FW_JOINBSSRPT = 0x3e,
+ HW_VAR_H2C_FW_MEDIASTATUSRPT = 0x3f,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD = 0x40,
+ HW_VAR_FW_PSMODE_STATUS = 0x41,
+ HW_VAR_INIT_RTS_RATE = 0x42,
+ HW_VAR_RESUME_CLK_ON = 0x43,
+ HW_VAR_FW_LPS_ACTION = 0x44,
+ HW_VAR_1X1_RECV_COMBINE = 0x45,
+ HW_VAR_STOP_SEND_BEACON = 0x46,
+ HW_VAR_TSF_TIMER = 0x47,
+ HW_VAR_IO_CMD = 0x48,
+
+ HW_VAR_RF_RECOVERY = 0x49,
+ HW_VAR_H2C_FW_UPDATE_GTK = 0x4a,
+ HW_VAR_WF_MASK = 0x4b,
+ HW_VAR_WF_CRC = 0x4c,
+ HW_VAR_WF_IS_MAC_ADDR = 0x4d,
+ HW_VAR_H2C_FW_OFFLOAD = 0x4e,
+ HW_VAR_RESET_WFCRC = 0x4f,
+
+ HW_VAR_HANDLE_FW_C2H = 0x50,
+ HW_VAR_DL_FW_RSVD_PAGE = 0x51,
+ HW_VAR_AID = 0x52,
+ HW_VAR_HW_SEQ_ENABLE = 0x53,
+ HW_VAR_CORRECT_TSF = 0x54,
+ HW_VAR_BCN_VALID = 0x55,
+ HW_VAR_FWLPS_RF_ON = 0x56,
+ HW_VAR_DUAL_TSF_RST = 0x57,
+ HW_VAR_SWITCH_EPHY_WoWLAN = 0x58,
+ HW_VAR_INT_MIGRATION = 0x59,
+ HW_VAR_INT_AC = 0x5a,
+ HW_VAR_RF_TIMING = 0x5b,
+
+ HAL_DEF_WOWLAN = 0x5c,
+ HW_VAR_MRC = 0x5d,
+ HW_VAR_KEEP_ALIVE = 0x5e,
+ HW_VAR_NAV_UPPER = 0x5f,
+
+ HW_VAR_MGT_FILTER = 0x60,
+ HW_VAR_CTRL_FILTER = 0x61,
+ HW_VAR_DATA_FILTER = 0x62,
};
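Pinning explicit values onto enum hw_variables means the %#x codes emitted by the trace messages above stay meaningful even if enumerators are later inserted or reordered. A compile-time check sketched against a hypothetical two-entry subset (demo names only, not the patch's code):

    #include <assert.h>

    enum demo_hw_variables {
        DEMO_HW_VAR_TSF_TIMER = 0x47,
        DEMO_HAL_DEF_WOWLAN   = 0x5c,
    };

    /* C11 static_assert: a log line reading "switch case 0x5c not
     * processed" must keep pointing at HAL_DEF_WOWLAN. */
    static_assert(DEMO_HAL_DEF_WOWLAN == 0x5c,
                  "0x5c must map to HAL_DEF_WOWLAN");

    int main(void)
    {
        return 0;
    }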
enum rt_media_status {
return ret;
}
-#define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
-
static int wl18xx_load_conf_file(struct device *dev, struct wlcore_conf *conf,
- struct wl18xx_priv_conf *priv_conf)
+ struct wl18xx_priv_conf *priv_conf,
+ const char *file)
{
struct wlcore_conf_file *conf_file;
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL18XX_CONF_FILE_NAME, dev);
+ ret = request_firmware(&fw, file, dev);
if (ret < 0) {
wl1271_error("could not get configuration binary %s: %d",
- WL18XX_CONF_FILE_NAME, ret);
+ file, ret);
return ret;
}
if (fw->size != WL18XX_CONF_SIZE) {
- wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
- WL18XX_CONF_SIZE, fw->size);
+ wl1271_error("%s configuration binary size is wrong, expected %zu got %zu",
+ file, WL18XX_CONF_SIZE, fw->size);
ret = -EINVAL;
goto out_release;
}
static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
{
+ struct platform_device *pdev = wl->pdev;
+ struct wlcore_platdev_data *pdata = dev_get_platdata(&pdev->dev);
struct wl18xx_priv *priv = wl->priv;
- if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf) < 0) {
+ if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf,
+ pdata->family->cfg_name) < 0) {
wl1271_warning("falling back to default config");
/* apply driver default configuration */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL18XX_FW_NAME);
-MODULE_FIRMWARE(WL18XX_CONF_FILE_NAME);
int wlcore_boot_upload_nvs(struct wl1271 *wl)
{
+ struct platform_device *pdev = wl->pdev;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
+ const char *nvs_name = "unknown";
size_t nvs_len, burst_len;
int i;
u32 dest_addr, val;
return -ENODEV;
}
+ if (pdev_data && pdev_data->family)
+ nvs_name = pdev_data->family->nvs_name;
+
if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) {
struct wl1271_nvs_file *nvs =
(struct wl1271_nvs_file *)wl->nvs;
if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
(wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
wl->enable_11a)) {
- wl1271_error("nvs size is not as expected: %zu != %zu",
- wl->nvs_len, sizeof(struct wl1271_nvs_file));
+ wl1271_error("%s size is not as expected: %zu != %zu",
+ nvs_name, wl->nvs_len,
+ sizeof(struct wl1271_nvs_file));
kfree(wl->nvs);
wl->nvs = NULL;
wl->nvs_len = 0;
if (nvs->general_params.dual_mode_select)
wl->enable_11a = true;
} else {
- wl1271_error("nvs size is not as expected: %zu != %zu",
- wl->nvs_len,
+ wl1271_error("%s size is not as expected: %zu != %zu",
+ nvs_name, wl->nvs_len,
sizeof(struct wl128x_nvs_file));
kfree(wl->nvs);
wl->nvs = NULL;
goto out;
}
wl->nvs_len = fw->size;
- } else {
+ } else if (pdev_data->family->nvs_name) {
wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
- WL12XX_NVS_NAME);
+ pdev_data->family->nvs_name);
+ wl->nvs = NULL;
+ wl->nvs_len = 0;
+ } else {
wl->nvs = NULL;
wl->nvs_len = 0;
}
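The reworked fallback reduces to a three-way decision keyed on the family data: request the family's NVS file, log-and-continue if that file is absent, or skip the request entirely when the family (wl18xx) defines no nvs_name. A condensed sketch of that decision (struct and helper names are illustrative, not wlcore API):

    #include <stddef.h>

    struct demo_family {
        const char *nvs_name;    /* NULL for wl18xx-style families */
    };

    /* Return the NVS file to request, or NULL to proceed without one. */
    static const char *demo_nvs_file(const struct demo_family *family)
    {
        if (family && family->nvs_name)
            return family->nvs_name;
        return NULL;
    }

    int main(void)
    {
        struct demo_family wl127x = { "ti-connectivity/wl127x-nvs.bin" };
        struct demo_family wl18xx = { NULL };

        return demo_nvs_file(&wl127x) && !demo_nvs_file(&wl18xx) ? 0 : 1;
    }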
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
- int ret;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
+ const char *nvs_name;
+ int ret = 0;
- if (!wl->ops || !wl->ptable)
+ if (!wl->ops || !wl->ptable || !pdev_data)
return -EINVAL;
wl->dev = &pdev->dev;
wl->pdev = pdev;
platform_set_drvdata(pdev, wl);
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
- WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
- wl, wlcore_nvs_cb);
- if (ret < 0) {
- wl1271_error("request_firmware_nowait failed: %d", ret);
- complete_all(&wl->nvs_loading_complete);
+ if (pdev_data->family && pdev_data->family->nvs_name) {
+ nvs_name = pdev_data->family->nvs_name;
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ nvs_name, &pdev->dev, GFP_KERNEL,
+ wl, wlcore_nvs_cb);
+ if (ret < 0) {
+ wl1271_error("request_firmware_nowait failed for %s: %d",
+ nvs_name, ret);
+ complete_all(&wl->nvs_loading_complete);
+ }
+ } else {
+ wlcore_nvs_cb(NULL, wl);
}
return ret;
int wlcore_remove(struct platform_device *pdev)
{
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct wl1271 *wl = platform_get_drvdata(pdev);
- wait_for_completion(&wl->nvs_loading_complete);
+ if (pdev_data->family && pdev_data->family->nvs_name)
+ wait_for_completion(&wl->nvs_loading_complete);
if (!wl->initialized)
return 0;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL12XX_NVS_NAME);
};
#ifdef CONFIG_OF
+
+static const struct wilink_family_data wl127x_data = {
+ .name = "wl127x",
+ .nvs_name = "ti-connectivity/wl127x-nvs.bin",
+};
+
+static const struct wilink_family_data wl128x_data = {
+ .name = "wl128x",
+ .nvs_name = "ti-connectivity/wl128x-nvs.bin",
+};
+
+static const struct wilink_family_data wl18xx_data = {
+ .name = "wl18xx",
+ .cfg_name = "ti-connectivity/wl18xx-conf.bin",
+};
+
static const struct of_device_id wlcore_sdio_of_match_table[] = {
- { .compatible = "ti,wl1271" },
- { .compatible = "ti,wl1273" },
- { .compatible = "ti,wl1281" },
- { .compatible = "ti,wl1283" },
- { .compatible = "ti,wl1801" },
- { .compatible = "ti,wl1805" },
- { .compatible = "ti,wl1807" },
- { .compatible = "ti,wl1831" },
- { .compatible = "ti,wl1835" },
- { .compatible = "ti,wl1837" },
+ { .compatible = "ti,wl1271", .data = &wl127x_data },
+ { .compatible = "ti,wl1273", .data = &wl127x_data },
+ { .compatible = "ti,wl1281", .data = &wl128x_data },
+ { .compatible = "ti,wl1283", .data = &wl128x_data },
+ { .compatible = "ti,wl1801", .data = &wl18xx_data },
+ { .compatible = "ti,wl1805", .data = &wl18xx_data },
+ { .compatible = "ti,wl1807", .data = &wl18xx_data },
+ { .compatible = "ti,wl1831", .data = &wl18xx_data },
+ { .compatible = "ti,wl1835", .data = &wl18xx_data },
+ { .compatible = "ti,wl1837", .data = &wl18xx_data },
{ }
};
struct wlcore_platdev_data *pdev_data)
{
struct device_node *np = dev->of_node;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(wlcore_sdio_of_match_table, np);
+ if (!of_id)
+ return -ENODEV;
- if (!np || !of_match_node(wlcore_sdio_of_match_table, np))
- return -ENODATA;
+ pdev_data->family = of_id->data;
*irq = irq_of_parse_and_map(np, 0);
if (!*irq) {
static int wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct wlcore_platdev_data pdev_data;
+ struct wlcore_platdev_data *pdev_data;
struct wl12xx_sdio_glue *glue;
struct resource res[1];
mmc_pm_flag_t mmcflags;
if (func->num != 0x02)
return -ENODEV;
- memset(&pdev_data, 0x00, sizeof(pdev_data));
- pdev_data.if_ops = &sdio_ops;
+ pdev_data = devm_kzalloc(&func->dev, sizeof(*pdev_data), GFP_KERNEL);
+ if (!pdev_data)
+ return -ENOMEM;
- glue = kzalloc(sizeof(*glue), GFP_KERNEL);
- if (!glue) {
- dev_err(&func->dev, "can't allocate glue\n");
- goto out;
- }
+ pdev_data->if_ops = &sdio_ops;
+
+ glue = devm_kzalloc(&func->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
glue->dev = &func->dev;
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- ret = wlcore_probe_of(&func->dev, &irq, &pdev_data);
+ ret = wlcore_probe_of(&func->dev, &irq, pdev_data);
if (ret)
- goto out_free_glue;
+ goto out;
/* if sdio can keep power while host is suspended, enable wow */
mmcflags = sdio_get_host_pm_caps(func);
dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
if (mmcflags & MMC_PM_KEEP_POWER)
- pdev_data.pwr_in_suspend = true;
+ pdev_data->pwr_in_suspend = true;
sdio_set_drvdata(func, glue);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device");
ret = -ENOMEM;
- goto out_free_glue;
+ goto out;
}
glue->core->dev.parent = &func->dev;
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, &pdev_data,
- sizeof(pdev_data));
+ ret = platform_device_add_data(glue->core, pdev_data,
+ sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
out_dev_put:
platform_device_put(glue->core);
-out_free_glue:
- kfree(glue);
-
out:
return ret;
}
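Switching pdev_data and glue over to devm_kzalloc() is what lets the out_free_glue label disappear: device-managed allocations are released automatically on probe failure or unbind, so the error paths need no kfree(). A generic kernel-context sketch of the shape (demo_probe and struct demo_glue are hypothetical, not wlcore API, and this only compiles inside a kernel tree):

    #include <linux/device.h>
    #include <linux/slab.h>

    struct demo_glue {
        struct device *dev;
    };

    /* Hypothetical probe body: no kfree()/goto unwinding needed, the
     * allocation lives exactly as long as the device binding does. */
    static int demo_probe(struct device *dev)
    {
        struct demo_glue *glue;

        glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
        if (!glue)
            return -ENOMEM;

        glue->dev = dev;
        dev_set_drvdata(dev, glue);
        return 0;
    }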
#define WSPI_MAX_NUM_OF_CHUNKS \
((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
-
-struct wilink_familiy_data {
- char name[8];
+static const struct wilink_family_data wl127x_data = {
+ .name = "wl127x",
+ .nvs_name = "ti-connectivity/wl127x-nvs.bin",
};
-static const struct wilink_familiy_data *wilink_data;
-
-static const struct wilink_familiy_data wl18xx_data = {
- .name = "wl18xx",
+static const struct wilink_family_data wl128x_data = {
+ .name = "wl128x",
+ .nvs_name = "ti-connectivity/wl128x-nvs.bin",
};
-static const struct wilink_familiy_data wl12xx_data = {
- .name = "wl12xx",
+static const struct wilink_family_data wl18xx_data = {
+ .name = "wl18xx",
+ .cfg_name = "ti-connectivity/wl18xx-conf.bin",
};
struct wl12xx_spi_glue {
};
static const struct of_device_id wlcore_spi_of_match_table[] = {
- { .compatible = "ti,wl1271", .data = &wl12xx_data},
- { .compatible = "ti,wl1273", .data = &wl12xx_data},
- { .compatible = "ti,wl1281", .data = &wl12xx_data},
- { .compatible = "ti,wl1283", .data = &wl12xx_data},
+ { .compatible = "ti,wl1271", .data = &wl127x_data},
+ { .compatible = "ti,wl1273", .data = &wl127x_data},
+ { .compatible = "ti,wl1281", .data = &wl128x_data},
+ { .compatible = "ti,wl1283", .data = &wl128x_data},
{ .compatible = "ti,wl1801", .data = &wl18xx_data},
{ .compatible = "ti,wl1805", .data = &wl18xx_data},
{ .compatible = "ti,wl1807", .data = &wl18xx_data},
if (!of_id)
return -ENODEV;
- wilink_data = of_id->data;
- dev_info(&spi->dev, "selected chip familiy is %s\n",
- wilink_data->name);
+ pdev_data->family = of_id->data;
+ dev_info(&spi->dev, "selected chip family is %s\n",
+ pdev_data->family->name);
if (of_find_property(dt_node, "clock-xtal", NULL))
pdev_data->ref_clock_xtal = true;
static int wl1271_probe(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue;
- struct wlcore_platdev_data pdev_data;
+ struct wlcore_platdev_data *pdev_data;
struct resource res[1];
int ret;
- memset(&pdev_data, 0x00, sizeof(pdev_data));
+ pdev_data = devm_kzalloc(&spi->dev, sizeof(*pdev_data), GFP_KERNEL);
+ if (!pdev_data)
+ return -ENOMEM;
- pdev_data.if_ops = &spi_ops;
+ pdev_data->if_ops = &spi_ops;
glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL);
if (!glue) {
return PTR_ERR(glue->reg);
}
- ret = wlcore_probe_of(spi, glue, &pdev_data);
+ ret = wlcore_probe_of(spi, glue, pdev_data);
if (ret) {
dev_err(glue->dev,
"can't get device tree parameters (%d)\n", ret);
return ret;
}
- glue->core = platform_device_alloc(wilink_data->name,
+ glue->core = platform_device_alloc(pdev_data->family->name,
PLATFORM_DEVID_AUTO);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device\n");
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, &pdev_data,
- sizeof(pdev_data));
+ ret = platform_device_add_data(glue->core, pdev_data,
+ sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
#include "conf.h"
#include "ini.h"
-/*
- * wl127x and wl128x are using the same NVS file name. However, the
- * ini parameters between them are different. The driver validates
- * the correct NVS size in wl1271_boot_upload_nvs().
- */
-#define WL12XX_NVS_NAME "ti-connectivity/wl1271-nvs.bin"
+struct wilink_family_data {
+ const char *name;
+ const char *nvs_name; /* wl12xx nvs file */
+ const char *cfg_name; /* wl18xx cfg file */
+};
#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
struct wlcore_platdev_data {
struct wl1271_if_operations *if_ops;
+ const struct wilink_family_data *family;
bool ref_clock_xtal; /* specify whether the clock is XTAL or not */
u32 ref_clock_freq; /* in Hertz */