and
- phy-handle: See ethernet.txt file in the same directory.
+ - phy-mode: See ethernet.txt file in the same directory.
or
If this option is enabled, the connection tracking code will
provide userspace with connection tracking events via ctnetlink.
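	As a rough illustration only (not part of this patch), the following is a
	minimal userspace sketch that consumes these ctnetlink events through the
	libnetfilter_conntrack library; the headers, constants and link flags are
	assumptions about that library's API and may differ on a given system.

	  #include <stdio.h>
	  #include <libnetfilter_conntrack/libnetfilter_conntrack.h>

	  /* Print every conntrack event delivered via ctnetlink. */
	  static int event_cb(enum nf_conntrack_msg_type type,
	                      struct nf_conntrack *ct, void *data)
	  {
	          char buf[1024];

	          nfct_snprintf(buf, sizeof(buf), ct, type, NFCT_O_DEFAULT, 0);
	          printf("%s\n", buf);
	          return NFCT_CB_CONTINUE;
	  }

	  int main(void)
	  {
	          struct nfct_handle *h;

	          /* Subscribe to the new/update/destroy conntrack event groups. */
	          h = nfct_open(CONNTRACK, NF_NETLINK_CONNTRACK_NEW |
	                                   NF_NETLINK_CONNTRACK_UPDATE |
	                                   NF_NETLINK_CONNTRACK_DESTROY);
	          if (!h)
	                  return 1;

	          nfct_callback_register(h, NFCT_T_ALL, event_cb, NULL);
	          nfct_catch(h);          /* blocks and dispatches events */
	          nfct_close(h);
	          return 0;
	  }

	Such a program would typically be linked with -lnetfilter_conntrack.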
-nf_conntrack_events_retry_timeout - INTEGER (seconds)
- default 15
-
- This option is only relevant when "reliable connection tracking
- events" are used. Normally, ctnetlink is "lossy", that is,
- events are normally dropped when userspace listeners can't keep up.
-
- Userspace can request "reliable event mode". When this mode is
- active, the conntrack will only be destroyed after the event was
- delivered. If event delivery fails, the kernel periodically
- re-tries to send the event to userspace.
-
- This is the maximum interval the kernel should use when re-trying
- to deliver the destroy event.
-
- A higher number means there will be fewer delivery retries and it
- will take longer for a backlog to be processed.
-
nf_conntrack_expect_max - INTEGER
Maximum size of expectation table. Default value is
nf_conntrack_buckets / 256. Minimum is 1.
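	For example, assuming nf_conntrack_buckets were 65536 (a value chosen
	here purely for illustration), the expectation table would default to
	65536 / 256 = 256 entries.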
F: drivers/net/ethernet/broadcom/genet/
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Dept-HSGLinuxNICDev@qlogic.com
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Harish Patil <harish.patil@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2.*
F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Ariel Elior <ariel.elior@qlogic.com>
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <ariel.elior@cavium.com>
+M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
F: drivers/scsi/bfa/
BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M: Rasesh Mody <rasesh.mody@qlogic.com>
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/brocade/bna/
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT
-M: Manish Chopra <manish.chopra@qlogic.com>
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Rajesh Borundia <rajesh.borundia@qlogic.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Rahul Verma <rahul.verma@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
-W: http://www.qlogic.com
S: Supported
F: drivers/net/ethernet/qlogic/netxen/
F: drivers/scsi/qla4xxx/
QLOGIC QLA3XXX NETWORK DRIVER
-M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
-M: Ron Mercer <ron.mercer@qlogic.com>
-M: linux-driver@qlogic.com
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Dept-GELinuxNICDev@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@qlogic.com>
-M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
-M: Dept-GELinuxNICDev@qlogic.com
-M: linux-driver@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
QLOGIC QL4xxx ETHERNET DRIVER
-M: Yuval Mintz <Yuval.Mintz@qlogic.com>
-M: Ariel Elior <Ariel.Elior@qlogic.com>
-M: everest-linux-l2@qlogic.com
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <Ariel.Elior@cavium.com>
+M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qed/
BT_DBG("HCI device registered (hdev %p)", hdev);
dev_set_drvdata(&pdev->dev, hst);
- return err;
+ return 0;
}
static int bt_ti_remove(struct platform_device *pdev)
},
.driver_data = &acpi_active_low,
},
+ { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
+ .ident = "Lenovo ThinkPad 8",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ .driver_data = &acpi_active_low,
+ },
{ }
};
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
- resp.cache_line_size = L1_CACHE_BYTES;
+ resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
enum {
MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_CACHE_LINE_SIZE = 64,
};
static const u32 mlx5_ib_opcode[] = {
return 0;
}
+static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
+{
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+ /* For a kernel about to be kexec'd we want to keep the GPHY on for a
+ * successful MDIO bus scan to occur. If we did turn off the GPHY
+ * before (e.g.: port_disable), this will also power it back on.
+ *
+ * Do not rely on kexec_in_progress, just power the PHY on.
+ */
+ if (priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+}
+
#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
.remove = bcm_sf2_sw_remove,
+ .shutdown = bcm_sf2_sw_shutdown,
.driver = {
.name = "brcm-sf2",
.of_match_table = bcm_sf2_of_match,
phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- dev_err(bgmac->dev, "PHY connecton failed\n");
+ dev_err(bgmac->dev, "PHY connection failed\n");
return PTR_ERR(phy_dev);
}
memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
bp->cyclecounter.read = bnx2x_cyclecounter_read;
bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
- bp->cyclecounter.shift = 1;
+ bp->cyclecounter.shift = 0;
bp->cyclecounter.mult = 1;
}
}
static int alloc_uld_rxqs(struct adapter *adap,
- struct sge_uld_rxq_info *rxq_info,
- unsigned int nq, unsigned int offset, bool lro)
+ struct sge_uld_rxq_info *rxq_info, bool lro)
{
struct sge *s = &adap->sge;
- struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
- unsigned short *ids = rxq_info->rspq_id + offset;
- unsigned int per_chan = nq / adap->params.nports;
+ unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+ struct sge_ofld_rxq *q = rxq_info->uldrxq;
+ unsigned short *ids = rxq_info->rspq_id;
unsigned int bmap_idx = 0;
- int i, err, msi_idx;
+ unsigned int per_chan;
+ int i, err, msi_idx, que_idx = 0;
+
+ per_chan = rxq_info->nrxq / adap->params.nports;
if (adap->flags & USING_MSIX)
msi_idx = 1;
msi_idx = -((int)s->intrq.abs_id + 1);
for (i = 0; i < nq; i++, q++) {
+ if (i == rxq_info->nrxq) {
+ /* start allocation of concentrator queues */
+ per_chan = rxq_info->nciq / adap->params.nports;
+ que_idx = 0;
+ }
+
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
- adap->port[i / per_chan],
+ adap->port[que_idx++ / per_chan],
msi_idx,
q->fl.size ? &q->fl : NULL,
uldrx_handler,
if (err)
goto freeout;
if (msi_idx >= 0)
- rxq_info->msix_tbl[i + offset] = bmap_idx;
+ rxq_info->msix_tbl[i] = bmap_idx;
memset(&q->stats, 0, sizeof(q->stats));
if (ids)
ids[i] = q->rspq.abs_id;
}
return 0;
freeout:
- q = rxq_info->uldrxq + offset;
+ q = rxq_info->uldrxq;
for ( ; i; i--, q++) {
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
}
-
- /* We need to free rxq also in case of ciq allocation failure */
- if (offset) {
- q = rxq_info->uldrxq + offset;
- for ( ; i; i--, q++) {
- if (q->rspq.desc)
- free_rspq_fl(adap, &q->rspq,
- q->fl.size ? &q->fl : NULL);
- }
- }
return err;
}
return -ENOMEM;
}
- ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
- !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
- rxq_info->nrxq, lro));
+ ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
if (adap->flags & FULL_INIT_DONE &&
skb_put(skb, pkt_len - 4);
data = skb->data;
+ if (!is_copybreak && need_swap)
+ swap_buffer(data, pkt_len);
+
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC)
data = skb_pull_inline(skb, 2);
#endif
- if (!is_copybreak && need_swap)
- swap_buffer(data, pkt_len);
-
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
if (fep->bufdesc_ex)
spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
+static void ibmvnic_xport_event(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ ibmvnic_xport);
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ ibmvnic_free_inflight(adapter);
+ release_sub_crqs(adapter);
+ if (adapter->migrated) {
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ if (rc)
+ dev_err(dev, "Error after enable rc=%ld\n", rc);
+ adapter->migrated = false;
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc)
+ dev_err(dev, "Error sending init rc=%ld\n", rc);
+ }
+}
+
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
dev_info(dev, "Re-enabling adapter\n");
adapter->migrated = true;
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
- rc = ibmvnic_reenable_crq_queue(adapter);
- if (rc)
- dev_err(dev, "Error after enable rc=%ld\n", rc);
- adapter->migrated = false;
- rc = ibmvnic_send_crq_init(adapter);
- if (rc)
- dev_err(dev, "Error sending init rc=%ld\n", rc);
+ schedule_work(&adapter->ibmvnic_xport);
} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
dev_info(dev, "Backing device failover detected\n");
netif_carrier_off(netdev);
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
gen_crq->cmd);
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
+ schedule_work(&adapter->ibmvnic_xport);
}
return;
case IBMVNIC_CRQ_CMD_RSP:
SET_NETDEV_DEV(netdev, &dev->dev);
INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+ INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
spin_lock_init(&adapter->stats_lock);
u8 map_id;
struct work_struct vnic_crq_init;
+ struct work_struct ibmvnic_xport;
bool failover;
};
mp->txq_count = pd->tx_queue_count ? : 1;
}
+static int get_phy_mode(struct mv643xx_eth_private *mp)
+{
+ struct device *dev = mp->dev->dev.parent;
+ int iface = -1;
+
+ if (dev->of_node)
+ iface = of_get_phy_mode(dev->of_node);
+
+ /* Historical default if unspecified. We could also read/write
+ * the interface state in the PSC1
+ */
+ if (iface < 0)
+ iface = PHY_INTERFACE_MODE_GMII;
+ return iface;
+}
+
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
"orion-mdio-mii", addr);
phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!IS_ERR(phydev)) {
phy_addr_set(mp, addr);
break;
if (!dev)
return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
mp = netdev_priv(dev);
platform_set_drvdata(pdev, mp);
if (pd->phy_node) {
mp->phy = of_phy_connect(mp->dev, pd->phy_node,
mv643xx_eth_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!mp->phy)
err = -ENODEV;
else
dev->priv_flags |= IFF_UNICAST_FLT;
dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
- SET_NETDEV_DEV(dev, &pdev->dev);
-
if (mp->shared->win_protect)
wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_EQ_BUSY;
- if (eq)
- *eq = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
+ if (!err && eq)
+ *eq = r;
+
return err;
}
#include "mlx5_core.h"
+struct mlx5_db_pgdir {
+ struct list_head list;
+ unsigned long *bitmap;
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
/* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0.
*/
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
int node)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
struct mlx5_db_pgdir *pgdir;
pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
if (!pgdir)
return NULL;
- bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
+ pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+
+ if (!pgdir->bitmap) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ bitmap_fill(pgdir->bitmap, db_per_page);
pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
&pgdir->db_dma, node);
if (!pgdir->db_page) {
+ kfree(pgdir->bitmap);
kfree(pgdir);
return NULL;
}
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
int offset;
int i;
- i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
- if (i >= MLX5_DB_PER_PAGE)
+ i = find_first_bit(pgdir->bitmap, db_per_page);
+ if (i >= db_per_page)
return -ENOMEM;
__clear_bit(i, pgdir->bitmap);
db->u.pgdir = pgdir;
db->index = i;
- offset = db->index * L1_CACHE_BYTES;
+ offset = db->index * cache_line_size();
db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
db->dma = pgdir->db_dma + offset;
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
mutex_lock(&dev->priv.pgdir_mutex);
__set_bit(db->index, db->u.pgdir->bitmap);
- if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
+ if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir->bitmap);
kfree(db->u.pgdir);
}
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+#define MLX5E_DEFAULT_LRO_TIMEOUT 32
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
struct ieee_ets ets;
#endif
bool rx_am_enabled;
+ u32 lro_timeout;
};
struct mlx5e_tstamp {
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
#endif /* __MLX5_EN_H__ */
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
(priv->params.lro_wqe_sz -
ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
- MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[2]));
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
}
}
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+ int i;
+
+ /* The supported periods are organized in ascending order */
+ for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+ if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+ break;
+
+ return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
+
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
const struct mlx5e_profile *profile,
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->params.lro_timeout =
+ mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- unregister_netdev(netdev);
destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
for (vport = 1; vport < total_vfs; vport++)
mlx5_eswitch_unregister_vport_rep(esw, vport);
+ unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(mdev, priv);
}
struct mlx5e_priv *priv = rep->priv_data;
struct net_device *netdev = priv->netdev;
+ unregister_netdev(netdev);
mlx5e_detach_netdev(esw->dev, netdev);
mlx5e_destroy_netdev(esw->dev, priv);
}
mutex_unlock(&esw->state_lock);
}
-static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *vlan_grp = NULL;
int table_size = 2;
int err = 0;
- if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->egress.acl))
- return;
+ if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->egress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
mlx5_destroy_flow_group(vlan_grp);
if (err && !IS_ERR_OR_NULL(acl))
mlx5_destroy_flow_table(acl);
+ return err;
}
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
-static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
int table_size = 4;
int err = 0;
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->ingress.acl))
- return;
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->ingress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
}
kvfree(flow_group_in);
+ return err;
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
return 0;
}
- esw_vport_enable_ingress_acl(esw, vport);
+ err = esw_vport_enable_ingress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable ingress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
return 0;
}
- esw_vport_enable_egress_acl(esw, vport);
+ err = esw_vport_enable_egress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable egress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
fs_get_obj(ft, fg->node.parent);
dev = get_dev(&ft->node);
+ if (ft->autogroup.active)
+ ft->autogroup.num_groups--;
+
if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
tree_add_node(&fg->node, &ft->node);
/* Add node to group list */
- list_add(&fg->node.list, ft->node.children.prev);
+ list_add(&fg->node.list, prev_fg);
return fg;
}
return ERR_PTR(-EPERM);
lock_ref_node(&ft->node);
- fg = create_flow_group_common(ft, fg_in, &ft->node.children, false);
+ fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
unlock_ref_node(&ft->node);
return fg;
u32 *match_criteria)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct list_head *prev = &ft->node.children;
+ struct list_head *prev = ft->node.children.prev;
unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
void *match_criteria_addr;
goto err_out;
if (aging) {
+ counter->cache.lastuse = jiffies;
counter->aging = true;
spin_lock(&fc_stats->addlist_lock);
enum {
MLX5_NIC_IFC_FULL = 0,
MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2,
+ MLX5_NIC_IFC_INVALID = 3
};
-static u8 get_nic_interface(struct mlx5_core_dev *dev)
+enum {
+ MLX5_DROP_NEW_HEALTH_WORK,
+};
+
+static u8 get_nic_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
}
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+ if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
return 1;
if (ioread32be(&h->fw_ver) == 0xffffffff)
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
- u8 nic_interface = get_nic_interface(dev);
+ u8 nic_interface = get_nic_state(dev);
switch (nic_interface) {
case MLX5_NIC_IFC_FULL:
mlx5_disable_device(dev);
}
+static void health_recover(struct work_struct *work)
+{
+ struct mlx5_core_health *health;
+ struct delayed_work *dwork;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ u8 nic_state;
+
+ dwork = container_of(work, struct delayed_work, work);
+ health = container_of(dwork, struct mlx5_core_health, recover_work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ nic_state = get_nic_state(dev);
+ if (nic_state == MLX5_NIC_IFC_INVALID) {
+ dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
+ return;
+ }
+
+ dev_err(&dev->pdev->dev, "starting health recovery flow\n");
+ mlx5_recover_device(dev);
+}
+
+/* How much time to wait until health resetting the driver (in msecs) */
+#define MLX5_RECOVERY_DELAY_MSECS 60000
static void health_care(struct work_struct *work)
{
+ unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
mlx5_handle_bad_state(dev);
+
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ schedule_delayed_work(&health->recover_work, recover_delay);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
static const char *hsynd_str(u8 synd)
if (in_fatal(dev) && !health->sick) {
health->sick = true;
print_health_info(dev);
- schedule_work(&health->work);
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ queue_work(health->wq, &health->work);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
}
struct mlx5_core_health *health = &dev->priv.health;
init_timer(&health->timer);
+ health->sick = 0;
+ clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
del_timer_sync(&health->timer);
}
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ spin_lock(&health->wq_lock);
+ set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ spin_unlock(&health->wq_lock);
+ cancel_delayed_work_sync(&health->recover_work);
+ cancel_work_sync(&health->work);
+}
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- flush_work(&health->work);
+ destroy_workqueue(health->wq);
}
int mlx5_health_init(struct mlx5_core_dev *dev)
strcpy(name, "mlx5_health");
strcat(name, dev_name(&dev->pdev->dev));
+ health->wq = create_singlethread_workqueue(name);
kfree(name);
-
+ if (!health->wq)
+ return -ENOMEM;
+ spin_lock_init(&health->wq_lock);
INIT_WORK(&health->work, health_care);
+ INIT_DELAYED_WORK(&health->recover_work, health_recover);
return 0;
}
struct pci_dev *pdev = dev->pdev;
int err;
- err = mlx5_query_hca_caps(dev);
- if (err) {
- dev_err(&pdev->dev, "query hca failed\n");
- goto out;
- }
-
err = mlx5_query_board_id(dev);
if (err) {
dev_err(&pdev->dev, "query board id failed\n");
mlx5_start_health_poll(dev);
+ err = mlx5_query_hca_caps(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query hca failed\n");
+ goto err_stop_poll;
+ }
+
if (boot && mlx5_init_once(dev, priv)) {
dev_err(&pdev->dev, "sw objs init failed\n");
goto err_stop_poll;
struct mlx5_priv *priv = &dev->priv;
dev_info(&pdev->dev, "%s was called\n", __func__);
+
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- pci_save_state(pdev);
- mlx5_pci_disable_device(dev);
+ /* In case of kernel call save the pci state and drain health wq */
+ if (state) {
+ pci_save_state(pdev);
+ mlx5_drain_health_wq(dev);
+ mlx5_pci_disable_device(dev);
+ }
+
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
return PCI_ERS_RESULT_RECOVERED;
}
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
- mlx5_pci_err_detected(dev->pdev, 0);
-}
-
static void mlx5_pci_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+void mlx5_recover_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_disable_device(dev);
+ if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
+ mlx5_pci_resume(dev->pdev);
+}
+
static struct pci_driver mlx5_core_driver = {
.name = DRIVER_NAME,
.id_table = mlx5_core_pci_table,
unsigned long param);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
+void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
struct page *page;
+ u64 zero_addr = 1;
u64 addr;
int err;
int nid = dev_to_node(&dev->pdev->dev);
mlx5_core_warn(dev, "failed to allocate page\n");
return -ENOMEM;
}
+map:
addr = dma_map_page(&dev->pdev->dev, page, 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&dev->pdev->dev, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n");
err = -ENOMEM;
- goto out_alloc;
+ goto err_mapping;
}
+
+ /* Firmware doesn't support page with physical address 0 */
+ if (addr == 0) {
+ zero_addr = addr;
+ goto map;
+ }
+
err = insert_page(dev, addr, page, func_id);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
- goto out_mapping;
+ dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
}
- return 0;
-
-out_mapping:
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+err_mapping:
+ if (err)
+ __free_page(page);
-out_alloc:
- __free_page(page);
+ if (zero_addr == 0)
+ dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
return err;
}
.cmd_exec = mlxsw_pci_cmd_exec,
};
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+ const struct pci_device_id *id)
{
unsigned long end;
mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+ if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
+ msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ return 0;
+ }
+
wmb(); /* reset needs to be written before we read control register */
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
mlxsw_pci->pdev = pdev;
pci_set_drvdata(pdev, mlxsw_pci);
- err = mlxsw_pci_sw_reset(mlxsw_pci);
+ err = mlxsw_pci_sw_reset(mlxsw_pci, id);
if (err) {
dev_err(&pdev->dev, "Software reset failed\n");
goto err_sw_reset;
lpm_tree);
if (err)
goto err_left_struct_set;
+ memcpy(&lpm_tree->prefix_usage, prefix_usage,
+ sizeof(lpm_tree->prefix_usage));
return lpm_tree;
err_left_struct_set:
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
lpm_tree = &mlxsw_sp->router.lpm_trees[i];
- if (lpm_tree->proto == proto &&
+ if (lpm_tree->ref_count != 0 &&
+ lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage))
goto inc_ref_count;
return err;
}
-static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct fib_entry_notifier_info *fen_info)
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
if (mlxsw_sp->router.aborted)
- return 0;
+ return;
fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
- if (!fib_entry) {
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
- return -ENOENT;
- }
+ if (!fib_entry)
+ return;
if (fib_entry->ref_count == 1) {
mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
- return 0;
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
if (err)
return err;
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
if (!dcbx_info)
return -ENOMEM;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
if (rc) {
kfree(dcbx_info);
if (!dcbx_info)
return NULL;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
return NULL;
}
}
+ cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
rc = qed_nic_setup(cdev);
if (rc)
goto err;
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
u8 count);
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
-#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev);
+ channels->max_rx = QEDE_MAX_RSS_CNT(edev);
+ channels->max_tx = QEDE_MAX_RSS_CNT(edev);
channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
edev->fp_num_rx;
channels->tx_count = edev->fp_num_tx;
edev->req_queues = count;
edev->req_num_tx = channels->tx_count;
edev->req_num_rx = channels->rx_count;
+ /* Reset the indirection table if rx queue count is updated */
+ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
+ edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
+ memset(&edev->rss_params.rss_ind_table, 0,
+ sizeof(edev->rss_params.rss_ind_table));
+ }
+
if (netif_running(dev))
qede_reload(edev, NULL, NULL);
struct qede_dev *edev = netdev_priv(dev);
int i;
+ if (edev->dev_info.common.num_hwfns > 1) {
+ DP_INFO(edev,
+ "RSS configuration is not supported for 100G devices\n");
+ return -EOPNOTSUPP;
+ }
+
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
}
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
txq->sw_tx_cons++;
txq->sw_tx_ring[idx].skb = NULL;
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
+ int i, rc = 0;
u8 *data_ptr;
- int i;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
* queue and that the loopback traffic is not IP.
*/
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
- if (qede_has_rx_work(rxq))
+ if (!qede_has_rx_work(rxq)) {
+ usleep_range(100, 200);
+ continue;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from doing speculative
+ * reads of the CQE/BD before reading hw_comp_cons. If the CQE
+ * were read before FW writes it, then once FW writes the CQE
+ * and SB and the CPU reads hw_comp_cons, a stale CQE would be
+ * used.
+ */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset +
+ sw_rx_data->page_offset);
+ if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
+ ether_addr_equal(data_ptr + ETH_ALEN,
+ edev->ndev->dev_addr)) {
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ rc = -1;
+ break;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
break;
- usleep_range(100, 200);
+ }
+
+ DP_INFO(edev, "Not the transmitted packet\n");
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
- if (!qede_has_rx_work(rxq)) {
+ if (i == QEDE_SELFTEST_POLL_COUNT) {
DP_NOTICE(edev, "Failed to receive the traffic\n");
return -1;
}
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ qede_update_rx_prod(edev, rxq);
- /* Memory barrier to prevent the CPU from doing speculative reads of CQE
- * / BD before reading hw_comp_cons. If the CQE is read before it is
- * written by FW, then FW writes CQE and SB, and then the CPU reads the
- * hw_comp_cons, it will use an old CQE.
- */
- rmb();
-
- /* Get the CQE from the completion ring */
- cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
-
- /* Get the data from the SW ring */
- sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->len_on_first_bd);
- data_ptr = (u8 *)(page_address(sw_rx_data->data) +
- fp_cqe->placement_offset + sw_rx_data->page_offset);
- for (i = ETH_HLEN; i < len; i++)
- if (data_ptr[i] != (unsigned char)(i & 0xff)) {
- DP_NOTICE(edev, "Loopback test failed\n");
- qede_recycle_rx_bd_ring(rxq, edev, 1);
- return -1;
- }
-
- qede_recycle_rx_bd_ring(rxq, edev, 1);
-
- return 0;
+ return rc;
}
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
split_bd_len = BD_UNMAP_LEN(split);
bds_consumed++;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
nbd--;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < nbd; i++) {
return 0;
}
-static inline void qede_update_rx_prod(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
if (rocker->wops) {
if (rocker->wops->mode != mode) {
dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
- return err;
+ return -EINVAL;
}
return 0;
}
pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
for (i = 0; i < size; i++) {
- if (p->des0)
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
- i, (unsigned int)virt_to_phys(p),
- p->des0, p->des1, p->des2, p->des3);
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(p),
+ p->des0, p->des1, p->des2, p->des3);
p++;
}
}
int stmmac_mdio_reset(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- return stmmac_ptp_register(priv);
+ stmmac_ptp_register(priv);
+
+ return 0;
}
static void stmmac_release_ptp(struct stmmac_priv *priv)
if (init_ptp) {
ret = stmmac_init_ptp(priv);
if (ret)
- netdev_warn(priv->dev, "PTP support cannot init.\n");
+ netdev_warn(priv->dev, "failed to init PTP\n");
}
#ifdef CONFIG_DEBUG_FS
* Description: this function will register the ptp clock driver
* to kernel. It also does some house keeping work.
*/
-int stmmac_ptp_register(struct stmmac_priv *priv)
+void stmmac_ptp_register(struct stmmac_priv *priv)
{
spin_lock_init(&priv->ptp_lock);
priv->ptp_clock_ops = stmmac_ptp_clock_ops;
priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
priv->device);
if (IS_ERR(priv->ptp_clock)) {
- int ret = PTR_ERR(priv->ptp_clock);
-
+ netdev_err(priv->dev, "ptp_clock_register failed\n");
priv->ptp_clock = NULL;
- return ret;
- }
-
- spin_lock_init(&priv->ptp_lock);
-
- netdev_dbg(priv->dev, "Added PTP HW clock successfully\n");
-
- return 0;
+ } else if (priv->ptp_clock)
+ netdev_info(priv->dev, "registered PTP clock\n");
}
/**
skb_gro_pull(skb, gh_len);
skb_gro_postpull_rcsum(skb, gh, gh_len);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
- if (csum_info) {
- /* We only look at the IP checksum here.
- * Should we be dropping the packet if checksum
- * failed? How do we deal with other checksums - TCP/UDP?
- */
- if (csum_info->receive.ip_checksum_succeeded)
+
+ /* skb is already created with CHECKSUM_NONE */
+ skb_checksum_none_assert(skb);
+
+ /*
+ * In Linux, the IP checksum is always checked.
+ * Do L4 checksum offload if enabled and present.
+ */
+ if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ if (csum_info->receive.tcp_checksum_succeeded ||
+ csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
}
if (vlan_tci & VLAN_TAG_PRESENT)
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *dev = net_device_ctx->device_ctx;
-
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info));
}
static void netvsc_get_channels(struct net_device *net,
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
+static bool send_sci(const struct macsec_secy *secy)
+{
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ return tx_sc->send_sci ||
+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+}
+
static sci_t make_sci(u8 *addr, __be16 port)
{
sci_t sci;
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
- const struct macsec_secy *secy, u32 pn)
+ const struct macsec_secy *secy, u32 pn,
+ bool sci_present)
{
const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
- memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
+ memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
h->eth.h_proto = htons(ETH_P_MACSEC);
- if (tx_sc->send_sci ||
- (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
+ if (sci_present) {
h->tci_an |= MACSEC_TCI_SC;
memcpy(&h->secure_channel_id, &secy->sci,
sizeof(h->secure_channel_id));
struct macsec_tx_sc *tx_sc;
struct macsec_tx_sa *tx_sa;
struct macsec_dev *macsec = macsec_priv(dev);
+ bool sci_present;
u32 pn;
secy = &macsec->secy;
unprotected_len = skb->len;
eth = eth_hdr(skb);
- hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
+ sci_present = send_sci(secy);
+ hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy);
kfree_skb(skb);
return ERR_PTR(-ENOLINK);
}
- macsec_fill_sectag(hh, secy, pn);
+ macsec_fill_sectag(hh, secy, pn, sci_present);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
skb_put(skb, secy->icv_len);
skb_to_sgvec(skb, sg, 0, skb->len);
if (tx_sc->encrypt) {
- int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
+ int len = skb->len - macsec_hdr_len(sci_present) -
secy->icv_len;
aead_request_set_crypt(req, sg, sg, len, iv);
- aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
+ aead_request_set_ad(req, macsec_hdr_len(sci_present));
} else {
aead_request_set_crypt(req, sg, sg, 0, iv);
aead_request_set_ad(req, skb->len - secy->icv_len);
#define AT803X_MMD_ACCESS_CONTROL 0x0D
#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
#define AT803X_FUNC_DATA 0x4003
+#define AT803X_REG_CHIP_CONFIG 0x1f
+#define AT803X_BT_BX_REG_SEL 0x8000
#define AT803X_DEBUG_ADDR 0x1D
#define AT803X_DEBUG_DATA 0x1E
+#define AT803X_MODE_CFG_MASK 0x0F
+#define AT803X_MODE_CFG_SGMII 0x01
+
+#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
+#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
+
#define AT803X_DEBUG_REG_0 0x00
#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-#define AT803X_REG_CHIP_CONFIG 0x1f
-#define AT803X_BT_BX_REG_SEL 0x8000
-
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
{
int value;
int wol_enabled;
- int ccr;
mutex_lock(&phydev->lock);
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-down SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
static int at803x_resume(struct phy_device *phydev)
{
int value;
- int ccr;
mutex_lock(&phydev->lock);
value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-up SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE);
- phy_write(phydev, MII_BMCR, value);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
}
}
+static int at803x_aneg_done(struct phy_device *phydev)
+{
+ int ccr;
+
+ int aneg_done = genphy_aneg_done(phydev);
+ if (aneg_done != BMSR_ANEGCOMPLETE)
+ return aneg_done;
+
+ /*
+ * in SGMII mode, if copper side autoneg is successful,
+ * also check SGMII side autoneg result
+ */
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII)
+ return aneg_done;
+
+ /* switch to SGMII/fiber page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
+
+ /* check if the SGMII link is OK. */
+ if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) {
+ pr_warn("803x_aneg_done: SGMII link is not ok\n");
+ aneg_done = 0;
+ }
+ /* switch back to copper page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
+
+ return aneg_done;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* ATHEROS 8035 */
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
} };
status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
- if (status < 0) {
+ if (status) {
usb_set_intfdata(intf, NULL);
usb_driver_release_interface(driver_of(intf), intf);
return status;
}
}
- pp = eth_gro_receive(head, skb);
+ pp = call_gro_receive(eth_gro_receive, head, skb);
flush = 0;
out:
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
-static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
-{
- const struct kobject *kobj = &device_obj->device.kobj;
-
- return kobj->name;
-}
-
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
u32 prev;
int miss_counter;
bool sick;
+ /* wq spinlock to synchronize draining */
+ spinlock_t wq_lock;
struct workqueue_struct *wq;
+ unsigned long flags;
struct work_struct work;
+ struct delayed_work recover_work;
};
struct mlx5_cq_table {
int index;
};
-enum {
- MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
-
enum {
MLX5_COMP_EQ_SIZE = 1024,
};
MLX5_PTYS_EN = 1 << 2,
};
-struct mlx5_db_pgdir {
- struct list_head list;
- DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
- __be32 *db_page;
- dma_addr_t db_dma;
-};
-
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
/* Used to determine if flush_id can be ignored */
u8 is_atomic:1;
- /* 5 bit hole */
+ /* Number of gro_receive callbacks this packet already went through */
+ u8 recursion_counter:4;
+
+ /* 1 bit hole */
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(head, skb);
+}
+
+typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
+ struct sk_buff *);
+static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(sk, head, skb);
+}
+
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
struct net_device *dev; /* NULL is wildcarded here */
ldev = netdev_all_lower_get_next(dev, &(iter)))
#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
- for (iter = (dev)->all_adj_list.lower.next, \
+ for (iter = &(dev)->all_adj_list.lower, \
ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
ldev; \
ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12
/* forward */
struct qed_dev;
/**
* skb_fclone_busy - check if fclone is busy
+ * @sk: socket
* @skb: buffer
*
* Returns true if skb is a fast clone, and its clone is not freed.
const struct in6_addr *addr);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
+void __ipv6_sock_mc_close(struct sock *sk);
void ipv6_sock_mc_close(struct sock *sk);
bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
const struct in6_addr *src_addr);
*/
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
- ip_cmsg_recv_offset(msg, skb, 0);
+ ip_cmsg_recv_offset(msg, skb, 0, 0);
}
bool icmp_global_allow(void);
rwlock_t tb6_lock;
struct fib6_node tb6_root;
struct inet_peer_base tb6_peers;
+ unsigned int flags;
+#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC
#define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008
#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
+#define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040
/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
* Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
* @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
* @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes
+ * @sk_padding: unused element for alignment
* @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
* @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
* @sk_backlog_rcv: callback to process the backlog
* @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
* @sk_reuseport_cb: reuseport group container
- */
+ * @sk_rcu: used during RCU grace period
+ */
struct sock {
/*
* Now struct inet_timewait_sock also uses sock_common, so please just
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
#define RTNH_F_OFFLOAD 8 /* offloaded route */
#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
-#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN)
+#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
/* Macros to handle hexthops */
{ },
INTERNAL,
{ 0x34 },
- { { 1, 0xbef } },
+ { { ETH_HLEN, 0xbef } },
.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
},
/*
skb_gro_pull(skb, sizeof(*vhdr));
skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
batadv_softif_destroy_sysfs(hard_iface->soft_iface);
}
- hard_iface->soft_iface = NULL;
batadv_hardif_put(hard_iface);
out:
BATADV_DBG_NC = BIT(5),
BATADV_DBG_MCAST = BIT(6),
BATADV_DBG_TP_METER = BIT(7),
- BATADV_DBG_ALL = 127,
+ BATADV_DBG_ALL = 255,
};
#ifdef CONFIG_BATMAN_ADV_DEBUG
if (bat_priv->algo_ops->neigh.hardif_init)
bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
- hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
+ hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
out:
spin_unlock_bh(&hard_iface->neigh_list_lock);
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
-static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
- size_t complete_len;
size_t short_len;
- int max_len;
-
- max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
- complete_len = strlen(hdev->dev_name);
- short_len = strlen(hdev->short_name);
-
- /* no space left for name */
- if (max_len < 1)
- return ad_len;
+ size_t complete_len;
- /* no name set */
- if (!complete_len)
+ /* no space left for name (+ NULL + type + len) */
+ if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
return ad_len;
- /* complete name fits and is eq to max short name len or smaller */
- if (complete_len <= max_len &&
- complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
+ /* use complete name if present and fits */
+ complete_len = strlen(hdev->dev_name);
+ if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
- hdev->dev_name, complete_len);
- }
+ hdev->dev_name, complete_len + 1);
- /* short name set and fits */
- if (short_len && short_len <= max_len) {
+ /* use short name if present */
+ short_len = strlen(hdev->short_name);
+ if (short_len)
return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
- hdev->short_name, short_len);
- }
+ hdev->short_name, short_len + 1);
- /* no short name set so shorten complete name */
- if (!short_len) {
- return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
- hdev->dev_name, max_len);
+ /* use shortened full name if present, we already know that name
+ * is longer than HCI_MAX_SHORT_NAME_LENGTH
+ */
+ if (complete_len) {
+ u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
+
+ memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
+ name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
+
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
+ sizeof(name));
}
return ad_len;
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
+
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
u8 *data, u8 data_len)
{
return err;
}
-static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
+static u8 calculate_name_len(struct hci_dev *hdev)
+{
+ u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
+
+ return append_local_name(hdev, buf, 0);
+}
+
+static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
+ bool is_adv_data)
{
u8 max_len = HCI_MAX_AD_LENGTH;
if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
max_len -= 3;
} else {
- /* at least 1 byte of name should fit in */
if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
- max_len -= 3;
+ max_len -= calculate_name_len(hdev);
if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
max_len -= 4;
return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}
-static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
+static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
+ u8 len, bool is_adv_data)
{
int i, cur_len;
u8 max_len;
- max_len = tlv_data_max_len(adv_flags, is_adv_data);
+ max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
if (len > max_len)
return false;
goto unlock;
}
- if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) ||
- !tlv_data_is_valid(flags, cp->data + cp->adv_data_len,
+ if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
+ !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
cp->scan_rsp_len, false)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
rp.instance = cp->instance;
rp.flags = cp->flags;
- rp.max_adv_data_len = tlv_data_max_len(flags, true);
- rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
+ rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
+ rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
return head;
}
+EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
NAPI_GRO_CB(skb)->flush = 0;
NAPI_GRO_CB(skb)->free = 0;
NAPI_GRO_CB(skb)->encap_mark = 0;
+ NAPI_GRO_CB(skb)->recursion_counter = 0;
NAPI_GRO_CB(skb)->is_fou = 0;
NAPI_GRO_CB(skb)->is_atomic = 1;
NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
{
struct netdev_adjacent *lower;
- lower = list_first_or_null_rcu(&dev->all_adj_list.lower,
- struct netdev_adjacent, list);
+ lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+ if (&lower->list == &dev->all_adj_list.lower)
+ return NULL;
+
+ *iter = &lower->list;
- return lower ? lower->dev : NULL;
+ return lower->dev;
}
EXPORT_SYMBOL(netdev_all_lower_get_next_rcu);
case htons(ETH_P_8021AD):
case htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
+ struct vlan_hdr _vlan;
+ bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
- if (skb_vlan_tag_present(skb))
+ if (vlan_tag_present)
proto = skb->protocol;
- if (!skb_vlan_tag_present(skb) ||
- proto == cpu_to_be16(ETH_P_8021Q) ||
- proto == cpu_to_be16(ETH_P_8021AD)) {
- struct vlan_hdr _vlan;
-
+ if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
data, hlen, &_vlan);
if (!vlan)
FLOW_DISSECTOR_KEY_VLAN,
target_container);
- if (skb_vlan_tag_present(skb)) {
+ if (vlan_tag_present) {
key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
key_vlan->vlan_priority =
(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
*/
int peernet2id_alloc(struct net *net, struct net *peer)
{
+ unsigned long flags;
bool alloc;
int id;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
alloc = atomic_read(&peer->count) == 0 ? false : true;
id = __peernet2id_alloc(net, peer, &alloc);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
if (alloc && id >= 0)
rtnl_net_notifyid(net, RTM_NEWNSID, id);
return id;
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
+ unsigned long flags;
int id;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
id = __peernet2id(net, peer);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
return id;
}
EXPORT_SYMBOL(peernet2id);
struct net *get_net_ns_by_id(struct net *net, int id)
{
+ unsigned long flags;
struct net *peer;
if (id < 0)
return NULL;
rcu_read_lock();
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
peer = idr_find(&net->netns_ids, id);
if (peer)
get_net(peer);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
rcu_read_unlock();
return peer;
for_each_net(tmp) {
int id;
- spin_lock_bh(&tmp->nsid_lock);
+ spin_lock_irq(&tmp->nsid_lock);
id = __peernet2id(tmp, net);
if (id >= 0)
idr_remove(&tmp->netns_ids, id);
- spin_unlock_bh(&tmp->nsid_lock);
+ spin_unlock_irq(&tmp->nsid_lock);
if (id >= 0)
rtnl_net_notifyid(tmp, RTM_DELNSID, id);
}
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irq(&net->nsid_lock);
idr_destroy(&net->netns_ids);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irq(&net->nsid_lock);
}
rtnl_unlock();
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
+ unsigned long flags;
struct net *peer;
int nsid, err;
if (IS_ERR(peer))
return PTR_ERR(peer);
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
if (__peernet2id(net, peer) >= 0) {
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
err = -EEXIST;
goto out;
}
err = alloc_netid(net, peer, nsid);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err);
err = 0;
.idx = 0,
.s_idx = cb->args[0],
};
+ unsigned long flags;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
cb->args[0] = net_cb.idx;
return skb->len;
skb_gro_pull(skb, sizeof(*eh));
skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
if (!ops || !ops->callbacks.gro_receive)
goto out_unlock;
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
goto out_unlock;
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
skb_gro_postpull_rcsum(skb, greh, grehlen);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
}
static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
- int offset)
+ int tlen, int offset)
{
__wsum csum = skb->csum;
return;
if (offset != 0)
- csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
- offset, 0));
+ csum = csum_sub(csum,
+ csum_partial(skb_transport_header(skb) + tlen,
+ offset, 0));
put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
}
}
void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
- int offset)
+ int tlen, int offset)
{
struct inet_sock *inet = inet_sk(skb->sk);
unsigned int flags = inet->cmsg_flags;
}
if (flags & IP_CMSG_CHECKSUM)
- ip_cmsg_recv_checksum(msg, skb, offset);
+ ip_cmsg_recv_checksum(msg, skb, tlen, offset);
}
EXPORT_SYMBOL(ip_cmsg_recv_offset);
.init = ping_init_sock,
.close = ping_close,
.connect = ip4_datagram_connect,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.sendmsg = ping_v4_sendmsg,
.close = raw_close,
.destroy = raw_destroy,
.connect = ip4_datagram_connect,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.ioctl = raw_ioctl,
.init = raw_init,
.setsockopt = raw_setsockopt,
container_of(table->data, struct net, ipv4.ping_group_range.range);
unsigned int seq;
do {
- seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
+ seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
*low = data[0];
*high = data[1];
- } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
+ } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
}
/* Update system visible IP port range */
kgid_t *data = table->data;
struct net *net =
container_of(table->data, struct net, ipv4.ping_group_range.range);
- write_seqlock(&net->ipv4.ip_local_ports.lock);
+ write_seqlock(&net->ipv4.ping_group_range.lock);
data[0] = low;
data[1] = high;
- write_sequnlock(&net->ipv4.ip_local_ports.lock);
+ write_sequnlock(&net->ipv4.ping_group_range.lock);
}
/* Validate changes from /proc interface. */
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
-EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
*addr_len = sizeof(*sin);
}
if (inet->cmsg_flags)
- ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
+ ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
err = copied;
if (flags & MSG_TRUNC)
goto try_again;
}
-int udp_disconnect(struct sock *sk, int flags)
+int __udp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
/*
sk_dst_reset(sk);
return 0;
}
+EXPORT_SYMBOL(__udp_disconnect);
+
+int udp_disconnect(struct sock *sk, int flags)
+{
+ lock_sock(sk);
+ __udp_disconnect(sk, flags);
+ release_sock(sk);
+ return 0;
+}
EXPORT_SYMBOL(udp_disconnect);
void udp_lib_unhash(struct sock *sk)
sk->sk_err = err;
sk->sk_error_report(sk);
- udp_disconnect(sk, 0);
+ __udp_disconnect(sk, 0);
release_sock(sk);
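The split above separates the unlocked core from a locking wrapper: callers that
already hold the socket lock, such as the error path just above, use
__udp_disconnect() directly, while everyone else goes through udp_disconnect().
A minimal caller sketch (the function name here is hypothetical, for
illustration only):

	static void example_teardown_under_lock(struct sock *sk)
	{
		lock_sock(sk);
		/* ... protocol-specific cleanup while the lock is held ... */
		__udp_disconnect(sk, 0);	/* socket lock already held */
		release_sock(sk);
	}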
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
- pp = udp_sk(sk)->gro_receive(sk, head, skb);
+ pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
out_unlock:
rcu_read_unlock();
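The call_gro_receive()/call_gro_receive_sk() conversions above rely on the
recursion_counter initialised alongside the other NAPI_GRO_CB fields earlier in
this patch to bound how deeply encapsulated gro_receive callbacks may nest. A
minimal sketch of such a wrapper (the real helper lives in
include/linux/netdevice.h and may differ in detail):

	#define GRO_RECURSION_LIMIT	15

	typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **,
						  struct sk_buff *);

	static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
							struct sk_buff **head,
							struct sk_buff *skb)
	{
		/* Too many nested encapsulations: flush instead of recursing. */
		if (unlikely(++NAPI_GRO_CB(skb)->recursion_counter ==
			     GRO_RECURSION_LIMIT)) {
			NAPI_GRO_CB(skb)->flush |= 1;
			return NULL;
		}

		return cb(head, skb);
	}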
u32 addr_flags;
unsigned long now = jiffies;
long max_desync_factor;
+ s32 cnf_temp_preferred_lft;
write_lock_bh(&idev->lock);
if (ift) {
/* recalculate max_desync_factor each time and update
* idev->desync_factor if it's larger
*/
+ cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
max_desync_factor = min_t(__u32,
idev->cnf.max_desync_factor,
- idev->cnf.temp_prefered_lft - regen_advance);
+ cnf_temp_preferred_lft - regen_advance);
if (unlikely(idev->desync_factor > max_desync_factor)) {
if (max_desync_factor > 0) {
tmp_valid_lft = min_t(__u32,
ifp->valid_lft,
idev->cnf.temp_valid_lft + age);
- tmp_prefered_lft = idev->cnf.temp_prefered_lft + age -
+ tmp_prefered_lft = cnf_temp_preferred_lft + age -
idev->desync_factor;
- /* guard against underflow in case of concurrent updates to cnf */
- if (unlikely(tmp_prefered_lft < 0))
- tmp_prefered_lft = 0;
tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
tmp_plen = ifp->prefix_len;
tmp_tstamp = ifp->tstamp;
int inet6_hash(struct sock *sk)
{
+ int err = 0;
+
if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
- __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
+ err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
local_bh_enable();
}
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(inet6_hash);
skb_gro_postpull_rcsum(skb, iph, nlen);
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
if (err)
return err;
+ skb->protocol = htons(ETH_P_IPV6);
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
static bool setsockopt_needs_rtnl(int optname)
{
switch (optname) {
+ case IPV6_ADDRFORM:
case IPV6_ADD_MEMBERSHIP:
case IPV6_DROP_MEMBERSHIP:
case IPV6_JOIN_ANYCAST:
}
fl6_free_socklist(sk);
- ipv6_sock_mc_close(sk);
+ __ipv6_sock_mc_close(sk);
/*
* Sock is moving from IPv6 to IPv4 (sk_prot), so
return idev;
}
-void ipv6_sock_mc_close(struct sock *sk)
+void __ipv6_sock_mc_close(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_mc_socklist *mc_lst;
struct net *net = sock_net(sk);
- if (!rcu_access_pointer(np->ipv6_mc_list))
- return;
+ ASSERT_RTNL();
- rtnl_lock();
while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
struct net_device *dev;
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
kfree_rcu(mc_lst, rcu);
-
}
+}
+
+void ipv6_sock_mc_close(struct sock *sk)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ if (!rcu_access_pointer(np->ipv6_mc_list))
+ return;
+ rtnl_lock();
+ __ipv6_sock_mc_close(sk);
rtnl_unlock();
}
.init = ping_init_sock,
.close = ping_close,
.connect = ip6_datagram_connect_v6_only,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.sendmsg = ping_v6_sendmsg,
.close = rawv6_close,
.destroy = raw6_destroy,
.connect = ip6_datagram_connect_v6_only,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.ioctl = rawv6_ioctl,
.init = rawv6_init_sk,
.setsockopt = rawv6_setsockopt,
skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
memmove(head->head + sizeof(struct frag_hdr), head->head,
(head->data - head->head) - sizeof(struct frag_hdr));
- head->mac_header += sizeof(struct frag_hdr);
+ if (skb_mac_header_was_set(head))
+ head->mac_header += sizeof(struct frag_hdr);
head->network_header += sizeof(struct frag_hdr);
skb_reset_transport_header(head);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex,
+ const struct in6_addr *gwaddr,
+ struct net_device *dev,
unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex);
+ const struct in6_addr *gwaddr,
+ struct net_device *dev);
#endif
struct uncached_list {
struct net_device *dev = rt->dst.dev;
if (dev && !netif_carrier_ok(dev) &&
- idev->cnf.ignore_routes_with_linkdown)
+ idev->cnf.ignore_routes_with_linkdown &&
+ !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
goto out;
if (rt6_check_expired(rt))
rt = rt6_get_dflt_router(gwaddr, dev);
else
rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
- gwaddr, dev->ifindex);
+ gwaddr, dev);
if (rt && !lifetime) {
ip6_del_rt(rt);
}
if (!rt && lifetime)
- rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
- pref);
+ rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
+ dev, pref);
else if (rt)
rt->rt6i_flags = RTF_ROUTEINFO |
(rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
int strict = 0;
strict |= flags & RT6_LOOKUP_F_IFACE;
+ strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
if (net->ipv6.devconf_all->forwarding == 0)
strict |= RT6_LOOKUP_F_REACHABLE;
};
struct fib6_table *table;
struct rt6_info *rt;
- int flags = RT6_LOOKUP_F_IFACE;
+ int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
table = fib6_get_table(net, cfg->fc_table);
if (!table)
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex)
+ const struct in6_addr *gwaddr,
+ struct net_device *dev)
{
+ u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
+ int ifindex = dev->ifindex;
struct fib6_node *fn;
struct rt6_info *rt = NULL;
struct fib6_table *table;
- table = fib6_get_table(net, RT6_TABLE_INFO);
+ table = fib6_get_table(net, tb_id);
if (!table)
return NULL;
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex,
+ const struct in6_addr *gwaddr,
+ struct net_device *dev,
unsigned int pref)
{
struct fib6_config cfg = {
.fc_metric = IP6_RT_PRIO_USER,
- .fc_ifindex = ifindex,
+ .fc_ifindex = dev->ifindex,
.fc_dst_len = prefixlen,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
RTF_UP | RTF_PREF(pref),
.fc_nlinfo.nl_net = net,
};
- cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
+	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
cfg.fc_dst = *prefix;
cfg.fc_gateway = *gwaddr;
ip6_route_add(&cfg);
- return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
+ return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
}
#endif
struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
{
+ u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
struct rt6_info *rt;
struct fib6_table *table;
- table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
+ table = fib6_get_table(dev_net(dev), tb_id);
if (!table)
return NULL;
cfg.fc_gateway = *gwaddr;
- ip6_route_add(&cfg);
+ if (!ip6_route_add(&cfg)) {
+ struct fib6_table *table;
+
+ table = fib6_get_table(dev_net(dev), cfg.fc_table);
+ if (table)
+ table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
+ }
return rt6_get_dflt_router(gwaddr, dev);
}
-void rt6_purge_dflt_routers(struct net *net)
+static void __rt6_purge_dflt_routers(struct fib6_table *table)
{
struct rt6_info *rt;
- struct fib6_table *table;
-
- /* NOTE: Keep consistent with rt6_get_dflt_router */
- table = fib6_get_table(net, RT6_TABLE_DFLT);
- if (!table)
- return;
restart:
read_lock_bh(&table->tb6_lock);
}
}
read_unlock_bh(&table->tb6_lock);
+
+ table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
+}
+
+void rt6_purge_dflt_routers(struct net *net)
+{
+ struct fib6_table *table;
+ struct hlist_head *head;
+ unsigned int h;
+
+ rcu_read_lock();
+
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+ head = &net->ipv6.fib_table_hash[h];
+ hlist_for_each_entry_rcu(table, head, tb6_hlist) {
+ if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
+ __rt6_purge_dflt_routers(table);
+ }
+ }
+
+ rcu_read_unlock();
}
static void rtmsg_to_fib6_config(struct net *net,
if (is_udp4) {
if (inet->cmsg_flags)
- ip_cmsg_recv(msg, skb);
+ ip_cmsg_recv_offset(msg, skb,
+ sizeof(struct udphdr), off);
} else {
if (np->rxopt.all)
ip6_datagram_recv_specific_ctl(sk, msg, skb);
if (sock_flag(sk, SOCK_ZAPPED))
return 0;
- return udp_disconnect(sk, flags);
+ return __udp_disconnect(sk, flags);
}
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
if (sock_flag(sk, SOCK_ZAPPED))
return 0;
- return udp_disconnect(sk, flags);
+ return __udp_disconnect(sk, flags);
}
static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
ncsi_dev_state_config_gls,
ncsi_dev_state_config_done,
ncsi_dev_state_suspend_select = 0x0401,
+ ncsi_dev_state_suspend_gls,
ncsi_dev_state_suspend_dcnt,
ncsi_dev_state_suspend_dc,
ncsi_dev_state_suspend_deselect,
#endif
unsigned int package_num; /* Number of packages */
struct list_head packages; /* List of packages */
+ struct ncsi_channel *hot_channel; /* Channel was ever active */
struct ncsi_request requests[256]; /* Request table */
unsigned int request_id; /* Last used request ID */
#define NCSI_REQ_START_IDX 1
return -ENODEV;
/* If the channel is active one, we need reconfigure it */
+ spin_lock_irqsave(&nc->lock, flags);
ncm = &nc->modes[NCSI_MODE_LINK];
hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
ncm->data[3] = ntohl(hncdsc->status);
if (!list_empty(&nc->link) ||
- nc->state != NCSI_CHANNEL_ACTIVE ||
- (ncm->data[3] & 0x1))
+ nc->state != NCSI_CHANNEL_ACTIVE) {
+ spin_unlock_irqrestore(&nc->lock, flags);
return 0;
+ }
- if (ndp->flags & NCSI_DEV_HWA)
+ spin_unlock_irqrestore(&nc->lock, flags);
+ if (!(ndp->flags & NCSI_DEV_HWA) && !(ncm->data[3] & 0x1))
ndp->flags |= NCSI_DEV_RESHUFFLE;
/* If this channel is the active one and the link doesn't
* work, we have to choose another channel to be active one.
* The logic here is exactly similar to what we do when link
* is down on the active channel.
+ *
+	 * On the other hand, we need to configure it when the host
+	 * driver state on the active channel becomes ready.
*/
ncsi_stop_channel_monitor(nc);
+
+ spin_lock_irqsave(&nc->lock, flags);
+ nc->state = (ncm->data[3] & 0x1) ? NCSI_CHANNEL_INACTIVE :
+ NCSI_CHANNEL_ACTIVE;
+ spin_unlock_irqrestore(&nc->lock, flags);
+
spin_lock_irqsave(&ndp->lock, flags);
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
nd->state = ncsi_dev_state_suspend_select;
/* Fall through */
case ncsi_dev_state_suspend_select:
- case ncsi_dev_state_suspend_dcnt:
- case ncsi_dev_state_suspend_dc:
- case ncsi_dev_state_suspend_deselect:
ndp->pending_req_num = 1;
- np = ndp->active_package;
- nc = ndp->active_channel;
+ nca.type = NCSI_PKT_CMD_SP;
nca.package = np->id;
- if (nd->state == ncsi_dev_state_suspend_select) {
- nca.type = NCSI_PKT_CMD_SP;
- nca.channel = NCSI_RESERVED_CHANNEL;
- if (ndp->flags & NCSI_DEV_HWA)
- nca.bytes[0] = 0;
- else
- nca.bytes[0] = 1;
+ nca.channel = NCSI_RESERVED_CHANNEL;
+ if (ndp->flags & NCSI_DEV_HWA)
+ nca.bytes[0] = 0;
+ else
+ nca.bytes[0] = 1;
+
+	/* Retrieve the last link states of the channels in the current
+	 * package when the current active channel needs to fail over to
+	 * another one, since we will possibly select another channel as
+	 * the next active one. The channels' link states are the most
+	 * important factor in that selection, so we need accurate link
+	 * states. Unfortunately, the link states of inactive channels
+	 * can't be updated with LSC AENs in time.
+	 */
+ if (ndp->flags & NCSI_DEV_RESHUFFLE)
+ nd->state = ncsi_dev_state_suspend_gls;
+ else
nd->state = ncsi_dev_state_suspend_dcnt;
- } else if (nd->state == ncsi_dev_state_suspend_dcnt) {
- nca.type = NCSI_PKT_CMD_DCNT;
- nca.channel = nc->id;
- nd->state = ncsi_dev_state_suspend_dc;
- } else if (nd->state == ncsi_dev_state_suspend_dc) {
- nca.type = NCSI_PKT_CMD_DC;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_gls:
+ ndp->pending_req_num = np->channel_num;
+
+ nca.type = NCSI_PKT_CMD_GLS;
+ nca.package = np->id;
+
+ nd->state = ncsi_dev_state_suspend_dcnt;
+ NCSI_FOR_EACH_CHANNEL(np, nc) {
nca.channel = nc->id;
- nca.bytes[0] = 1;
- nd->state = ncsi_dev_state_suspend_deselect;
- } else if (nd->state == ncsi_dev_state_suspend_deselect) {
- nca.type = NCSI_PKT_CMD_DP;
- nca.channel = NCSI_RESERVED_CHANNEL;
- nd->state = ncsi_dev_state_suspend_done;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
}
+ break;
+ case ncsi_dev_state_suspend_dcnt:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DCNT;
+ nca.package = np->id;
+ nca.channel = nc->id;
+
+ nd->state = ncsi_dev_state_suspend_dc;
ret = ncsi_xmit_cmd(&nca);
- if (ret) {
- nd->state = ncsi_dev_state_functional;
- return;
- }
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_dc:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DC;
+ nca.package = np->id;
+ nca.channel = nc->id;
+ nca.bytes[0] = 1;
+
+ nd->state = ncsi_dev_state_suspend_deselect;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_deselect:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DP;
+ nca.package = np->id;
+ nca.channel = NCSI_RESERVED_CHANNEL;
+
+ nd->state = ncsi_dev_state_suspend_done;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
break;
case ncsi_dev_state_suspend_done:
netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
nd->state);
}
+
+ return;
+error:
+ nd->state = ncsi_dev_state_functional;
}
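For reference, the reworked suspend path above steps the channel through: select
package (SP), optionally get link status (GLS) on every channel when a reshuffle
is pending, disable channel network TX (DCNT), disable channel (DC), deselect
package (DP), and finally the done state; any command transmit failure now drops
back to the functional state via the common error label.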
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
struct net_device *dev = nd->dev;
struct ncsi_package *np = ndp->active_package;
struct ncsi_channel *nc = ndp->active_channel;
+ struct ncsi_channel *hot_nc = NULL;
struct ncsi_cmd_arg nca;
unsigned char index;
unsigned long flags;
break;
case ncsi_dev_state_config_done:
spin_lock_irqsave(&nc->lock, flags);
- if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1)
+ if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
+ hot_nc = nc;
nc->state = NCSI_CHANNEL_ACTIVE;
- else
+ } else {
+ hot_nc = NULL;
nc->state = NCSI_CHANNEL_INACTIVE;
+ }
spin_unlock_irqrestore(&nc->lock, flags);
+ /* Update the hot channel */
+ spin_lock_irqsave(&ndp->lock, flags);
+ ndp->hot_channel = hot_nc;
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
ncsi_start_channel_monitor(nc);
ncsi_process_next_channel(ndp);
break;
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_package *np;
- struct ncsi_channel *nc, *found;
+ struct ncsi_channel *nc, *found, *hot_nc;
struct ncsi_channel_mode *ncm;
unsigned long flags;
+ spin_lock_irqsave(&ndp->lock, flags);
+ hot_nc = ndp->hot_channel;
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
/* The search is done once an inactive channel with up
* link is found.
*/
if (!found)
found = nc;
+ if (nc == hot_nc)
+ found = nc;
+
ncm = &nc->modes[NCSI_MODE_LINK];
if (ncm->data[2] & 0x1) {
spin_unlock_irqrestore(&nc->lock, flags);
if (ret == 0)
ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
- int err;
-
- RCU_INIT_POINTER(state->hook_entries, entry);
- err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
- if (err < 0) {
- if (err == -ESRCH &&
- (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
- goto next_hook;
- kfree_skb(skb);
- }
+ ret = nf_queue(skb, state, &entry, verdict);
+ if (ret == 1 && entry)
+ goto next_hook;
}
return ret;
}
return;
ratio = scanned ? expired_count * 100 / scanned : 0;
- if (ratio >= 90)
+ if (ratio >= 90 || expired_count == GC_MAX_EVICTS)
next_run = 0;
gc_work->last_bucket = i;
/* nf_queue.c */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
- unsigned int queuenum);
+ struct nf_hook_entry **entryp, unsigned int verdict);
void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry);
int __init netfilter_queue_init(void);
rcu_read_unlock();
}
-/*
- * Any packet that leaves via this function must come back
- * through nf_reinject().
- */
-int nf_queue(struct sk_buff *skb,
- struct nf_hook_state *state,
- unsigned int queuenum)
+static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
+ unsigned int queuenum)
{
int status = -ENOENT;
struct nf_queue_entry *entry = NULL;
return status;
}
+/* Packets leaving via this function must come back through nf_reinject(). */
+int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
+ struct nf_hook_entry **entryp, unsigned int verdict)
+{
+ struct nf_hook_entry *entry = *entryp;
+ int ret;
+
+ RCU_INIT_POINTER(state->hook_entries, entry);
+ ret = __nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
+ if (ret < 0) {
+ if (ret == -ESRCH &&
+ (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
+ *entryp = rcu_dereference(entry->next);
+ return 1;
+ }
+ kfree_skb(skb);
+ }
+
+ return 0;
+}
+
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
struct nf_hook_entry *hook_entry;
entry->state.thresh = INT_MIN;
if (verdict == NF_ACCEPT) {
- next_hook:
- verdict = nf_iterate(skb, &entry->state, &hook_entry);
+ hook_entry = rcu_dereference(hook_entry->next);
+ if (hook_entry)
+next_hook:
+ verdict = nf_iterate(skb, &entry->state, &hook_entry);
}
switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_STOP:
+okfn:
local_bh_disable();
entry->state.okfn(entry->state.net, entry->state.sk, skb);
local_bh_enable();
break;
case NF_QUEUE:
- RCU_INIT_POINTER(entry->state.hook_entries, hook_entry);
- err = nf_queue(skb, &entry->state,
- verdict >> NF_VERDICT_QBITS);
- if (err < 0) {
- if (err == -ESRCH &&
- (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+ err = nf_queue(skb, &entry->state, &hook_entry, verdict);
+ if (err == 1) {
+ if (hook_entry)
goto next_hook;
- kfree_skb(skb);
+ goto okfn;
}
break;
case NF_STOLEN:
*/
unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
{
- int val;
+ u32 val;
val = ntohl(nla_get_be32(attr));
if (val > max)
if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
- timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
+ timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+ tb[NFTA_DYNSET_TIMEOUT])));
}
priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
goto nla_put_failure;
if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
goto nla_put_failure;
- if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
+ if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
+ cpu_to_be64(jiffies_to_msecs(priv->timeout)),
NFTA_DYNSET_PAD))
goto nla_put_failure;
if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
- u32 offset, len, err;
+ u32 offset, len;
+ int err;
if (tb[NFTA_EXTHDR_DREG] == NULL ||
tb[NFTA_EXTHDR_TYPE] == NULL ||
[NFTA_HASH_LEN] = { .type = NLA_U32 },
[NFTA_HASH_MODULUS] = { .type = NLA_U32 },
[NFTA_HASH_SEED] = { .type = NLA_U32 },
+ [NFTA_HASH_OFFSET] = { .type = NLA_U32 },
};
static int nft_hash_init(const struct nft_ctx *ctx,
const struct nft_pktinfo *pkt)
{
const struct nft_range_expr *priv = nft_expr_priv(expr);
- bool mismatch;
int d1, d2;
d1 = memcmp(®s->data[priv->sreg], &priv->data_from, priv->len);
d2 = memcmp(®s->data[priv->sreg], &priv->data_to, priv->len);
switch (priv->op) {
case NFT_RANGE_EQ:
- mismatch = (d1 < 0 || d2 > 0);
+ if (d1 < 0 || d2 > 0)
+ regs->verdict.code = NFT_BREAK;
break;
case NFT_RANGE_NEQ:
- mismatch = (d1 >= 0 && d2 <= 0);
+ if (d1 >= 0 && d2 <= 0)
+ regs->verdict.code = NFT_BREAK;
break;
}
-
- if (mismatch)
- regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = {
struct nft_range_expr *priv = nft_expr_priv(expr);
struct nft_data_desc desc_from, desc_to;
int err;
+ u32 op;
err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
&desc_from, tb[NFTA_RANGE_FROM_DATA]);
if (err < 0)
goto err2;
- priv->op = ntohl(nla_get_be32(tb[NFTA_RANGE_OP]));
+ err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op);
+ if (err < 0)
+ goto err2;
+
+ switch (op) {
+ case NFT_RANGE_EQ:
+ case NFT_RANGE_NEQ:
+ break;
+ default:
+ err = -EINVAL;
+ goto err2;
+ }
+
+ priv->op = op;
priv->len = desc_from.len;
return 0;
err2:
if (!num_hooks)
return ERR_PTR(-EINVAL);
- ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
+ ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
if (ops == NULL)
return ERR_PTR(-ENOMEM);
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
+ li.u.ulog.flags = 0;
if (info->flags & XT_NFLOG_F_COPY_LEN)
li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
#define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
-#define MAX_CPJ (0xFFFFFFFFFFFFFFFF / (HZ*60*60*24))
+#define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24))
/* Repeated shift and or gives us all 1s, final shift and add 1 gives
* us the power of 2 below the theoretical max, so GCC simply does a
return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1,
XT_HASHLIMIT_SCALE);
} else {
- if (user > 0xFFFFFFFFFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+ if (user > 0xFFFFFFFFFFFFFFFFULL / (HZ*CREDITS_PER_JIFFY))
return div64_u64(user, XT_HASHLIMIT_SCALE_v2)
* HZ * CREDITS_PER_JIFFY;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fan Du <fan.du@windriver.com>");
MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match");
+MODULE_ALIAS("ipt_ipcomp");
+MODULE_ALIAS("ip6t_ipcomp");
/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
static int packet_direct_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
- netdev_features_t features;
+ struct sk_buff *orig_skb = skb;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
!netif_carrier_ok(dev)))
goto drop;
- features = netif_skb_features(skb);
- if (skb_needs_linearize(skb, features) &&
- __skb_linearize(skb))
+ skb = validate_xmit_skb_list(skb, dev);
+ if (skb != orig_skb)
goto drop;
txq = skb_get_tx_queue(dev, skb);
return ret;
drop:
atomic_long_inc(&dev->tx_dropped);
- kfree_skb(skb);
+ kfree_skb_list(skb);
return NET_XMIT_DROP;
}
if (tb[1] == NULL)
return NULL;
- if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
- nla_len(tb[1]), NULL) < 0)
+ if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL) < 0)
return NULL;
kind = tb2[TCA_ACT_KIND];
static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
u64 lastuse)
{
- tcf_lastuse_update(&a->tcfa_tm);
+ struct tcf_mirred *m = to_mirred(a);
+ struct tcf_t *tm = &m->tcf_tm;
+
_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+ tm->lastuse = lastuse;
}
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
if (err == 0) {
struct tcf_proto *next = rtnl_dereference(tp->next);
- tfilter_notify(net, skb, n, tp, fh,
+ tfilter_notify(net, skb, n, tp,
+ t->tcm_handle,
RTM_DELTFILTER, false);
if (tcf_destroy(tp, false))
RCU_INIT_POINTER(*back, next);
__u8 has_data = 0;
int gso = 0;
int pktcount = 0;
+ int auth_len = 0;
struct dst_entry *dst;
unsigned char *auth = NULL; /* pointer to auth in skb data */
list_for_each_entry(chunk, &packet->chunk_list, list) {
int padded = SCTP_PAD4(chunk->skb->len);
- if (pkt_size + padded > tp->pathmtu)
+ if (chunk == packet->auth)
+ auth_len = padded;
+ else if (auth_len + padded + packet->overhead >
+ tp->pathmtu)
+ goto nomem;
+ else if (pkt_size + padded > tp->pathmtu)
break;
pkt_size += padded;
}
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
+ /* Report violation if chunk len overflows */
+ ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
+ if (ch_end > skb_tail_pointer(skb))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
/* Now that we know we at least have a chunk header,
* do things that are type appropriate.
*/
}
}
- /* Report violation if chunk len overflows */
- ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
- if (ch_end > skb_tail_pointer(skb))
- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
- commands);
-
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
- if (len <= 0)
+ if (len == 0)
return -EINVAL;
if (len > sizeof(struct sctp_event_subscribe))
len = sizeof(struct sctp_event_subscribe);
if (get_user(len, optlen))
return -EFAULT;
+ if (len < 0)
+ return -EINVAL;
+
lock_sock(sk);
switch (optname) {
u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
int err;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
err = switchdev_port_attr_get(dev, &attr);
if (err && err != -EOPNOTSUPP)
return err;
struct nlattr *afspec;
int err = 0;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
IFLA_PROTINFO);
if (protinfo) {
{
struct nlattr *afspec;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
IFLA_AF_SPEC);
if (afspec)
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/if_ether.h>
#include <uapi/linux/in6.h>
#include <uapi/linux/ipv6.h>