net: mediatek: fix off-by-one in the TX ring allocation
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c984462..c097e9e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
 static void mtk_phy_link_adjust(struct net_device *dev)
 {
        struct mtk_mac *mac = netdev_priv(dev);
+       u16 lcl_adv = 0, rmt_adv = 0;
+       u8 flowctrl;
        u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
                  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
                  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev)
        if (mac->phy_dev->link)
                mcr |= MAC_MCR_FORCE_LINK;
 
-       if (mac->phy_dev->duplex)
+       if (mac->phy_dev->duplex) {
                mcr |= MAC_MCR_FORCE_DPX;
 
-       if (mac->phy_dev->pause)
-               mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+               if (mac->phy_dev->pause)
+                       rmt_adv = LPA_PAUSE_CAP;
+               if (mac->phy_dev->asym_pause)
+                       rmt_adv |= LPA_PAUSE_ASYM;
+
+               if (mac->phy_dev->advertising & ADVERTISED_Pause)
+                       lcl_adv |= ADVERTISE_PAUSE_CAP;
+               if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+                       lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+               flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+               if (flowctrl & FLOW_CTRL_TX)
+                       mcr |= MAC_MCR_FORCE_TX_FC;
+               if (flowctrl & FLOW_CTRL_RX)
+                       mcr |= MAC_MCR_FORCE_RX_FC;
+
+               netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+                         flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+                         flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+       }
 
        mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
 
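For reference, the pause handling added above resolves local and link-partner advertisement through mii_resolve_flowctrl_fdx(), which follows the IEEE 802.3 Annex 28B full-duplex rules. A minimal userspace sketch of that resolution follows; the constants and helper are illustrative stand-ins, not the kernel's <linux/mii.h> definitions.

/* Simplified model of full-duplex pause resolution (IEEE 802.3 Annex 28B),
 * mirroring the logic of mii_resolve_flowctrl_fdx().  The bit values are
 * illustrative, not the ADVERTISE_ or FLOW_CTRL_ constants from the kernel.
 */
#include <stdio.h>

#define PAUSE_CAP	0x1	/* symmetric pause advertised */
#define PAUSE_ASYM	0x2	/* asymmetric pause advertised */
#define FC_TX		0x1	/* we may send pause frames */
#define FC_RX		0x2	/* we act on received pause frames */

static unsigned int resolve_flowctrl_fdx(unsigned int lcl, unsigned int rmt)
{
	unsigned int cap = 0;

	if (lcl & rmt & PAUSE_CAP)
		cap = FC_TX | FC_RX;		/* both ends symmetric */
	else if (lcl & rmt & PAUSE_ASYM) {
		if (lcl & PAUSE_CAP)
			cap = FC_RX;		/* we act on the peer's pause frames */
		else if (rmt & PAUSE_CAP)
			cap = FC_TX;		/* we send pause frames to the peer */
	}

	return cap;
}

int main(void)
{
	unsigned int fc = resolve_flowctrl_fdx(PAUSE_CAP | PAUSE_ASYM, PAUSE_ASYM);

	printf("rx pause %s, tx pause %s\n",
	       fc & FC_RX ? "enabled" : "disabled",
	       fc & FC_TX ? "enabled" : "disabled");
	return 0;
}
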
@@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        u32 val, ge_mode;
 
        np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+       if (!np && of_phy_is_fixed_link(mac->of_node))
+               if (!of_phy_register_fixed_link(mac->of_node))
+                       np = of_node_get(mac->of_node);
        if (!np)
                return -ENODEV;
 
        switch (of_get_phy_mode(np)) {
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII:
                ge_mode = 0;
                break;
@@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        mac->phy_dev->autoneg = AUTONEG_ENABLE;
        mac->phy_dev->speed = 0;
        mac->phy_dev->duplex = 0;
-       mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+       mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+                                  SUPPORTED_Asym_Pause;
        mac->phy_dev->advertising = mac->phy_dev->supported |
                                    ADVERTISED_Autoneg;
        phy_start_aneg(mac->phy_dev);
@@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
        return 0;
 
 err_free_bus:
-       kfree(eth->mii_bus);
+       mdiobus_free(eth->mii_bus);
 
 err_put_node:
        of_node_put(mii_np);
@@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
 
        mdiobus_unregister(eth->mii_bus);
        of_node_put(eth->mii_bus->dev.of_node);
-       kfree(eth->mii_bus);
+       mdiobus_free(eth->mii_bus);
 }
 
 static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
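For reference, the two kfree() -> mdiobus_free() conversions above pair the bus teardown with its allocator: a bus obtained from mdiobus_alloc() carries an embedded struct device, so it has to be returned through mdiobus_free() rather than plain kfree(). A condensed sketch of that lifecycle is below; the mdiobus_* and of_mdiobus_register() calls are the real MDIO core API, while the example_* names and error handling are illustrative only.

/* Sketch only: a real driver also sets a reset hook, reads properties from
 * the device tree node, and has fuller error handling.
 */
#include <linux/device.h>
#include <linux/phy.h>
#include <linux/of_mdio.h>

static int example_mdio_read(struct mii_bus *bus, int addr, int reg)
{
	return 0xffff;				/* placeholder: no PHY attached */
}

static int example_mdio_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	return 0;
}

static int example_mdio_init(struct device *dev, struct device_node *mii_np)
{
	struct mii_bus *bus;
	int ret;

	bus = mdiobus_alloc();			/* allocates the bus plus its embedded device */
	if (!bus)
		return -ENOMEM;

	bus->name = "example-mdio";
	bus->read = example_mdio_read;
	bus->write = example_mdio_write;
	bus->parent = dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));

	ret = of_mdiobus_register(bus, mii_np);
	if (ret) {
		mdiobus_free(bus);		/* pairs with mdiobus_alloc(); plain kfree()
						 * would bypass the MDIO core's device teardown */
		return ret;
	}

	return 0;
}
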
@@ -453,20 +481,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
 /* the qdma core needs scratch memory to be setup */
 static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
-       dma_addr_t phy_ring_head, phy_ring_tail;
+       dma_addr_t phy_ring_tail;
        int cnt = MTK_DMA_SIZE;
        dma_addr_t dma_addr;
        int i;
 
        eth->scratch_ring = dma_alloc_coherent(eth->dev,
                                               cnt * sizeof(struct mtk_tx_dma),
-                                              &phy_ring_head,
+                                              &eth->phy_scratch_ring,
                                               GFP_ATOMIC | __GFP_ZERO);
        if (unlikely(!eth->scratch_ring))
                return -ENOMEM;
 
        eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
                                    GFP_KERNEL);
+       if (unlikely(!eth->scratch_head))
+               return -ENOMEM;
+
        dma_addr = dma_map_single(eth->dev,
                                  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
                                  DMA_FROM_DEVICE);
@@ -474,19 +505,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
                return -ENOMEM;
 
        memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
-       phy_ring_tail = phy_ring_head +
+       phy_ring_tail = eth->phy_scratch_ring +
                        (sizeof(struct mtk_tx_dma) * (cnt - 1));
 
        for (i = 0; i < cnt; i++) {
                eth->scratch_ring[i].txd1 =
                                        (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
                if (i < cnt - 1)
-                       eth->scratch_ring[i].txd2 = (phy_ring_head +
+                       eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
                                ((i + 1) * sizeof(struct mtk_tx_dma)));
                eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
        }
 
-       mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+       mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
        mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
        mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
        mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
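For reference, the hunks above keep the free-queue ring's bus address in eth->phy_scratch_ring (so mtk_dma_free() can release it later) and add a missing allocation check. The ring itself is one coherent block of descriptors, each txd2 holding the bus address of the next descriptor, with head and tail programmed into the FQ registers. A small userspace model of that layout is below; bus addresses are faked as constants and the names only loosely follow the driver.

/* Userspace model of the QDMA free-queue scratch ring set up above. */
#include <stdio.h>
#include <stdlib.h>

#define DMA_SIZE	8	/* MTK_DMA_SIZE is larger; kept small here */
#define PAGE_SZ		2048	/* stands in for MTK_QDMA_PAGE_SIZE */

struct tx_desc {
	unsigned int txd1;	/* buffer bus address */
	unsigned int txd2;	/* next descriptor bus address, 0 on the last */
	unsigned int txd3;	/* buffer length */
};

int main(void)
{
	struct tx_desc *ring = calloc(DMA_SIZE, sizeof(*ring));
	unsigned int ring_base = 0x10000000u;	/* pretend bus address of the ring */
	unsigned int buf_base  = 0x20000000u;	/* pretend bus address of the pages */
	int i;

	if (!ring)
		return 1;

	for (i = 0; i < DMA_SIZE; i++) {
		ring[i].txd1 = buf_base + i * PAGE_SZ;
		if (i < DMA_SIZE - 1)
			ring[i].txd2 = ring_base + (i + 1) * (unsigned int)sizeof(*ring);
		ring[i].txd3 = PAGE_SZ;
	}

	printf("FQ_HEAD=0x%08x FQ_TAIL=0x%08x\n",
	       ring_base, ring_base + (DMA_SIZE - 1) * (unsigned int)sizeof(*ring));

	free(ring);
	return 0;
}
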
@@ -643,7 +674,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 err_dma:
        do {
-               tx_buf = mtk_desc_to_tx_buf(ring, txd);
+               tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
                /* unmap dma */
                mtk_tx_unmap(&dev->dev, tx_buf);
@@ -798,6 +829,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                                          DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
                        skb_free_frag(new_data);
+                       netdev->stats.rx_dropped++;
                        goto release_desc;
                }
 
@@ -805,6 +837,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                skb = build_skb(data, ring->frag_size);
                if (unlikely(!skb)) {
                        put_page(virt_to_head_page(new_data));
+                       netdev->stats.rx_dropped++;
                        goto release_desc;
                }
                skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
@@ -893,7 +926,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
                }
                mtk_tx_unmap(eth->dev, tx_buf);
 
-               ring->last_free->txd2 = next_cpu;
                ring->last_free = desc;
                atomic_inc(&ring->free_count);
 
@@ -999,9 +1031,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 
        atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
        ring->next_free = &ring->dma[0];
-       ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
-       ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
-                             MAX_SKB_FRAGS);
+       ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+       ring->thresh = MAX_SKB_FRAGS;
 
        /* make sure that all changes to the dma ring are flushed before we
         * continue
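This is the hunk the subject line refers to: last_free now points at the true final descriptor (MTK_DMA_SIZE - 1 instead of MTK_DMA_SIZE - 2) and the queue-wake threshold is relaxed from a quarter of the ring to MAX_SKB_FRAGS. As a generic illustration only (not this driver's exact bookkeeping), a descriptor ring commonly keeps exactly one slot unused so that head == tail can only mean "empty":

/* Generic ring-occupancy illustration: one slot is sacrificed to tell
 * "full" from "empty" without a separate counter.
 */
#include <stdio.h>

#define RING_SIZE 8

static unsigned int head, tail;		/* producer / consumer indices */

static int ring_full(void)
{
	return ((head + 1) % RING_SIZE) == tail;
}

static int ring_empty(void)
{
	return head == tail;
}

int main(void)
{
	int queued = 0;

	while (!ring_full()) {			/* fill every usable slot */
		head = (head + 1) % RING_SIZE;
		queued++;
	}

	printf("usable descriptors: %d of %d (one kept empty)\n",
	       queued, RING_SIZE);
	printf("empty? %d  full? %d\n", ring_empty(), ring_full());
	return 0;
}
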
@@ -1179,6 +1210,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
        for (i = 0; i < MTK_MAC_COUNT; i++)
                if (eth->netdev[i])
                        netdev_reset_queue(eth->netdev[i]);
+       if (eth->scratch_ring) {
+               dma_free_coherent(eth->dev,
+                                 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+                                 eth->scratch_ring,
+                                 eth->phy_scratch_ring);
+               eth->scratch_ring = NULL;
+               eth->phy_scratch_ring = 0;
+       }
        mtk_tx_clean(eth);
        mtk_rx_clean(eth);
        kfree(eth->scratch_head);
@@ -1241,7 +1280,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
        mtk_w32(eth,
                MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
                MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
-               MTK_RX_BT_32DWORDS,
+               MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
                MTK_QDMA_GLO_CFG);
 
        return 0;
@@ -1355,7 +1394,7 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
 
        /* disable delay and normal interrupt */
        mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-       mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+       mtk_irq_disable(eth, ~0);
        mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
        mtk_w32(eth, 0, MTK_RST_GL);
 
@@ -1669,7 +1708,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
        mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
        SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-       eth->netdev[id]->watchdog_timeo = HZ;
+       eth->netdev[id]->watchdog_timeo = 5 * HZ;
        eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
        eth->netdev[id]->base_addr = (unsigned long)eth->base;
        eth->netdev[id]->vlan_features = MTK_HW_FEATURES &