stmmac: support extend descriptors
[cascardo/linux.git] / drivers / net / ethernet / stmicro / stmmac / stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   You should have received a copy of the GNU General Public License along with
17   this program; if not, write to the Free Software Foundation, Inc.,
18   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20   The full GNU General Public License is included in this distribution in
21   the file called "COPYING".
22
23   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
24
25   Documentation available at:
26         http://www.stlinux.com
27   Support available at:
28         https://bugzilla.stlinux.com/
29 *******************************************************************************/
30
31 #include <linux/clk.h>
32 #include <linux/kernel.h>
33 #include <linux/interrupt.h>
34 #include <linux/ip.h>
35 #include <linux/tcp.h>
36 #include <linux/skbuff.h>
37 #include <linux/ethtool.h>
38 #include <linux/if_ether.h>
39 #include <linux/crc32.h>
40 #include <linux/mii.h>
41 #include <linux/if.h>
42 #include <linux/if_vlan.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/slab.h>
45 #include <linux/prefetch.h>
46 #ifdef CONFIG_STMMAC_DEBUG_FS
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #endif
50 #include "stmmac.h"
51
52 #undef STMMAC_DEBUG
53 /*#define STMMAC_DEBUG*/
54 #ifdef STMMAC_DEBUG
55 #define DBG(nlevel, klevel, fmt, args...) \
56                 ((void)(netif_msg_##nlevel(priv) && \
57                 printk(KERN_##klevel fmt, ## args)))
58 #else
59 #define DBG(nlevel, klevel, fmt, args...) do { } while (0)
60 #endif
61
62 #undef STMMAC_RX_DEBUG
63 /*#define STMMAC_RX_DEBUG*/
64 #ifdef STMMAC_RX_DEBUG
65 #define RX_DBG(fmt, args...)  printk(fmt, ## args)
66 #else
67 #define RX_DBG(fmt, args...)  do { } while (0)
68 #endif
69
70 #undef STMMAC_XMIT_DEBUG
71 /*#define STMMAC_XMIT_DEBUG*/
72 #ifdef STMMAC_XMIT_DEBUG
73 #define TX_DBG(fmt, args...)  printk(fmt, ## args)
74 #else
75 #define TX_DBG(fmt, args...)  do { } while (0)
76 #endif
77
78 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
79 #define JUMBO_LEN       9000
80
81 /* Module parameters */
82 #define TX_TIMEO 5000 /* default 5 seconds */
83 static int watchdog = TX_TIMEO;
84 module_param(watchdog, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");
86
87 static int debug = -1;          /* -1: default, 0: no output, 16:  all */
88 module_param(debug, int, S_IRUGO | S_IWUSR);
89 MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
90
91 int phyaddr = -1;
92 module_param(phyaddr, int, S_IRUGO);
93 MODULE_PARM_DESC(phyaddr, "Physical device address");
94
95 #define DMA_TX_SIZE 256
96 static int dma_txsize = DMA_TX_SIZE;
97 module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
98 MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
99
100 #define DMA_RX_SIZE 256
101 static int dma_rxsize = DMA_RX_SIZE;
102 module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
103 MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
104
105 static int flow_ctrl = FLOW_OFF;
106 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
107 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
108
109 static int pause = PAUSE_TIME;
110 module_param(pause, int, S_IRUGO | S_IWUSR);
111 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
112
113 #define TC_DEFAULT 64
114 static int tc = TC_DEFAULT;
115 module_param(tc, int, S_IRUGO | S_IWUSR);
116 MODULE_PARM_DESC(tc, "DMA threshold control value");
117
118 #define DMA_BUFFER_SIZE BUF_SIZE_2KiB
119 static int buf_sz = DMA_BUFFER_SIZE;
120 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
121 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
122
123 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
124                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
125                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
126
127 #define STMMAC_DEFAULT_LPI_TIMER        1000
128 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
129 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
130 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
131 #define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
132
133 /* By default the driver will use the ring mode to manage tx and rx descriptors
134  * but passing this value so user can force to use the chain instead of the ring
135  */
136 static unsigned int chain_mode;
137 module_param(chain_mode, int, S_IRUGO);
138 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
139
140 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
141
142 #ifdef CONFIG_STMMAC_DEBUG_FS
143 static int stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(void);
145 #endif
146
147 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
148
149 /**
150  * stmmac_verify_args - verify the driver parameters.
151  * Description: it verifies if some wrong parameter is passed to the driver.
152  * Note that wrong parameters are replaced with the default values.
153  */
154 static void stmmac_verify_args(void)
155 {
156         if (unlikely(watchdog < 0))
157                 watchdog = TX_TIMEO;
158         if (unlikely(dma_rxsize < 0))
159                 dma_rxsize = DMA_RX_SIZE;
160         if (unlikely(dma_txsize < 0))
161                 dma_txsize = DMA_TX_SIZE;
162         if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
163                 buf_sz = DMA_BUFFER_SIZE;
164         if (unlikely(flow_ctrl > 1))
165                 flow_ctrl = FLOW_AUTO;
166         else if (likely(flow_ctrl < 0))
167                 flow_ctrl = FLOW_OFF;
168         if (unlikely((pause < 0) || (pause > 0xffff)))
169                 pause = PAUSE_TIME;
170         if (eee_timer < 0)
171                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
172 }
173
174 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
175 {
176         u32 clk_rate;
177
178         clk_rate = clk_get_rate(priv->stmmac_clk);
179
180         /* Platform provided default clk_csr would be assumed valid
181          * for all other cases except for the below mentioned ones. */
182         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
183                 if (clk_rate < CSR_F_35M)
184                         priv->clk_csr = STMMAC_CSR_20_35M;
185                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
186                         priv->clk_csr = STMMAC_CSR_35_60M;
187                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
188                         priv->clk_csr = STMMAC_CSR_60_100M;
189                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
190                         priv->clk_csr = STMMAC_CSR_100_150M;
191                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
192                         priv->clk_csr = STMMAC_CSR_150_250M;
193                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
194                         priv->clk_csr = STMMAC_CSR_250_300M;
195         } /* For values higher than the IEEE 802.3 specified frequency
196            * we can not estimate the proper divider as it is not known
197            * the frequency of clk_csr_i. So we do not change the default
198            * divider. */
199 }
200
#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
/* Dump @len bytes of @buf as rows of 16 hex bytes (debug builds only). */
static void print_pkt(unsigned char *buf, int len)
{
        int off;

        pr_info("len = %d byte, buf addr: 0x%p", len, buf);
        for (off = 0; off < len; off++) {
                /* Start a new row, labelled with the offset, every 16 bytes */
                if (!(off % 16))
                        pr_info("\n %03x:", off);
                pr_info(" %02x", buf[off]);
        }
        pr_info("\n");
}
#endif
214
215 /* minimum number of free TX descriptors required to wake up TX process */
216 #define STMMAC_TX_THRESH(x)     (x->dma_tx_size/4)
217
218 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
219 {
220         return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
221 }
222
223 /* On some ST platforms, some HW system configuraton registers have to be
224  * set according to the link speed negotiated.
225  */
226 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
227 {
228         struct phy_device *phydev = priv->phydev;
229
230         if (likely(priv->plat->fix_mac_speed))
231                 priv->plat->fix_mac_speed(priv->plat->bsp_priv,
232                                           phydev->speed);
233 }
234
235 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
236 {
237         /* Check and enter in LPI mode */
238         if ((priv->dirty_tx == priv->cur_tx) &&
239             (priv->tx_path_in_lpi_mode == false))
240                 priv->hw->mac->set_eee_mode(priv->ioaddr);
241 }
242
/**
 * stmmac_disable_eee_mode - force the TX path out of the EEE LPI state
 * @priv: driver private structure
 * Description: resets the MAC's EEE mode, stops the periodic LPI entry
 * timer and records that the TX path is no longer in LPI.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
        /* Exit and disable EEE in case we are in LPI state. */
        priv->hw->mac->reset_eee_mode(priv->ioaddr);
        /* Stop the re-arming timer before clearing the state flag, so a
         * concurrent callback cannot put us straight back into LPI. */
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
}
250
251 /**
252  * stmmac_eee_ctrl_timer
253  * @arg : data hook
254  * Description:
255  *  If there is no data transfer and if we are not in LPI state,
256  *  then MAC Transmitter can be moved to LPI state.
257  */
258 static void stmmac_eee_ctrl_timer(unsigned long arg)
259 {
260         struct stmmac_priv *priv = (struct stmmac_priv *)arg;
261
262         stmmac_enable_eee_mode(priv);
263         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
264 }
265
266 /**
267  * stmmac_eee_init
268  * @priv: private device pointer
269  * Description:
270  *  If the EEE support has been enabled while configuring the driver,
271  *  if the GMAC actually supports the EEE (from the HW cap reg) and the
272  *  phy can also manage EEE, so enable the LPI state and start the timer
273  *  to verify if the tx path can enter in LPI state.
274  */
275 bool stmmac_eee_init(struct stmmac_priv *priv)
276 {
277         bool ret = false;
278
279         /* MAC core supports the EEE feature. */
280         if (priv->dma_cap.eee) {
281                 /* Check if the PHY supports EEE */
282                 if (phy_init_eee(priv->phydev, 1))
283                         goto out;
284
285                 priv->eee_active = 1;
286                 init_timer(&priv->eee_ctrl_timer);
287                 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
288                 priv->eee_ctrl_timer.data = (unsigned long)priv;
289                 priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
290                 add_timer(&priv->eee_ctrl_timer);
291
292                 priv->hw->mac->set_eee_timer(priv->ioaddr,
293                                              STMMAC_DEFAULT_LIT_LS_TIMER,
294                                              priv->tx_lpi_timer);
295
296                 pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
297
298                 ret = true;
299         }
300 out:
301         return ret;
302 }
303
static void stmmac_eee_adjust(struct stmmac_priv *priv)
{
        /* When EEE has already been initialised, the PLS bit in the
         * LPI ctrl & status register must track the PHY link status,
         * so mirror it here on every link change.
         */
        if (priv->eee_enabled)
                priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
}
313
314 /**
315  * stmmac_adjust_link
316  * @dev: net device structure
317  * Description: it adjusts the link parameters.
318  */
319 static void stmmac_adjust_link(struct net_device *dev)
320 {
321         struct stmmac_priv *priv = netdev_priv(dev);
322         struct phy_device *phydev = priv->phydev;
323         unsigned long flags;
324         int new_state = 0;
325         unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
326
327         if (phydev == NULL)
328                 return;
329
330         DBG(probe, DEBUG, "stmmac_adjust_link: called.  address %d link %d\n",
331             phydev->addr, phydev->link);
332
333         spin_lock_irqsave(&priv->lock, flags);
334
335         if (phydev->link) {
336                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
337
338                 /* Now we make sure that we can be in full duplex mode.
339                  * If not, we operate in half-duplex mode. */
340                 if (phydev->duplex != priv->oldduplex) {
341                         new_state = 1;
342                         if (!(phydev->duplex))
343                                 ctrl &= ~priv->hw->link.duplex;
344                         else
345                                 ctrl |= priv->hw->link.duplex;
346                         priv->oldduplex = phydev->duplex;
347                 }
348                 /* Flow Control operation */
349                 if (phydev->pause)
350                         priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
351                                                  fc, pause_time);
352
353                 if (phydev->speed != priv->speed) {
354                         new_state = 1;
355                         switch (phydev->speed) {
356                         case 1000:
357                                 if (likely(priv->plat->has_gmac))
358                                         ctrl &= ~priv->hw->link.port;
359                                         stmmac_hw_fix_mac_speed(priv);
360                                 break;
361                         case 100:
362                         case 10:
363                                 if (priv->plat->has_gmac) {
364                                         ctrl |= priv->hw->link.port;
365                                         if (phydev->speed == SPEED_100) {
366                                                 ctrl |= priv->hw->link.speed;
367                                         } else {
368                                                 ctrl &= ~(priv->hw->link.speed);
369                                         }
370                                 } else {
371                                         ctrl &= ~priv->hw->link.port;
372                                 }
373                                 stmmac_hw_fix_mac_speed(priv);
374                                 break;
375                         default:
376                                 if (netif_msg_link(priv))
377                                         pr_warning("%s: Speed (%d) is not 10"
378                                        " or 100!\n", dev->name, phydev->speed);
379                                 break;
380                         }
381
382                         priv->speed = phydev->speed;
383                 }
384
385                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
386
387                 if (!priv->oldlink) {
388                         new_state = 1;
389                         priv->oldlink = 1;
390                 }
391         } else if (priv->oldlink) {
392                 new_state = 1;
393                 priv->oldlink = 0;
394                 priv->speed = 0;
395                 priv->oldduplex = -1;
396         }
397
398         if (new_state && netif_msg_link(priv))
399                 phy_print_status(phydev);
400
401         stmmac_eee_adjust(priv);
402
403         spin_unlock_irqrestore(&priv->lock, flags);
404
405         DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
406 }
407
408 /**
409  * stmmac_init_phy - PHY initialization
410  * @dev: net device structure
411  * Description: it initializes the driver's PHY state, and attaches the PHY
412  * to the mac driver.
413  *  Return value:
414  *  0 on success
415  */
416 static int stmmac_init_phy(struct net_device *dev)
417 {
418         struct stmmac_priv *priv = netdev_priv(dev);
419         struct phy_device *phydev;
420         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
421         char bus_id[MII_BUS_ID_SIZE];
422         int interface = priv->plat->interface;
423         priv->oldlink = 0;
424         priv->speed = 0;
425         priv->oldduplex = -1;
426
427         if (priv->plat->phy_bus_name)
428                 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
429                                 priv->plat->phy_bus_name, priv->plat->bus_id);
430         else
431                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
432                                 priv->plat->bus_id);
433
434         snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
435                  priv->plat->phy_addr);
436         pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id_fmt);
437
438         phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
439
440         if (IS_ERR(phydev)) {
441                 pr_err("%s: Could not attach to PHY\n", dev->name);
442                 return PTR_ERR(phydev);
443         }
444
445         /* Stop Advertising 1000BASE Capability if interface is not GMII */
446         if ((interface == PHY_INTERFACE_MODE_MII) ||
447             (interface == PHY_INTERFACE_MODE_RMII))
448                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
449                                          SUPPORTED_1000baseT_Full);
450
451         /*
452          * Broken HW is sometimes missing the pull-up resistor on the
453          * MDIO line, which results in reads to non-existent devices returning
454          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
455          * device as well.
456          * Note: phydev->phy_id is the result of reading the UID PHY registers.
457          */
458         if (phydev->phy_id == 0) {
459                 phy_disconnect(phydev);
460                 return -ENODEV;
461         }
462         pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
463                  " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
464
465         priv->phydev = phydev;
466
467         return 0;
468 }
469
470 /**
471  * stmmac_display_ring
472  * @p: pointer to the ring.
473  * @size: size of the ring.
474  * Description: display the control/status and buffer descriptors.
475  */
476 static void stmmac_display_ring(void *head, int size, int extend_desc)
477 {
478         int i;
479         struct dma_extended_desc *ep = (struct dma_extended_desc *) head;
480         struct dma_desc *p = (struct dma_desc *) head;
481
482         for (i = 0; i < size; i++) {
483                 u64 x;
484                 if (extend_desc) {
485                         x = *(u64 *) ep;
486                         pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
487                                 i, (unsigned int) virt_to_phys(ep),
488                                 (unsigned int) x, (unsigned int) (x >> 32),
489                                 ep->basic.des2, ep->basic.des3);
490                         ep++;
491                 } else {
492                         x = *(u64 *) p;
493                         pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
494                                 i, (unsigned int) virt_to_phys(p),
495                                 (unsigned int) x, (unsigned int) (x >> 32),
496                                 p->des2, p->des3);
497                         p++;
498                 }
499                 pr_info("\n");
500         }
501 }
502
503 static void stmmac_display_rings(struct stmmac_priv *priv)
504 {
505         unsigned int txsize = priv->dma_tx_size;
506         unsigned int rxsize = priv->dma_rx_size;
507
508         if (priv->extend_desc) {
509                 pr_info("Extended RX descriptor ring:\n");
510                 stmmac_display_ring((void *) priv->dma_erx, rxsize, 1);
511                 pr_info("Extended TX descriptor ring:\n");
512                 stmmac_display_ring((void *) priv->dma_etx, txsize, 1);
513         } else {
514                 pr_info("RX descriptor ring:\n");
515                 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
516                 pr_info("TX descriptor ring:\n");
517                 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
518         }
519 }
520
521 static int stmmac_set_bfsize(int mtu, int bufsize)
522 {
523         int ret = bufsize;
524
525         if (mtu >= BUF_SIZE_4KiB)
526                 ret = BUF_SIZE_8KiB;
527         else if (mtu >= BUF_SIZE_2KiB)
528                 ret = BUF_SIZE_4KiB;
529         else if (mtu >= DMA_BUFFER_SIZE)
530                 ret = BUF_SIZE_2KiB;
531         else
532                 ret = DMA_BUFFER_SIZE;
533
534         return ret;
535 }
536
537 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
538 {
539         int i;
540         unsigned int txsize = priv->dma_tx_size;
541         unsigned int rxsize = priv->dma_rx_size;
542
543         /* Clear the Rx/Tx descriptors */
544         for (i = 0; i < rxsize; i++)
545                 if (priv->extend_desc)
546                         priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
547                                                      priv->use_riwt, priv->mode,
548                                                      (i == rxsize - 1));
549                 else
550                         priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
551                                                      priv->use_riwt, priv->mode,
552                                                      (i == rxsize - 1));
553         for (i = 0; i < txsize; i++)
554                 if (priv->extend_desc)
555                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
556                                                      priv->mode,
557                                                      (i == txsize - 1));
558                 else
559                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
560                                                      priv->mode,
561                                                      (i == txsize - 1));
562 }
563
564 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
565                                   int i)
566 {
567         struct sk_buff *skb;
568
569         skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
570                                  GFP_KERNEL);
571         if (unlikely(skb == NULL)) {
572                 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
573                 return 1;
574         }
575         skb_reserve(skb, NET_IP_ALIGN);
576         priv->rx_skbuff[i] = skb;
577         priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
578                                                 priv->dma_buf_sz,
579                                                 DMA_FROM_DEVICE);
580
581         p->des2 = priv->rx_skbuff_dma[i];
582
583         if ((priv->mode == STMMAC_RING_MODE) &&
584             (priv->dma_buf_sz == BUF_SIZE_16KiB))
585                 priv->hw->ring->init_desc3(p);
586
587         return 0;
588 }
589
590 /**
591  * init_dma_desc_rings - init the RX/TX descriptor rings
592  * @dev: net device structure
593  * Description:  this function initializes the DMA RX/TX descriptors
594  * and allocates the socket buffers. It suppors the chained and ring
595  * modes.
596  */
597 static void init_dma_desc_rings(struct net_device *dev)
598 {
599         int i;
600         struct stmmac_priv *priv = netdev_priv(dev);
601         unsigned int txsize = priv->dma_tx_size;
602         unsigned int rxsize = priv->dma_rx_size;
603         unsigned int bfsize = 0;
604
605         /* Set the max buffer size according to the DESC mode
606          * and the MTU. Note that RING mode allows 16KiB bsize. */
607         if (priv->mode == STMMAC_RING_MODE)
608                 bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
609
610         if (bfsize < BUF_SIZE_16KiB)
611                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
612
613         DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
614             txsize, rxsize, bfsize);
615
616         if (priv->extend_desc) {
617                 priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
618                                                    sizeof(struct
619                                                           dma_extended_desc),
620                                                    &priv->dma_rx_phy,
621                                                    GFP_KERNEL);
622                 priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
623                                                    sizeof(struct
624                                                           dma_extended_desc),
625                                                    &priv->dma_tx_phy,
626                                                    GFP_KERNEL);
627                 if ((!priv->dma_erx) || (!priv->dma_etx))
628                         return;
629         } else {
630                 priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
631                                                   sizeof(struct dma_desc),
632                                                   &priv->dma_rx_phy,
633                                                   GFP_KERNEL);
634                 priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
635                                                   sizeof(struct dma_desc),
636                                                   &priv->dma_tx_phy,
637                                                   GFP_KERNEL);
638                 if ((!priv->dma_rx) || (!priv->dma_tx))
639                         return;
640         }
641
642         priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
643                                             GFP_KERNEL);
644         priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
645                                         GFP_KERNEL);
646         priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
647                                         GFP_KERNEL);
648         if (netif_msg_drv(priv))
649                 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
650                          (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
651
652         /* RX INITIALIZATION */
653         DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n");
654         for (i = 0; i < rxsize; i++) {
655                 struct dma_desc *p;
656                 if (priv->extend_desc)
657                         p = &((priv->dma_erx + i)->basic);
658                 else
659                         p = priv->dma_rx + i;
660
661                 if (stmmac_init_rx_buffers(priv, p, i))
662                         break;
663
664                 DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
665                         priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
666         }
667         priv->cur_rx = 0;
668         priv->dirty_rx = (unsigned int)(i - rxsize);
669         priv->dma_buf_sz = bfsize;
670         buf_sz = bfsize;
671
672         /* Setup the chained descriptor addresses */
673         if (priv->mode == STMMAC_CHAIN_MODE) {
674                 if (priv->extend_desc) {
675                         priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
676                                               rxsize, 1);
677                         priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
678                                               txsize, 1);
679                 } else {
680                         priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
681                                               rxsize, 0);
682                         priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
683                                               txsize, 0);
684                 }
685         }
686
687         /* TX INITIALIZATION */
688         for (i = 0; i < txsize; i++) {
689                 struct dma_desc *p;
690                 if (priv->extend_desc)
691                         p = &((priv->dma_etx + i)->basic);
692                 else
693                         p = priv->dma_tx + i;
694                 p->des2 = 0;
695                 priv->tx_skbuff[i] = NULL;
696         }
697
698         priv->dirty_tx = 0;
699         priv->cur_tx = 0;
700
701         stmmac_clear_descriptors(priv);
702
703         if (netif_msg_hw(priv))
704                 stmmac_display_rings(priv);
705 }
706
707 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
708 {
709         int i;
710
711         for (i = 0; i < priv->dma_rx_size; i++) {
712                 if (priv->rx_skbuff[i]) {
713                         dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
714                                          priv->dma_buf_sz, DMA_FROM_DEVICE);
715                         dev_kfree_skb_any(priv->rx_skbuff[i]);
716                 }
717                 priv->rx_skbuff[i] = NULL;
718         }
719 }
720
721 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
722 {
723         int i;
724
725         for (i = 0; i < priv->dma_tx_size; i++) {
726                 if (priv->tx_skbuff[i] != NULL) {
727                         struct dma_desc *p;
728                         if (priv->extend_desc)
729                                 p = &((priv->dma_etx + i)->basic);
730                         else
731                                 p = priv->dma_tx + i;
732
733                         if (p->des2)
734                                 dma_unmap_single(priv->device, p->des2,
735                                                  priv->hw->desc->get_tx_len(p),
736                                                  DMA_TO_DEVICE);
737                         dev_kfree_skb_any(priv->tx_skbuff[i]);
738                         priv->tx_skbuff[i] = NULL;
739                 }
740         }
741 }
742
743 static void free_dma_desc_resources(struct stmmac_priv *priv)
744 {
745         /* Release the DMA TX/RX socket buffers */
746         dma_free_rx_skbufs(priv);
747         dma_free_tx_skbufs(priv);
748
749         /* Free the region of consistent memory previously allocated for
750          * the DMA */
751         if (!priv->extend_desc) {
752                 dma_free_coherent(priv->device,
753                                   priv->dma_tx_size * sizeof(struct dma_desc),
754                                   priv->dma_tx, priv->dma_tx_phy);
755                 dma_free_coherent(priv->device,
756                                   priv->dma_rx_size * sizeof(struct dma_desc),
757                                   priv->dma_rx, priv->dma_rx_phy);
758         } else {
759                 dma_free_coherent(priv->device, priv->dma_tx_size *
760                                   sizeof(struct dma_extended_desc),
761                                   priv->dma_etx, priv->dma_tx_phy);
762                 dma_free_coherent(priv->device, priv->dma_rx_size *
763                                   sizeof(struct dma_extended_desc),
764                                   priv->dma_erx, priv->dma_rx_phy);
765         }
766         kfree(priv->rx_skbuff_dma);
767         kfree(priv->rx_skbuff);
768         kfree(priv->tx_skbuff);
769 }
770
771 /**
772  *  stmmac_dma_operation_mode - HW DMA operation mode
773  *  @priv : pointer to the private device structure.
774  *  Description: it sets the DMA operation mode: tx/rx DMA thresholds
775  *  or Store-And-Forward capability.
776  */
777 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
778 {
779         if (likely(priv->plat->force_sf_dma_mode ||
780                 ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
781                 /*
782                  * In case of GMAC, SF mode can be enabled
783                  * to perform the TX COE in HW. This depends on:
784                  * 1) TX COE if actually supported
785                  * 2) There is no bugged Jumbo frame support
786                  *    that needs to not insert csum in the TDES.
787                  */
788                 priv->hw->dma->dma_mode(priv->ioaddr,
789                                         SF_DMA_MODE, SF_DMA_MODE);
790                 tc = SF_DMA_MODE;
791         } else
792                 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
793 }
794
795 /**
796  * stmmac_tx_clean:
797  * @priv: private data pointer
798  * Description: it reclaims resources after transmission completes.
799  */
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;

	spin_lock(&priv->tx_lock);

	priv->xstats.tx_clean++;

	/* Reclaim descriptors between dirty_tx and cur_tx that the DMA
	 * has finished with (owner bit cleared by the hardware). */
	while (priv->dirty_tx != priv->cur_tx) {
		int last;
		unsigned int entry = priv->dirty_tx % txsize;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p;

		/* The basic descriptor is the first field of the extended
		 * one, so the cast yields the same entry either way. */
		if (priv->extend_desc)
			p = (struct dma_desc *) (priv->dma_etx + entry);
		else
			p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		/* Verify tx error by looking at the last segment. */
		last = priv->hw->desc->get_tx_ls(p);
		if (likely(last)) {
			int tx_error =
				priv->hw->desc->tx_status(&priv->dev->stats,
							  &priv->xstats, p,
							  priv->ioaddr);
			if (likely(tx_error == 0)) {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			} else
				priv->dev->stats.tx_errors++;
		}
		TX_DBG("%s: curr %d, dirty %d\n", __func__,
			priv->cur_tx, priv->dirty_tx);

		/* des2 holds the DMA-mapped buffer address; unmap it
		 * before releasing the skb. */
		if (likely(p->des2))
			dma_unmap_single(priv->device, p->des2,
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
		if (priv->mode == STMMAC_RING_MODE)
			priv->hw->ring->clean_desc3(p);

		if (likely(skb != NULL)) {
			dev_kfree_skb(skb);
			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		priv->dirty_tx++;
	}
	/* Wake the queue when enough room has been freed; re-check the
	 * condition under netif_tx_lock to avoid racing with the xmit
	 * path stopping the queue again. */
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
			TX_DBG("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}

	/* TX path is idle again: re-enter LPI and re-arm the EEE timer */
	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
	}
	spin_unlock(&priv->tx_lock);
}
872
/* Re-enable the DMA interrupts (paired with stmmac_disable_dma_irq,
 * called when NAPI polling completes). */
static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr);
}
877
/* Mask the DMA interrupts while RX/TX work is handled in NAPI context */
static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr);
}
882
883
884 /**
885  * stmmac_tx_err:
886  * @priv: pointer to the private device structure
887  * Description: it cleans the descriptors and restarts the transmission
888  * in case of errors.
889  */
890 static void stmmac_tx_err(struct stmmac_priv *priv)
891 {
892         int i;
893         int txsize = priv->dma_tx_size;
894         netif_stop_queue(priv->dev);
895
896         priv->hw->dma->stop_tx(priv->ioaddr);
897         dma_free_tx_skbufs(priv);
898         for (i = 0; i < txsize; i++)
899                 if (priv->extend_desc)
900                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
901                                                      priv->mode,
902                                                      (i == txsize - 1));
903                 else
904                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
905                                                      priv->mode,
906                                                      (i == txsize - 1));
907         priv->dirty_tx = 0;
908         priv->cur_tx = 0;
909         priv->hw->dma->start_tx(priv->ioaddr);
910
911         priv->dev->stats.tx_errors++;
912         netif_wake_queue(priv->dev);
913 }
914
/* Decode the DMA interrupt cause: defer RX/TX work to NAPI, bump the
 * TX threshold on FIFO underflow, or reset the TX path on hard error. */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	int status;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
	if (likely((status & handle_rx)) || (status & handle_tx)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* Interrupts stay masked until NAPI poll re-enables
			 * them via stmmac_enable_dma_irq(). */
			stmmac_disable_dma_irq(priv);
			__napi_schedule(&priv->napi);
		}
	}
	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
			tc += 64;
			priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error))
		/* NOTE(review): equality test, unlike the bitmask checks
		 * above; a tx_hard_error combined with any other status bit
		 * skips this recovery path — confirm whether
		 * (status & tx_hard_error) was intended. */
		stmmac_tx_err(priv);
}
936
937 static void stmmac_mmc_setup(struct stmmac_priv *priv)
938 {
939         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
940                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
941
942         /* Mask MMC irq, counters are managed in SW and registers
943          * are cleared on each READ eventually. */
944         dwmac_mmc_intr_all_mask(priv->ioaddr);
945
946         if (priv->dma_cap.rmon) {
947                 dwmac_mmc_ctrl(priv->ioaddr, mode);
948                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
949         } else
950                 pr_info(" No MAC Management Counters available\n");
951 }
952
953 static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
954 {
955         u32 hwid = priv->hw->synopsys_uid;
956
957         /* Only check valid Synopsys Id because old MAC chips
958          * have no HW registers where get the ID */
959         if (likely(hwid)) {
960                 u32 uid = ((hwid & 0x0000ff00) >> 8);
961                 u32 synid = (hwid & 0x000000ff);
962
963                 pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
964                         uid, synid);
965
966                 return synid;
967         }
968         return 0;
969 }
970
971 /**
972  * stmmac_selec_desc_mode
973  * @priv : private structure
974  * Description: select the Enhanced/Alternate or Normal descriptors
975  */
976 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
977 {
978         if (priv->plat->enh_desc) {
979                 pr_info(" Enhanced/Alternate descriptors\n");
980
981                 /* GMAC older than 3.50 has no extended descriptors */
982                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
983                         pr_info("\tEnabled extended descriptors\n");
984                         priv->extend_desc = 1;
985                 } else
986                         pr_warn("Extended descriptors not supported\n");
987
988                 priv->hw->desc = &enh_desc_ops;
989         } else {
990                 pr_info(" Normal descriptors\n");
991                 priv->hw->desc = &ndesc_ops;
992         }
993 }
994
995 /**
996  * stmmac_get_hw_features
997  * @priv : private device pointer
998  * Description:
999  *  new GMAC chip generations have a new register to indicate the
1000  *  presence of the optional feature/functions.
1001  *  This can be also used to override the value passed through the
1002  *  platform and necessary for old MAC10/100 and GMAC chips.
1003  */
/* Read the HW feature register (when the DMA exposes it) and cache each
 * capability bit in priv->dma_cap. Returns the raw register value, or 0
 * when the core has no feature register. */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 hw_cap = 0;

	if (priv->hw->dma->get_hw_feature) {
		hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);

		priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
		priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
		priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
		priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
		priv->dma_cap.multi_addr =
			(hw_cap & DMA_HW_FEAT_ADDMACADRSEL) >> 5;
		priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
		priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
		priv->dma_cap.pmt_remote_wake_up =
			(hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
		priv->dma_cap.pmt_magic_frame =
			(hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
		/* MMC (RMON) counters */
		priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
		/* IEEE 1588-2002 time stamping */
		priv->dma_cap.time_stamp =
			(hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
		/* IEEE 1588-2008 advanced time stamping */
		priv->dma_cap.atime_stamp =
			(hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
		/* 802.3az - Energy-Efficient Ethernet (EEE) */
		priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
		priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
		/* TX and RX csum */
		priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
		priv->dma_cap.rx_coe_type1 =
			(hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
		priv->dma_cap.rx_coe_type2 =
			(hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
		priv->dma_cap.rxfifo_over_2048 =
			(hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
		/* TX and RX number of channels */
		priv->dma_cap.number_rx_channel =
			(hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
		priv->dma_cap.number_tx_channel =
			(hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
		/* Alternate (enhanced) DESC mode */
		priv->dma_cap.enh_desc =
			(hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
	}

	return hw_cap;
}
1054
1055 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1056 {
1057         /* verify if the MAC address is valid, in case of failures it
1058          * generates a random MAC address */
1059         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1060                 priv->hw->mac->get_umac_addr((void __iomem *)
1061                                              priv->dev->base_addr,
1062                                              priv->dev->dev_addr, 0);
1063                 if  (!is_valid_ether_addr(priv->dev->dev_addr))
1064                         eth_hw_addr_random(priv->dev);
1065         }
1066         pr_warning("%s: device MAC address %pM\n", priv->dev->name,
1067                                                    priv->dev->dev_addr);
1068 }
1069
1070 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1071 {
1072         int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
1073         int mixed_burst = 0;
1074         int atds = 0;
1075
1076         /* Some DMA parameters can be passed from the platform;
1077          * in case of these are not passed we keep a default
1078          * (good for all the chips) and init the DMA! */
1079         if (priv->plat->dma_cfg) {
1080                 pbl = priv->plat->dma_cfg->pbl;
1081                 fixed_burst = priv->plat->dma_cfg->fixed_burst;
1082                 mixed_burst = priv->plat->dma_cfg->mixed_burst;
1083                 burst_len = priv->plat->dma_cfg->burst_len;
1084         }
1085
1086         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1087                 atds = 1;
1088
1089         return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
1090                                    burst_len, priv->dma_tx_phy,
1091                                    priv->dma_rx_phy, atds);
1092 }
1093
1094 /**
1095  * stmmac_tx_timer:
1096  * @data: data pointer
1097  * Description:
1098  * This is the timer handler to directly invoke the stmmac_tx_clean.
1099  */
static void stmmac_tx_timer(unsigned long data)
{
	/* data carries the stmmac_priv pointer set in
	 * stmmac_init_tx_coalesce() */
	struct stmmac_priv *priv = (struct stmmac_priv *)data;

	/* Reclaim completed TX descriptors from timer context */
	stmmac_tx_clean(priv);
}
1106
1107 /**
 * stmmac_init_tx_coalesce:
1109  * @priv: private data structure
1110  * Description:
1111  * This inits the transmit coalesce parameters: i.e. timer rate,
1112  * timer handler and default threshold used for enabling the
1113  * interrupt on completion bit.
1114  */
1115 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1116 {
1117         priv->tx_coal_frames = STMMAC_TX_FRAMES;
1118         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1119         init_timer(&priv->txtimer);
1120         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1121         priv->txtimer.data = (unsigned long)priv;
1122         priv->txtimer.function = stmmac_tx_timer;
1123         add_timer(&priv->txtimer);
1124 }
1125
1126 /**
1127  *  stmmac_open - open entry point of the driver
1128  *  @dev : pointer to the device structure.
1129  *  Description:
1130  *  This function is the open entry point of the driver.
1131  *  Return value:
1132  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1133  *  file on failure.
1134  */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	clk_prepare_enable(priv->stmmac_clk);

	stmmac_check_ether_addr(priv);

	ret = stmmac_init_phy(dev);
	if (unlikely(ret)) {
		pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
		goto open_error;
	}

	/* Create and initialize the TX/RX descriptors chains. */
	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	init_dma_desc_rings(dev);

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		pr_err("%s: DMA initialization failed\n", __func__);
		goto open_error;
	}

	/* Copy the MAC addr into the HW  */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* If required, perform hw setup of the bus. */
	if (priv->plat->bus_setup)
		priv->plat->bus_setup(priv->ioaddr);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);

	/* Request the main IRQ line */
	ret = request_irq(dev->irq, stmmac_interrupt,
			 IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
		       __func__, dev->irq, ret);
		goto open_error;
	}

	/* Request the Wake IRQ in case of another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			pr_err("%s: ERROR: allocating the ext WoL IRQ %d "
			       "(error: %d)\n", __func__, priv->wol_irq, ret);
			goto open_error_wolirq;
		}
	}

	/* Request the LPI IRQ when a separate line is used for it */
	if (priv->lpi_irq != -ENXIO) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
			       __func__, priv->lpi_irq, ret);
			goto open_error_lpiirq;
		}
	}

	/* Enable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	stmmac_mmc_setup(priv);

#ifdef CONFIG_STMMAC_DEBUG_FS
	ret = stmmac_init_fs(dev);
	if (ret < 0)
		pr_warning("%s: failed debugFS registration\n", __func__);
#endif
	/* Start the ball rolling... */
	DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	/* Dump DMA/MAC registers */
	if (netif_msg_hw(priv)) {
		priv->hw->mac->dump_regs(priv->ioaddr);
		priv->hw->dma->dump_regs(priv->ioaddr);
	}

	if (priv->phydev)
		phy_start(priv->phydev);

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
	priv->eee_enabled = stmmac_eee_init(priv);

	stmmac_init_tx_coalesce(priv);

	/* Program the RX interrupt watchdog when supported */
	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
	}

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

	/* Unwind in reverse order of acquisition */
open_error_lpiirq:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);

open_error_wolirq:
	free_irq(dev->irq, dev);

open_error:
	if (priv->phydev)
		phy_disconnect(priv->phydev);

	clk_disable_unprepare(priv->stmmac_clk);

	return ret;
}
1265
1266 /**
1267  *  stmmac_release - close entry point of the driver
1268  *  @dev : device pointer.
1269  *  Description:
1270  *  This is the stop entry point of the driver.
1271  */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Cancel the EEE timer before tearing anything else down */
	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	netif_stop_queue(dev);

	napi_disable(&priv->napi);

	/* Stop the TX coalescing timer (armed in stmmac_init_tx_coalesce) */
	del_timer_sync(&priv->txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq != -ENXIO)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, false);

	netif_carrier_off(dev);

#ifdef CONFIG_STMMAC_DEBUG_FS
	stmmac_exit_fs();
#endif
	clk_disable_unprepare(priv->stmmac_clk);

	return 0;
}
1318
1319 /**
1320  *  stmmac_xmit:
1321  *  @skb : the socket buffer
1322  *  @dev : device pointer
1323  *  Description : Tx entry point of the driver.
1324  */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int entry;
	int i, csum_insertion = 0, is_jumbo = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct dma_desc *desc, *first;
	unsigned int nopaged_len = skb_headlen(skb);

	/* The queue should have been stopped before the ring filled up;
	 * reaching here with a full ring is a driver bug. */
	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			pr_err("%s: BUG! Tx Ring full when queue awake\n",
				__func__);
		}
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->tx_lock);

	/* Exit LPI before touching the TX path */
	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = priv->cur_tx % txsize;

#ifdef STMMAC_XMIT_DEBUG
	if ((skb->len > ETH_FRAME_LEN) || nfrags)
		pr_debug("stmmac xmit: [entry %d]\n"
			 "\tskb addr %p - len: %d - nopaged_len: %d\n"
			 "\tn_frags: %d - ip_summed: %d - %s gso\n"
			 "\ttx_count_frames %d\n", entry,
			 skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
			 !skb_is_gso(skb) ? "isn't" : "is",
			 priv->tx_count_frames);
#endif

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (priv->extend_desc)
		desc = (struct dma_desc *) (priv->dma_etx + entry);
	else
		desc = priv->dma_tx + entry;

	/* Remember the first descriptor: its owner bit is set last so
	 * the DMA cannot start on a partially written frame. */
	first = desc;

#ifdef STMMAC_XMIT_DEBUG
	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
		pr_debug("\tskb len: %d, nopaged_len: %d,\n"
			 "\t\tn_frags: %d, ip_summed: %d\n",
			 skb->len, nopaged_len, nfrags, skb->ip_summed);
#endif
	priv->tx_skbuff[entry] = skb;

	/* To program the descriptors according to the size of the frame */
	if (priv->mode == STMMAC_RING_MODE) {
		is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
							priv->plat->enh_desc);
		if (unlikely(is_jumbo))
			entry = priv->hw->ring->jumbo_frm(priv, skb,
							  csum_insertion);
	} else {
		is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
							priv->plat->enh_desc);
		if (unlikely(is_jumbo))
			entry = priv->hw->chain->jumbo_frm(priv, skb,
							   csum_insertion);
	}
	if (likely(!is_jumbo)) {
		/* Map the linear part of the skb into the first descriptor */
		desc->des2 = dma_map_single(priv->device, skb->data,
					nopaged_len, DMA_TO_DEVICE);
		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
						csum_insertion, priv->mode);
	} else
		desc = first;

	/* One descriptor per paged fragment */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		entry = (++priv->cur_tx) % txsize;
		if (priv->extend_desc)
			desc = (struct dma_desc *) (priv->dma_etx + entry);
		else
			desc = priv->dma_tx + entry;

		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
					      DMA_TO_DEVICE);
		/* Only the last entry keeps the skb pointer for cleanup */
		priv->tx_skbuff[entry] = NULL;
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
						priv->mode);
		/* Barrier: descriptor must be complete before handing
		 * ownership to the DMA. */
		wmb();
		priv->hw->desc->set_tx_owner(desc);
		wmb();
	}

	/* Finalize the latest segment. */
	priv->hw->desc->close_tx_desc(desc);

	wmb();
	/* According to the coalesce parameter the IC bit for the latest
	 * segment could be reset and the timer re-started to invoke the
	 * stmmac_tx function. This approach takes care about the fragments.
	 */
	priv->tx_count_frames += nfrags + 1;
	if (priv->tx_coal_frames > priv->tx_count_frames) {
		priv->hw->desc->clear_tx_ic(desc);
		priv->xstats.tx_reset_ic_bit++;
		TX_DBG("\t[entry %d]: tx_count_frames %d\n", entry,
		       priv->tx_count_frames);
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else
		priv->tx_count_frames = 0;

	/* To avoid a race condition: set the owner bit on the first
	 * descriptor only after all the others are ready. */
	priv->hw->desc->set_tx_owner(first);
	wmb();

	priv->cur_tx++;

#ifdef STMMAC_XMIT_DEBUG
	if (netif_msg_pktdata(priv)) {
		pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
		       "first=%p, nfrags=%d\n",
		       (priv->cur_tx % txsize), (priv->dirty_tx % txsize),
		       entry, first, nfrags);
		if (priv->extend_desc)
			stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
		else
			stmmac_display_ring((void *)priv->dma_tx, txsize, 0);

		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}
#endif
	/* Stop the queue when there may not be room for a full frame;
	 * stmmac_tx_clean() will wake it up again. */
	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		TX_DBG("%s: stop transmitted packets\n", __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	skb_tx_timestamp(skb);

	/* Kick the DMA to process the new descriptors */
	priv->hw->dma->enable_dma_transmission(priv->ioaddr);

	spin_unlock(&priv->tx_lock);

	return NETDEV_TX_OK;
}
1478
/* Re-attach fresh skbs to the RX descriptors the CPU has consumed and
 * hand them back to the DMA. */
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;

	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		unsigned int entry = priv->dirty_rx % rxsize;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *) (priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);

			/* Allocation failed: retry on the next refill pass */
			if (unlikely(skb == NULL))
				break;

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);

			p->des2 = priv->rx_skbuff_dma[entry];

			if (unlikely((priv->mode == STMMAC_RING_MODE) &&
				     (priv->plat->has_gmac)))
				priv->hw->ring->refill_desc3(bfsize, p);

			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
		}
		/* Barrier: the descriptor must be fully written before
		 * ownership is handed back to the DMA. */
		wmb();
		priv->hw->desc->set_rx_owner(p);
		wmb();
	}
}
1519
/**
 * stmmac_rx - process frames completed by the DMA on the RX ring
 * @priv: driver private structure
 * @limit: NAPI budget (maximum number of frames to process)
 * Description: walks the RX ring starting from priv->cur_rx, hands each
 * completed frame to the stack via napi_gro_receive() and finally refills
 * the ring with fresh buffers.
 * Return value: the number of frames processed (always <= @limit).
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->cur_rx % rxsize;
	unsigned int next_entry;
	unsigned int count = 0;

#ifdef STMMAC_RX_DEBUG
	if (netif_msg_hw(priv)) {
		pr_debug(">>> stmmac_rx: descriptor ring:\n");
		if (priv->extend_desc)
			stmmac_display_ring((void *) priv->dma_erx, rxsize, 1);
		else
			stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
	}
#endif
	while (count < limit) {
		int status;
		struct dma_desc *p, *p_next;

		/* The cast assumes the basic descriptor leads the extended
		 * one (see the ep->basic accesses in sysfs_display_ring()).
		 */
		if (priv->extend_desc)
			p = (struct dma_desc *) (priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry ;

		/* Descriptor still owned by the DMA: no further completed
		 * frames in the ring, stop here. */
		if (priv->hw->desc->get_rx_owner(p))
			break;

		count++;

		/* Prefetch the next descriptor to hide its access latency. */
		next_entry = (++priv->cur_rx) % rxsize;
		if (priv->extend_desc)
			p_next = (struct dma_desc *) (priv->dma_erx +
						      next_entry);
		else
			p_next = priv->dma_rx + next_entry;

		prefetch(p_next);

		/* read the status of the incoming frame */
		status = priv->hw->desc->rx_status(&priv->dev->stats,
						   &priv->xstats, p);
		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
			priv->hw->desc->rx_extended_status(&priv->dev->stats,
							   &priv->xstats,
							   priv->dma_erx +
							   entry);
		if (unlikely(status == discard_frame))
			priv->dev->stats.rx_errors++;
		else {
			struct sk_buff *skb;
			int frame_len;

			frame_len = priv->hw->desc->get_rx_frame_len(p,
					priv->plat->rx_coe);
			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP) */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;
#ifdef STMMAC_RX_DEBUG
			if (frame_len > ETH_FRAME_LEN)
				pr_debug("\tRX frame size %d, COE status: %d\n",
					frame_len, status);

			if (netif_msg_hw(priv))
				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
					p, entry, p->des2);
#endif
			/* A completed descriptor without a saved skb means
			 * the software ring state is corrupted. */
			skb = priv->rx_skbuff[entry];
			if (unlikely(!skb)) {
				pr_err("%s: Inconsistent Rx descriptor chain\n",
					priv->dev->name);
				priv->dev->stats.rx_dropped++;
				break;
			}
			prefetch(skb->data - NET_IP_ALIGN);
			priv->rx_skbuff[entry] = NULL;

			skb_put(skb, frame_len);
			dma_unmap_single(priv->device,
					 priv->rx_skbuff_dma[entry],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);
#ifdef STMMAC_RX_DEBUG
			if (netif_msg_pktdata(priv)) {
				pr_info(" frame received (%dbytes)", frame_len);
				print_pkt(skb->data, frame_len);
			}
#endif
			skb->protocol = eth_type_trans(skb, priv->dev);

			/* Without HW RX checksum offload let the stack
			 * verify the checksum itself. */
			if (unlikely(!priv->plat->rx_coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&priv->napi, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
	}

	/* Give the processed descriptors back to the DMA with new buffers. */
	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}
1629
1630 /**
1631  *  stmmac_poll - stmmac poll method (NAPI)
1632  *  @napi : pointer to the napi structure.
1633  *  @budget : maximum number of packets that the current CPU can receive from
1634  *            all interfaces.
1635  *  Description :
1636  *  To look at the incoming frames and clear the tx resources.
1637  */
1638 static int stmmac_poll(struct napi_struct *napi, int budget)
1639 {
1640         struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
1641         int work_done = 0;
1642
1643         priv->xstats.napi_poll++;
1644         stmmac_tx_clean(priv);
1645
1646         work_done = stmmac_rx(priv, budget);
1647         if (work_done < budget) {
1648                 napi_complete(napi);
1649                 stmmac_enable_dma_irq(priv);
1650         }
1651         return work_done;
1652 }
1653
/**
 *  stmmac_tx_timeout
 *  @dev: Pointer to net device structure
 *  Description: called by the networking core when a transmission fails to
 *  complete within a reasonable time. The TX path is cleaned up and the
 *  device is brought back to a sane state so a new packet can be sent.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	/* Clear Tx resources and restart transmitting again */
	stmmac_tx_err(netdev_priv(dev));
}
1669
1670 /* Configuration changes (passed on by ifconfig) */
1671 static int stmmac_config(struct net_device *dev, struct ifmap *map)
1672 {
1673         if (dev->flags & IFF_UP)        /* can't act on a running interface */
1674                 return -EBUSY;
1675
1676         /* Don't allow changing the I/O address */
1677         if (map->base_addr != dev->base_addr) {
1678                 pr_warning("%s: can't change I/O address\n", dev->name);
1679                 return -EOPNOTSUPP;
1680         }
1681
1682         /* Don't allow changing the IRQ */
1683         if (map->irq != dev->irq) {
1684                 pr_warning("%s: can't change IRQ number %d\n",
1685                        dev->name, dev->irq);
1686                 return -EOPNOTSUPP;
1687         }
1688
1689         /* ignore other fields */
1690         return 0;
1691 }
1692
1693 /**
1694  *  stmmac_set_rx_mode - entry point for multicast addressing
1695  *  @dev : pointer to the device structure
1696  *  Description:
1697  *  This function is a driver entry point which gets called by the kernel
1698  *  whenever multicast addresses must be enabled/disabled.
1699  *  Return value:
1700  *  void.
1701  */
1702 static void stmmac_set_rx_mode(struct net_device *dev)
1703 {
1704         struct stmmac_priv *priv = netdev_priv(dev);
1705
1706         spin_lock(&priv->lock);
1707         priv->hw->mac->set_filter(dev, priv->synopsys_id);
1708         spin_unlock(&priv->lock);
1709 }
1710
1711 /**
1712  *  stmmac_change_mtu - entry point to change MTU size for the device.
1713  *  @dev : device pointer.
1714  *  @new_mtu : the new MTU size for the device.
1715  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
1716  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
1717  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
1718  *  Return value:
1719  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1720  *  file on failure.
1721  */
1722 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1723 {
1724         struct stmmac_priv *priv = netdev_priv(dev);
1725         int max_mtu;
1726
1727         if (netif_running(dev)) {
1728                 pr_err("%s: must be stopped to change its MTU\n", dev->name);
1729                 return -EBUSY;
1730         }
1731
1732         if (priv->plat->enh_desc)
1733                 max_mtu = JUMBO_LEN;
1734         else
1735                 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
1736
1737         if ((new_mtu < 46) || (new_mtu > max_mtu)) {
1738                 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
1739                 return -EINVAL;
1740         }
1741
1742         dev->mtu = new_mtu;
1743         netdev_update_features(dev);
1744
1745         return 0;
1746 }
1747
1748 static netdev_features_t stmmac_fix_features(struct net_device *dev,
1749         netdev_features_t features)
1750 {
1751         struct stmmac_priv *priv = netdev_priv(dev);
1752
1753         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
1754                 features &= ~NETIF_F_RXCSUM;
1755         else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
1756                 features &= ~NETIF_F_IPV6_CSUM;
1757         if (!priv->plat->tx_coe)
1758                 features &= ~NETIF_F_ALL_CSUM;
1759
1760         /* Some GMAC devices have a bugged Jumbo frame support that
1761          * needs to have the Tx COE disabled for oversized frames
1762          * (due to limited buffer sizes). In this case we disable
1763          * the TX csum insertionin the TDES and not use SF. */
1764         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
1765                 features &= ~NETIF_F_ALL_CSUM;
1766
1767         return features;
1768 }
1769
1770 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1771 {
1772         struct net_device *dev = (struct net_device *)dev_id;
1773         struct stmmac_priv *priv = netdev_priv(dev);
1774
1775         if (unlikely(!dev)) {
1776                 pr_err("%s: invalid dev pointer\n", __func__);
1777                 return IRQ_NONE;
1778         }
1779
1780         /* To handle GMAC own interrupts */
1781         if (priv->plat->has_gmac) {
1782                 int status = priv->hw->mac->host_irq_status((void __iomem *)
1783                                                             dev->base_addr);
1784                 if (unlikely(status)) {
1785                         if (status & core_mmc_tx_irq)
1786                                 priv->xstats.mmc_tx_irq_n++;
1787                         if (status & core_mmc_rx_irq)
1788                                 priv->xstats.mmc_rx_irq_n++;
1789                         if (status & core_mmc_rx_csum_offload_irq)
1790                                 priv->xstats.mmc_rx_csum_offload_irq_n++;
1791                         if (status & core_irq_receive_pmt_irq)
1792                                 priv->xstats.irq_receive_pmt_irq_n++;
1793
1794                         /* For LPI we need to save the tx status */
1795                         if (status & core_irq_tx_path_in_lpi_mode) {
1796                                 priv->xstats.irq_tx_path_in_lpi_mode_n++;
1797                                 priv->tx_path_in_lpi_mode = true;
1798                         }
1799                         if (status & core_irq_tx_path_exit_lpi_mode) {
1800                                 priv->xstats.irq_tx_path_exit_lpi_mode_n++;
1801                                 priv->tx_path_in_lpi_mode = false;
1802                         }
1803                         if (status & core_irq_rx_path_in_lpi_mode)
1804                                 priv->xstats.irq_rx_path_in_lpi_mode_n++;
1805                         if (status & core_irq_rx_path_exit_lpi_mode)
1806                                 priv->xstats.irq_rx_path_exit_lpi_mode_n++;
1807                 }
1808         }
1809
1810         /* To handle DMA interrupts */
1811         stmmac_dma_interrupt(priv);
1812
1813         return IRQ_HANDLED;
1814 }
1815
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 * The ISR is run synchronously with the device IRQ masked.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
1826
1827 /**
1828  *  stmmac_ioctl - Entry point for the Ioctl
1829  *  @dev: Device pointer.
1830  *  @rq: An IOCTL specefic structure, that can contain a pointer to
1831  *  a proprietary structure used to pass information to the driver.
1832  *  @cmd: IOCTL command
1833  *  Description:
1834  *  Currently there are no special functionality supported in IOCTL, just the
1835  *  phy_mii_ioctl(...) can be invoked.
1836  */
1837 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1838 {
1839         struct stmmac_priv *priv = netdev_priv(dev);
1840         int ret;
1841
1842         if (!netif_running(dev))
1843                 return -EINVAL;
1844
1845         if (!priv->phydev)
1846                 return -EINVAL;
1847
1848         ret = phy_mii_ioctl(priv->phydev, rq, cmd);
1849
1850         return ret;
1851 }
1852
1853 #ifdef CONFIG_STMMAC_DEBUG_FS
/* debugfs handles: the driver directory and the two files it contains. */
static struct dentry *stmmac_fs_dir;
static struct dentry *stmmac_rings_status;
static struct dentry *stmmac_dma_cap;
1857
1858 static void sysfs_display_ring(void *head, int size, int extend_desc,
1859                                 struct seq_file *seq)
1860 {
1861         int i;
1862         struct dma_extended_desc *ep = (struct dma_extended_desc *) head;
1863         struct dma_desc *p = (struct dma_desc *) head;
1864
1865         for (i = 0; i < size; i++) {
1866                 u64 x;
1867                 if (extend_desc) {
1868                         x = *(u64 *) ep;
1869                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
1870                                    i, (unsigned int) virt_to_phys(ep),
1871                                    (unsigned int) x, (unsigned int) (x >> 32),
1872                                    ep->basic.des2, ep->basic.des3);
1873                         ep++;
1874                 } else {
1875                         x = *(u64 *) p;
1876                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
1877                                    i, (unsigned int) virt_to_phys(ep),
1878                                    (unsigned int) x, (unsigned int) (x >> 32),
1879                                    p->des2, p->des3);
1880                         p++;
1881                 }
1882                 seq_printf(seq, "\n");
1883         }
1884 }
1885
1886 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
1887 {
1888         struct net_device *dev = seq->private;
1889         struct stmmac_priv *priv = netdev_priv(dev);
1890         unsigned int txsize = priv->dma_tx_size;
1891         unsigned int rxsize = priv->dma_rx_size;
1892
1893         if (priv->extend_desc) {
1894                 seq_printf(seq, "Extended RX descriptor ring:\n");
1895                 sysfs_display_ring((void *) priv->dma_erx, rxsize, 1, seq);
1896                 seq_printf(seq, "Extended TX descriptor ring:\n");
1897                 sysfs_display_ring((void *) priv->dma_etx, txsize, 1, seq);
1898         } else {
1899                 seq_printf(seq, "RX descriptor ring:\n");
1900                 sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
1901                 seq_printf(seq, "TX descriptor ring:\n");
1902                 sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
1903         }
1904
1905         return 0;
1906 }
1907
/* debugfs open: bind the ring dump to this netdev via seq_file private. */
static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}
1912
/* File operations for the "descriptors_status" debugfs entry. */
static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1920
1921 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
1922 {
1923         struct net_device *dev = seq->private;
1924         struct stmmac_priv *priv = netdev_priv(dev);
1925
1926         if (!priv->hw_cap_support) {
1927                 seq_printf(seq, "DMA HW features not supported\n");
1928                 return 0;
1929         }
1930
1931         seq_printf(seq, "==============================\n");
1932         seq_printf(seq, "\tDMA HW features\n");
1933         seq_printf(seq, "==============================\n");
1934
1935         seq_printf(seq, "\t10/100 Mbps %s\n",
1936                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
1937         seq_printf(seq, "\t1000 Mbps %s\n",
1938                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
1939         seq_printf(seq, "\tHalf duple %s\n",
1940                    (priv->dma_cap.half_duplex) ? "Y" : "N");
1941         seq_printf(seq, "\tHash Filter: %s\n",
1942                    (priv->dma_cap.hash_filter) ? "Y" : "N");
1943         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
1944                    (priv->dma_cap.multi_addr) ? "Y" : "N");
1945         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n",
1946                    (priv->dma_cap.pcs) ? "Y" : "N");
1947         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
1948                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
1949         seq_printf(seq, "\tPMT Remote wake up: %s\n",
1950                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
1951         seq_printf(seq, "\tPMT Magic Frame: %s\n",
1952                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
1953         seq_printf(seq, "\tRMON module: %s\n",
1954                    (priv->dma_cap.rmon) ? "Y" : "N");
1955         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
1956                    (priv->dma_cap.time_stamp) ? "Y" : "N");
1957         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
1958                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
1959         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
1960                    (priv->dma_cap.eee) ? "Y" : "N");
1961         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
1962         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
1963                    (priv->dma_cap.tx_coe) ? "Y" : "N");
1964         seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
1965                    (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
1966         seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
1967                    (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
1968         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
1969                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
1970         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
1971                    priv->dma_cap.number_rx_channel);
1972         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
1973                    priv->dma_cap.number_tx_channel);
1974         seq_printf(seq, "\tEnhanced descriptors: %s\n",
1975                    (priv->dma_cap.enh_desc) ? "Y" : "N");
1976
1977         return 0;
1978 }
1979
/* debugfs open: bind the DMA capability dump to this netdev. */
static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}
1984
/* File operations for the "dma_cap" debugfs entry. */
static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1992
/* Create the driver debugfs directory and its two entries.
 * On any failure, previously created entries are removed and -ENOMEM
 * is returned.
 */
static int stmmac_init_fs(struct net_device *dev)
{
	/* Create debugfs entries */
	stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

	if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
		pr_err("ERROR %s, debugfs create directory failed\n",
		       STMMAC_RESOURCE_NAME);

		return -ENOMEM;
	}

	/* Entry to report DMA RX/TX rings */
	stmmac_rings_status = debugfs_create_file("descriptors_status",
					   S_IRUGO, stmmac_fs_dir, dev,
					   &stmmac_rings_status_fops);

	if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
		pr_info("ERROR creating stmmac ring debugfs file\n");
		debugfs_remove(stmmac_fs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
					     dev, &stmmac_dma_cap_fops);

	if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
		pr_info("ERROR creating stmmac MMC debugfs file\n");
		/* Unwind: drop the ring entry, then the directory. */
		debugfs_remove(stmmac_rings_status);
		debugfs_remove(stmmac_fs_dir);

		return -ENOMEM;
	}

	return 0;
}
2031
/* Remove the debugfs entries created by stmmac_init_fs(): files first,
 * then the containing directory.
 */
static void stmmac_exit_fs(void)
{
	debugfs_remove(stmmac_rings_status);
	debugfs_remove(stmmac_dma_cap);
	debugfs_remove(stmmac_fs_dir);
}
2038 #endif /* CONFIG_STMMAC_DEBUG_FS */
2039
/* Net device operations: the entry points the networking core calls
 * into this driver.
 */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_set_config = stmmac_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = eth_mac_addr,
};
2055
/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: pointer to the private device structure.
 *  Description: this function detects which MAC device
 *  (GMAC/MAC10-100) has to be attached, checks the HW capability
 *  (if supported) and sets the driver's features (for example
 *  to use the ring or chain mode or support the normal/enh
 *  descriptor structure).
 *  Return value: 0 on success, -ENOMEM if the MAC setup fails.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;
	struct mac_device_info *mac;

	/* Identify the MAC HW device */
	if (priv->plat->has_gmac) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac1000_setup(priv->ioaddr);
	} else {
		mac = dwmac100_setup(priv->ioaddr);
	}
	if (!mac)
		return -ENOMEM;

	priv->hw = mac;

	/* Get and dump the chip ID */
	priv->synopsys_id = stmmac_get_synopsys_id(priv);

	/* To use alternate (extended) or normal descriptor structures */
	stmmac_selec_desc_mode(priv);

	/* To use the chained or ring mode (selected by module parameter) */
	if (chain_mode) {
		priv->hw->chain = &chain_mode_ops;
		pr_info(" Chain mode enabled\n");
		priv->mode = STMMAC_CHAIN_MODE;
	} else {
		priv->hw->ring = &ring_mode_ops;
		pr_info(" Ring mode enabled\n");
		priv->mode = STMMAC_RING_MODE;
	}

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		pr_info(" DMA HW capability register supported");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;

		priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* Prefer the most capable RX checksum engine reported. */
		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else
		pr_info(" No HW DMA feature register supported");

	/* Enable the IPC (Checksum Offload) and check if the feature has been
	 * enabled during the core configuration. */
	ret = priv->hw->mac->rx_ipc(priv->ioaddr);
	if (!ret) {
		pr_warning(" RX IPC Checksum Offload not configured.\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
	}

	if (priv->plat->rx_coe)
		pr_info(" RX Checksum Offload Engine supported (type %d)\n",
			priv->plat->rx_coe);
	if (priv->plat->tx_coe)
		pr_info(" TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		pr_info(" Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	return 0;
}
2143
/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @addr: iobase memory address
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 * Return value: the private structure on success, NULL on any failure.
 */
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
				     struct plat_stmmacenet_data *plat_dat,
				     void __iomem *addr)
{
	int ret = 0;
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;

	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
	if (!ndev)
		return NULL;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	ether_setup(ndev);

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = addr;
	priv->dev->base_addr = (unsigned long)addr;

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_free_netdev;

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_netdev_register;
	}

	priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
	if (IS_ERR(priv->stmmac_clk)) {
		pr_warning("%s: warning: cannot get CSR clock\n", __func__);
		goto error_clk_get;
	}

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the csr actual
	 * clock input.
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	/* MDIO bus Registration */
	ret = stmmac_mdio_register(ndev);
	if (ret < 0) {
		pr_debug("%s: MDIO bus (id: %d) registration failed",
			 __func__, priv->plat->bus_id);
		goto error_mdio_register;
	}

	return priv;

	/* Error unwind: labels undo the setup steps in reverse order. */
error_mdio_register:
	clk_put(priv->stmmac_clk);
error_clk_get:
	unregister_netdev(ndev);
error_netdev_register:
	netif_napi_del(&priv->napi);
error_free_netdev:
	free_netdev(ndev);

	return NULL;
}
2265
/**
 * stmmac_dvr_remove
 * @ndev: net device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 * Return value: always 0.
 */
int stmmac_dvr_remove(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	pr_info("%s:\n\tremoving driver", __func__);

	/* Stop both DMA channels before turning the MAC off. */
	priv->hw->dma->stop_rx(priv->ioaddr);
	priv->hw->dma->stop_tx(priv->ioaddr);

	stmmac_set_mac(priv->ioaddr, false);
	stmmac_mdio_unregister(ndev);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}
2289
2290 #ifdef CONFIG_PM
/* Suspend callback: quiesce the PHY, NAPI and DMA, then either arm the
 * PMT wake-up logic or power the MAC and its clock down completely.
 */
int stmmac_suspend(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!ndev || !netif_running(ndev))
		return 0;

	if (priv->phydev)
		phy_stop(priv->phydev);

	spin_lock_irqsave(&priv->lock, flags);

	netif_device_detach(ndev);
	netif_stop_queue(ndev);

	napi_disable(&priv->napi);

	/* Stop TX/RX DMA */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	stmmac_clear_descriptors(priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device))
		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
	else {
		stmmac_set_mac(priv->ioaddr, false);
		/* Gate the clock when wake-up is not required */
		clk_disable_unprepare(priv->stmmac_clk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}
2326
/* Resume callback: undo stmmac_suspend() — clear the power-down state or
 * re-enable the clock, then restart the MAC, DMA, NAPI and the PHY.
 */
int stmmac_resume(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	spin_lock_irqsave(&priv->lock, flags);

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console). */
	if (device_may_wakeup(priv->device))
		priv->hw->mac->pmt(priv->ioaddr, 0);
	else
		/* enable the clk previously disabled */
		clk_prepare_enable(priv->stmmac_clk);

	netif_device_attach(ndev);

	/* Enable the MAC and DMA */
	stmmac_set_mac(priv->ioaddr, true);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->phydev)
		phy_start(priv->phydev);

	return 0;
}
2366
/* Hibernation "freeze" hook: shut the interface down as if it were
 * being closed, but only when it is actually up.
 */
int stmmac_freeze(struct net_device *ndev)
{
	if (ndev && netif_running(ndev))
		return stmmac_release(ndev);

	return 0;
}
2374
/* Hibernation "restore" hook: bring the interface back up via the
 * regular open path, but only when it was running before the freeze.
 */
int stmmac_restore(struct net_device *ndev)
{
	if (ndev && netif_running(ndev))
		return stmmac_open(ndev);

	return 0;
}
2382 #endif /* CONFIG_PM */
2383
/* The driver can be built with PCI and/or platform bus glue,
 * depending on the configuration selected.
 */
2387 static int __init stmmac_init(void)
2388 {
2389         int ret;
2390
2391         ret = stmmac_register_platform();
2392         if (ret)
2393                 goto err;
2394         ret = stmmac_register_pci();
2395         if (ret)
2396                 goto err_pci;
2397         return 0;
2398 err_pci:
2399         stmmac_unregister_platform();
2400 err:
2401         pr_err("stmmac: driver registration failed\n");
2402         return ret;
2403 }
2404
/* Module exit point: remove both the platform and PCI bus glue
 * registered by stmmac_init().
 */
static void __exit stmmac_exit(void)
{
	stmmac_unregister_platform();
	stmmac_unregister_pci();
}
2410
/* Wire the init/exit routines into the module loader */
module_init(stmmac_init);
module_exit(stmmac_exit);
2413
2414 #ifndef MODULE
2415 static int __init stmmac_cmdline_opt(char *str)
2416 {
2417         char *opt;
2418
2419         if (!str || !*str)
2420                 return -EINVAL;
2421         while ((opt = strsep(&str, ",")) != NULL) {
2422                 if (!strncmp(opt, "debug:", 6)) {
2423                         if (kstrtoint(opt + 6, 0, &debug))
2424                                 goto err;
2425                 } else if (!strncmp(opt, "phyaddr:", 8)) {
2426                         if (kstrtoint(opt + 8, 0, &phyaddr))
2427                                 goto err;
2428                 } else if (!strncmp(opt, "dma_txsize:", 11)) {
2429                         if (kstrtoint(opt + 11, 0, &dma_txsize))
2430                                 goto err;
2431                 } else if (!strncmp(opt, "dma_rxsize:", 11)) {
2432                         if (kstrtoint(opt + 11, 0, &dma_rxsize))
2433                                 goto err;
2434                 } else if (!strncmp(opt, "buf_sz:", 7)) {
2435                         if (kstrtoint(opt + 7, 0, &buf_sz))
2436                                 goto err;
2437                 } else if (!strncmp(opt, "tc:", 3)) {
2438                         if (kstrtoint(opt + 3, 0, &tc))
2439                                 goto err;
2440                 } else if (!strncmp(opt, "watchdog:", 9)) {
2441                         if (kstrtoint(opt + 9, 0, &watchdog))
2442                                 goto err;
2443                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
2444                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
2445                                 goto err;
2446                 } else if (!strncmp(opt, "pause:", 6)) {
2447                         if (kstrtoint(opt + 6, 0, &pause))
2448                                 goto err;
2449                 } else if (!strncmp(opt, "eee_timer:", 10)) {
2450                         if (kstrtoint(opt + 10, 0, &eee_timer))
2451                                 goto err;
2452                 } else if (!strncmp(opt, "chain_mode:", 11)) {
2453                         if (kstrtoint(opt + 11, 0, &chain_mode))
2454                                 goto err;
2455                 }
2456         }
2457         return 0;
2458
2459 err:
2460         pr_err("%s: ERROR broken module parameter conversion", __func__);
2461         return -EINVAL;
2462 }
2463
2464 __setup("stmmaceth=", stmmac_cmdline_opt);
2465 #endif
2466
/* Standard module metadata */
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");