/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
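
/* The ring walk described above is plain pointer arithmetic over the
 * descriptor array; a minimal sketch (the real helpers are skip_txbd()
 * and next_txbd() further down):
 *
 *	bdp++;
 *	if (bdp >= base + ring_size)
 *		bdp -= ring_size;
 */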

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define TX_TIMEOUT	(1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static struct sk_buff *gfar_new_skb(struct net_device *dev,
				    dma_addr_t *bufaddr);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
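
/* Initialize a single Rx buffer descriptor.  The buffer address and the
 * EMPTY/INTERRUPT (and, on the last BD, WRAP) flags are prepared first;
 * the lstatus word is written last, since that store is what hands the
 * descriptor over to the DMA engine.
 */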
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	u32 __iomem *rfbptr;
	dma_addr_t bufaddr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				bufaddr = be32_to_cpu(rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev, &bufaddr);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;
			}

			gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
			rxbdp++;
		}

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}

	return 0;
}
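
/* All Tx and Rx rings are carved out of one dma_alloc_coherent() region:
 * the Tx BD arrays come first, the Rx BD arrays follow, and vaddr/addr
 * are advanced in lockstep so each queue sees matching virtual and DMA
 * addresses.  The whole region is released again in a single call by
 * free_skb_resources().
 */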
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
	int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en)
		priv->uses_rxfcb = 1;

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		     INCREMENTAL_BUFFER_SIZE;

	priv->rx_buffer_size = frame_size;
}
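
/* Worked example for the sizing above, assuming the usual
 * INCREMENTAL_BUFFER_SIZE of 512: with a 1500-byte MTU and no Rx FCB or
 * padding, frame_size starts at 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN)
 * = 1518, and the round-up yields (1518 & ~511) + 512 = 1536 bytes per
 * Rx buffer.
 */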

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
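
/* Fold the per-queue software counters into the single net_device_stats
 * structure the stack expects; each Rx/Tx queue updates its own counters
 * in the NAPI and xmit paths, so this is the only place they are summed.
 */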
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes   += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	eth_mac_addr(dev, p);

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = gfar_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to revert the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}
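
/* Worked example for the bit maps above: a map of 0x80 selects only q0
 * (MSB-first, per the hardware convention); after bitrev8() it becomes
 * 0x01, so for_each_set_bit() yields exactly i = 0.
 */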

static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (!of_node_cmp(child->name, "queue-group"))
			num++;

	return num;
}

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	struct property *stash;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (of_node_cmp(child->name, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	stash = of_find_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER;

	err = of_property_read_string(np, "phy-connection-type", &ctype);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (err == 0 && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	return err;
}
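
/* SIOCSHWTSTAMP handler.  Tx timestamping is a per-skb flag, but enabling
 * or disabling Rx timestamping changes the Rx frame layout (an extra
 * FCB/padding area, see gfar_rx_buff_size_config()), so flipping
 * hwts_rx_en forces a full reset of the controller.
 */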
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}

		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
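
/* The errata checks below match on the PowerPC PVR (core version) and
 * SVR (SoC version) registers; the low 16 bits of the SVR carry the
 * silicon revision, which is why e.g. MPC8313 Rev 2.0 compares as
 * rev >= 0x0020.
 */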
#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	/* Compute rx_buff_size based on config flags */
	gfar_rx_buff_size_config(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}
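
/* One-time controller init done at probe time.  Note that gfar_mac_reset()
 * above is also re-run on every interface bring-up (see startup_gfar()),
 * while the RMON/ECNTRL/ATTR setup below only happens here.
 */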
static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}

static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->poll_mode == GFAR_SQ_POLLING) {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx_sq, 2);
		} else { /* GFAR_MQ_POLLING */
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx, 2);
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	gfar_init_addr_hash_table(priv);

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	set_bit(GFAR_DOWN, &priv->state);

	gfar_hw_init(priv);

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];

		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else {
			strcpy(gfar_irq(grp, TX)->name, dev->name);
		}
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);
	free_gfar_dev(priv);

	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);

	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);

	return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev))
		return 0;

	disable_napi(priv);

	netif_tx_lock(ndev);
	netif_device_detach(ndev);
	netif_tx_unlock(ndev);

	gfar_halt(priv);

	if (magic_packet) {
		/* Enable interrupt on Magic Packet */
		gfar_write(&regs->imask, IMASK_MAG);

		/* Enable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval |= MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

		/* re-enable the Rx block */
		tempval = gfar_read(&regs->maccfg1);
		tempval |= MACCFG1_RX_EN;
		gfar_write(&regs->maccfg1, tempval);
	} else {
		phy_stop(priv->phydev);
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev))
		return 0;

	if (magic_packet) {
		/* Disable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval &= ~MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);
	} else {
		phy_start(priv->phydev);
	}

	gfar_start(priv);

	enable_napi(priv);

	netif_device_attach(ndev);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	if (gfar_init_bds(ndev)) {
		free_skb_resources(priv);
		return -ENOMEM;
	}

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		GFAR_SUPPORTED_GBIT : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	/* Add support for flow control, but don't advertise it by default */
	priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);

	return 0;
}

/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	unsigned int timeout;
	int stopped;

	gfar_ints_disable(priv);

	if (gfar_is_dma_stopped(priv))
		return;

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

retry:
	timeout = 1000;
	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
		cpu_relax();
		timeout--;
	}

	if (!timeout)
		stopped = gfar_is_dma_stopped(priv);

	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
	    !__gfar_is_rx_idle(priv))
		goto retry;
}

/* Halt the receive and transmit queues */
void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	smp_mb__before_atomic();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(priv->phydev);

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
				       be16_to_cpu(txbdp->length),
				       DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
					 priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
	rx_queue->rx_skbuff = NULL;
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}

void gfar_start(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	gfar_ints_enable(priv);

	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}
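
/* Multi-interrupt (non-FEC) devices expose separate Error, Tx and Rx IRQ
 * lines per group; they are requested in that order, and the error labels
 * below unwind in reverse so a partial registration releases exactly what
 * was acquired.
 */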
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}

		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}

		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;
}

static void gfar_free_irq(struct gfar_private *priv)
{
	int i;

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}
}

static int gfar_request_irq(struct gfar_private *priv)
{
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			return err;
		}
	}

	return 0;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	int err;

	gfar_mac_reset(priv);

	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_tx_rx_base(priv);

	smp_mb__before_atomic();
	clear_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	/* Start Rx/Tx DMA and enable the interrupts */
	gfar_start(priv);

	phy_start(priv->phydev);

	enable_napi(priv);

	netif_tx_wake_all_queues(ndev);

	return 0;
}

/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	err = init_phy(dev);
	if (err)
		return err;

	err = gfar_request_irq(priv);
	if (err)
		return err;

	err = startup_gfar(dev);
	if (err)
		return err;

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
	} else {
		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
	}

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
}
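
/* skip_txbd()/next_txbd() implement the ring wrap sketched in the header
 * comment: e.g. with a 256-entry ring, advancing one step from BD 255
 * lands back on BD 0.
 */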
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}

/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
		(fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
		(len > 2500));
}
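
/* Tx BD budgeting for gfar_start_xmit() below: an skb with N fragments
 * consumes N + 1 BDs (one for the linear part, one per fragment), plus
 * one extra BD when a hardware Tx timestamp is requested, since the
 * first BD then covers only the FCB while the second points at the
 * actual frame data.
 */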
2217 /* This is called by the kernel when a frame is ready for transmission.
2218 * It is pointed to by the dev->hard_start_xmit function pointer
2220 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2222 struct gfar_private *priv = netdev_priv(dev);
2223 struct gfar_priv_tx_q *tx_queue = NULL;
2224 struct netdev_queue *txq;
2225 struct gfar __iomem *regs = NULL;
2226 struct txfcb *fcb = NULL;
2227 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2230 int do_tstamp, do_csum, do_vlan;
2232 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2234 rq = skb->queue_mapping;
2235 tx_queue = priv->tx_queue[rq];
2236 txq = netdev_get_tx_queue(dev, rq);
2237 base = tx_queue->tx_bd_base;
2238 regs = tx_queue->grp->regs;
2240 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2241 do_vlan = skb_vlan_tag_present(skb);
2242 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2245 if (do_csum || do_vlan)
2246 fcb_len = GMAC_FCB_LEN;
2248 /* check if time stamp should be generated */
2249 if (unlikely(do_tstamp))
2250 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2252 /* make space for additional header when fcb is needed */
2253 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2254 struct sk_buff *skb_new;
2256 skb_new = skb_realloc_headroom(skb, fcb_len);
2258 dev->stats.tx_errors++;
2259 dev_kfree_skb_any(skb);
2260 return NETDEV_TX_OK;
2264 skb_set_owner_w(skb_new, skb->sk);
2265 dev_consume_skb_any(skb);
2269 /* total number of fragments in the SKB */
2270 nr_frags = skb_shinfo(skb)->nr_frags;
2272 /* calculate the required number of TxBDs for this skb */
2273 if (unlikely(do_tstamp))
2274 nr_txbds = nr_frags + 2;
2276 nr_txbds = nr_frags + 1;
2278 /* check if there is space to queue this packet */
2279 if (nr_txbds > tx_queue->num_txbdfree) {
2280 /* no space, stop the queue */
2281 netif_tx_stop_queue(txq);
2282 dev->stats.tx_fifo_errors++;
2283 return NETDEV_TX_BUSY;
2286 /* Update transmit stats */
2287 bytes_sent = skb->len;
2288 tx_queue->stats.tx_bytes += bytes_sent;
2289 /* keep Tx bytes on wire for BQL accounting */
2290 GFAR_CB(skb)->bytes_sent = bytes_sent;
2291 tx_queue->stats.tx_packets++;
2293 txbdp = txbdp_start = tx_queue->cur_tx;
2294 lstatus = be32_to_cpu(txbdp->lstatus);
2296 /* Time stamp insertion requires one additional TxBD */
2297 if (unlikely(do_tstamp))
2298 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2299 tx_queue->tx_ring_size);
2301 if (nr_frags == 0) {
2302 if (unlikely(do_tstamp)) {
2303 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2305 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2306 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
2308 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2311 /* Place the fragment addresses and lengths into the TxBDs */
2312 for (i = 0; i < nr_frags; i++) {
2313 unsigned int frag_len;
2314 /* Point at the next BD, wrapping as needed */
2315 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2317 frag_len = skb_shinfo(skb)->frags[i].size;
2319 lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
2320 BD_LFLAG(TXBD_READY);
2322 /* Handle the last BD specially */
2323 if (i == nr_frags - 1)
2324 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2326 bufaddr = skb_frag_dma_map(priv->dev,
2327 &skb_shinfo(skb)->frags[i],
2331 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2334 /* set the TxBD length and buffer pointer */
2335 txbdp->bufPtr = cpu_to_be32(bufaddr);
2336 txbdp->lstatus = cpu_to_be32(lstatus);
2339 lstatus = be32_to_cpu(txbdp_start->lstatus);
2342 /* Add TxPAL between FCB and frame if required */
2343 if (unlikely(do_tstamp)) {
2344 skb_push(skb, GMAC_TXPAL_LEN);
2345 memset(skb->data, 0, GMAC_TXPAL_LEN);
2348 /* Add TxFCB if required */
2350 fcb = gfar_add_fcb(skb);
2351 lstatus |= BD_LFLAG(TXBD_TOE);
2354 /* Set up checksumming */
2356 gfar_tx_checksum(skb, fcb, fcb_len);
2358 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2359 unlikely(gfar_csum_errata_76(priv, skb->len))) {
2360 __skb_pull(skb, GMAC_FCB_LEN);
2361 skb_checksum_help(skb);
2362 if (do_vlan || do_tstamp) {
2363 /* put back a new fcb for vlan/tstamp TOE */
2364 fcb = gfar_add_fcb(skb);
2366 /* Tx TOE not used */
2367 lstatus &= ~(BD_LFLAG(TXBD_TOE));
2374 gfar_tx_vlan(skb, fcb);
2376 /* Setup tx hardware time stamping if requested */
2377 if (unlikely(do_tstamp)) {
2378 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2382 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
2384 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2387 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
2389 /* If time stamping is requested, one additional TxBD must be set up. The
2390 * first TxBD points to the FCB and must have a data length of
2391 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2392 * the full frame length.
2394 if (unlikely(do_tstamp)) {
2395 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2397 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
2399 lstatus_ts |= BD_LFLAG(TXBD_READY) |
2400 (skb_headlen(skb) - fcb_len);
2402 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
2403 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
2404 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2406 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2409 netdev_tx_sent_queue(txq, bytes_sent);
2413 txbdp_start->lstatus = cpu_to_be32(lstatus);
2415 gfar_wmb(); /* force lstatus write before tx_skbuff */
2417 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2419 /* Update the current skb pointer to the next entry we will use
2420 * (wrapping if necessary)
2422 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2423 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
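/* TX_RING_MOD_MASK assumes a power-of-two ring size: with 256 entries
 * the mask is 255, so (255 + 1) & 255 wraps the index back to 0 without
 * a divide (illustrative numbers).
 */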
2425 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2427 /* We can work in parallel with gfar_clean_tx_ring(), except
2428 * when modifying num_txbdfree. Note that we didn't grab the lock
2429 * when we were reading the num_txbdfree and checking for available
2430 * space, that's because outside of this function it can only grow.
2432 spin_lock_bh(&tx_queue->txlock);
2433 /* reduce TxBD free count */
2434 tx_queue->num_txbdfree -= (nr_txbds);
2435 spin_unlock_bh(&tx_queue->txlock);
2437 /* If the next BD still needs to be cleaned up, then the bds
2438 * are full. We need to tell the kernel to stop sending us stuff.
2440 if (!tx_queue->num_txbdfree) {
2441 netif_tx_stop_queue(txq);
2443 dev->stats.tx_fifo_errors++;
2446 /* Tell the DMA to go go go */
2447 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2449 return NETDEV_TX_OK;
2452 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
2454 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2455 for (i = 0; i < nr_frags; i++) {
2456 lstatus = be32_to_cpu(txbdp->lstatus);
2457 if (!(lstatus & BD_LFLAG(TXBD_READY)))
2460 lstatus &= ~BD_LFLAG(TXBD_READY);
2461 txbdp->lstatus = cpu_to_be32(lstatus);
2462 bufaddr = be32_to_cpu(txbdp->bufPtr);
2463 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
2465 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2468 dev_kfree_skb_any(skb);
2469 return NETDEV_TX_OK;
2472 /* Stops the kernel queue, and halts the controller */
2473 static int gfar_close(struct net_device *dev)
2475 struct gfar_private *priv = netdev_priv(dev);
2477 cancel_work_sync(&priv->reset_task);
2480 /* Disconnect from the PHY */
2481 phy_disconnect(priv->phydev);
2482 priv->phydev = NULL;
2484 gfar_free_irq(priv);
2489 /* Changes the mac address if the controller is not running. */
2490 static int gfar_set_mac_address(struct net_device *dev)
2492 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2497 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2499 struct gfar_private *priv = netdev_priv(dev);
2500 int frame_size = new_mtu + ETH_HLEN;
2502 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2503 netif_err(priv, drv, dev, "Invalid MTU setting\n");
2507 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2510 if (dev->flags & IFF_UP)
2515 if (dev->flags & IFF_UP)
2518 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2523 void reset_gfar(struct net_device *ndev)
2525 struct gfar_private *priv = netdev_priv(ndev);
2527 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2533 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2536 /* gfar_reset_task gets scheduled when a packet has not been
2537 * transmitted after a set amount of time.
2538 * For now, assume that clearing out all the structures, and
2539 * starting over will fix the problem.
2541 static void gfar_reset_task(struct work_struct *work)
2543 struct gfar_private *priv = container_of(work, struct gfar_private,
2545 reset_gfar(priv->ndev);
2548 static void gfar_timeout(struct net_device *dev)
2550 struct gfar_private *priv = netdev_priv(dev);
2552 dev->stats.tx_errors++;
2553 schedule_work(&priv->reset_task);
2556 static void gfar_align_skb(struct sk_buff *skb)
2558 /* We need the data buffer to be aligned properly. Reserve as many
2559 * bytes as needed to bring skb->data up to an RXBUF_ALIGNMENT boundary
2561 skb_reserve(skb, RXBUF_ALIGNMENT -
2562 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
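/* e.g. assuming the 64-byte RXBUF_ALIGNMENT from gianfar.h, skb->data
 * ending in 0x18 gets 64 - 24 = 40 bytes reserved, landing the buffer
 * on the next 64-byte boundary (illustrative).
 */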
2565 /* Tx cleanup: reclaim BDs and buffers for frames the controller has sent */
2566 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2568 struct net_device *dev = tx_queue->dev;
2569 struct netdev_queue *txq;
2570 struct gfar_private *priv = netdev_priv(dev);
2571 struct txbd8 *bdp, *next = NULL;
2572 struct txbd8 *lbdp = NULL;
2573 struct txbd8 *base = tx_queue->tx_bd_base;
2574 struct sk_buff *skb;
2576 int tx_ring_size = tx_queue->tx_ring_size;
2577 int frags = 0, nr_txbds = 0;
2580 int tqi = tx_queue->qindex;
2581 unsigned int bytes_sent = 0;
2585 txq = netdev_get_tx_queue(dev, tqi);
2586 bdp = tx_queue->dirty_tx;
2587 skb_dirtytx = tx_queue->skb_dirtytx;
2589 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2591 frags = skb_shinfo(skb)->nr_frags;
2593 /* When time stamping, one additional TxBD must be freed.
2594 * Also, we need to dma_unmap_single() the TxPAL.
2596 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2597 nr_txbds = frags + 2;
2599 nr_txbds = frags + 1;
2601 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2603 lstatus = be32_to_cpu(lbdp->lstatus);
2605 /* Only clean completed frames */
2606 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2607 (lstatus & BD_LENGTH_MASK))
2610 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2611 next = next_txbd(bdp, base, tx_ring_size);
2612 buflen = be16_to_cpu(next->length) +
2613 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2615 buflen = be16_to_cpu(bdp->length);
2617 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2618 buflen, DMA_TO_DEVICE);
2620 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2621 struct skb_shared_hwtstamps shhwtstamps;
2622 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & ~0x7UL);
2624 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2625 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2626 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2627 skb_tstamp_tx(skb, &shhwtstamps);
2628 gfar_clear_txbd_status(bdp);
2632 gfar_clear_txbd_status(bdp);
2633 bdp = next_txbd(bdp, base, tx_ring_size);
2635 for (i = 0; i < frags; i++) {
2636 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2637 be16_to_cpu(bdp->length),
2639 gfar_clear_txbd_status(bdp);
2640 bdp = next_txbd(bdp, base, tx_ring_size);
2643 bytes_sent += GFAR_CB(skb)->bytes_sent;
2645 dev_kfree_skb_any(skb);
2647 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2649 skb_dirtytx = (skb_dirtytx + 1) &
2650 TX_RING_MOD_MASK(tx_ring_size);
2653 spin_lock(&tx_queue->txlock);
2654 tx_queue->num_txbdfree += nr_txbds;
2655 spin_unlock(&tx_queue->txlock);
2658 /* If we freed a buffer, we can restart transmission, if necessary */
2659 if (tx_queue->num_txbdfree &&
2660 netif_tx_queue_stopped(txq) &&
2661 !(test_bit(GFAR_DOWN, &priv->state)))
2662 netif_wake_subqueue(priv->ndev, tqi);
2664 /* Update dirty indicators */
2665 tx_queue->skb_dirtytx = skb_dirtytx;
2666 tx_queue->dirty_tx = bdp;
2668 netdev_tx_completed_queue(txq, howmany, bytes_sent);
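/* Note: netdev_tx_completed_queue() pairs with netdev_tx_sent_queue()
 * in gfar_start_xmit(); both report the on-wire byte count stashed in
 * GFAR_CB(skb)->bytes_sent so byte-queue limits (BQL) stay balanced.
 */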
2671 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2673 struct gfar_private *priv = netdev_priv(dev);
2674 struct sk_buff *skb;
2676 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2680 gfar_align_skb(skb);
2685 static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
2687 struct gfar_private *priv = netdev_priv(dev);
2688 struct sk_buff *skb;
2691 skb = gfar_alloc_skb(dev);
2695 addr = dma_map_single(priv->dev, skb->data,
2696 priv->rx_buffer_size, DMA_FROM_DEVICE);
2697 if (unlikely(dma_mapping_error(priv->dev, addr))) {
2698 dev_kfree_skb_any(skb);
2706 static inline void count_errors(unsigned short status, struct net_device *dev)
2708 struct gfar_private *priv = netdev_priv(dev);
2709 struct net_device_stats *stats = &dev->stats;
2710 struct gfar_extra_stats *estats = &priv->extra_stats;
2712 /* If the packet was truncated, none of the other errors matter */
2713 if (status & RXBD_TRUNCATED) {
2714 stats->rx_length_errors++;
2716 atomic64_inc(&estats->rx_trunc);
2720 /* Count the errors, if there were any */
2721 if (status & (RXBD_LARGE | RXBD_SHORT)) {
2722 stats->rx_length_errors++;
2724 if (status & RXBD_LARGE)
2725 atomic64_inc(&estats->rx_large);
2727 atomic64_inc(&estats->rx_short);
2729 if (status & RXBD_NONOCTET) {
2730 stats->rx_frame_errors++;
2731 atomic64_inc(&estats->rx_nonoctet);
2733 if (status & RXBD_CRCERR) {
2734 atomic64_inc(&estats->rx_crcerr);
2735 stats->rx_crc_errors++;
2737 if (status & RXBD_OVERRUN) {
2738 atomic64_inc(&estats->rx_overrun);
2739 stats->rx_crc_errors++;
2743 irqreturn_t gfar_receive(int irq, void *grp_id)
2745 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2746 unsigned long flags;
2749 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2750 spin_lock_irqsave(&grp->grplock, flags);
2751 imask = gfar_read(&grp->regs->imask);
2752 imask &= IMASK_RX_DISABLED;
2753 gfar_write(&grp->regs->imask, imask);
2754 spin_unlock_irqrestore(&grp->grplock, flags);
2755 __napi_schedule(&grp->napi_rx);
2757 /* Clear IEVENT, so the interrupt isn't raised again
2758 * for the packets that have already arrived.
2760 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2766 /* Interrupt Handler for Transmit complete */
2767 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2769 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2770 unsigned long flags;
2773 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2774 spin_lock_irqsave(&grp->grplock, flags);
2775 imask = gfar_read(&grp->regs->imask);
2776 imask &= IMASK_TX_DISABLED;
2777 gfar_write(&grp->regs->imask, imask);
2778 spin_unlock_irqrestore(&grp->grplock, flags);
2779 __napi_schedule(&grp->napi_tx);
2781 /* Clear IEVENT, so the interrupt isn't raised again
2782 * for the packets that have already arrived.
2784 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2790 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2792 /* If valid headers were found, and valid sums
2793 * were verified, then we tell the kernel that no
2794 * checksumming is necessary. Otherwise, the skb stays CHECKSUM_NONE and the stack verifies it
2796 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2797 (RXFCB_CIP | RXFCB_CTU))
2798 skb->ip_summed = CHECKSUM_UNNECESSARY;
2800 skb_checksum_none_assert(skb);
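/* Illustrative decode: the equality test above requires both RXFCB_CIP
 * and RXFCB_CTU to be set while any other bits covered by
 * RXFCB_CSUM_MASK (notably the checksum-error flags) are clear; CIP
 * alone, say, leaves the skb as CHECKSUM_NONE.
 */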
2803 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2804 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2805 int amount_pull, struct napi_struct *napi)
2807 struct gfar_private *priv = netdev_priv(dev);
2808 struct rxfcb *fcb = NULL;
2810 /* fcb is at the beginning if it exists */
2811 fcb = (struct rxfcb *)skb->data;
2813 /* Remove the FCB from the skb
2814 * Remove the padded bytes, if there are any
2817 skb_record_rx_queue(skb, fcb->rq);
2818 skb_pull(skb, amount_pull);
2821 /* Get receive timestamp from the skb */
2822 if (priv->hwts_rx_en) {
2823 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2824 u64 *ns = (u64 *) skb->data;
2826 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2827 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2831 skb_pull(skb, priv->padding);
2833 if (dev->features & NETIF_F_RXCSUM)
2834 gfar_rx_checksum(skb, fcb);
2836 /* Tell the skb what kind of packet this is */
2837 skb->protocol = eth_type_trans(skb, dev);
2839 /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2840 * Even if vlan rx accel is disabled, on some chips
2841 * RXFCB_VLN is pseudo-randomly set.
2843 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2844 be16_to_cpu(fcb->flags) & RXFCB_VLN)
2845 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2846 be16_to_cpu(fcb->vlctl));
2848 /* Send the packet up the stack */
2849 napi_gro_receive(napi, skb);
2853 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2854 * until the budget/quota has been reached. Returns the number
2857 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2859 struct net_device *dev = rx_queue->dev;
2860 struct rxbd8 *bdp, *base;
2861 struct sk_buff *skb;
2865 struct gfar_private *priv = netdev_priv(dev);
2867 /* Get the first full descriptor */
2868 bdp = rx_queue->cur_rx;
2869 base = rx_queue->rx_bd_base;
2871 amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
2873 while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
2874 struct sk_buff *newskb;
2879 /* Add another skb for the future */
2880 newskb = gfar_new_skb(dev, &bufaddr);
2882 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2884 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2885 priv->rx_buffer_size, DMA_FROM_DEVICE);
2887 if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
2888 be16_to_cpu(bdp->length) > priv->rx_buffer_size))
2889 bdp->status = cpu_to_be16(RXBD_LARGE);
2891 /* We drop the frame if we failed to allocate a new buffer */
2892 if (unlikely(!newskb ||
2893 !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
2894 be16_to_cpu(bdp->status) & RXBD_ERR)) {
2895 count_errors(be16_to_cpu(bdp->status), dev);
2897 if (unlikely(!newskb)) {
2899 bufaddr = be32_to_cpu(bdp->bufPtr);
2903 /* Increment the number of packets */
2904 rx_queue->stats.rx_packets++;
2908 pkt_len = be16_to_cpu(bdp->length) -
2910 /* Remove the FCS from the packet length */
2911 skb_put(skb, pkt_len);
2912 rx_queue->stats.rx_bytes += pkt_len;
2913 skb_record_rx_queue(skb, rx_queue->qindex);
2914 gfar_process_frame(dev, skb, amount_pull,
2915 &rx_queue->grp->napi_rx);
2918 netif_warn(priv, rx_err, dev, "Missing skb!\n");
2919 rx_queue->stats.rx_dropped++;
2920 atomic64_inc(&priv->extra_stats.rx_skbmissing);
2925 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2927 /* Setup the new bdp */
2928 gfar_init_rxbdp(rx_queue, bdp, bufaddr);
2930 /* Update Last Free RxBD pointer for LFC */
2931 if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
2932 gfar_write(rx_queue->rfbptr, (u32)bdp);
2934 /* Update to the next pointer */
2935 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2937 /* update to point at the next skb */
2938 rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2939 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2942 /* Update the current rxbd pointer to be the next one */
2943 rx_queue->cur_rx = bdp;
2948 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2950 struct gfar_priv_grp *gfargrp =
2951 container_of(napi, struct gfar_priv_grp, napi_rx);
2952 struct gfar __iomem *regs = gfargrp->regs;
2953 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2956 /* Clear IEVENT, so the interrupt isn't raised again
2957 * for the packets that have already arrived
2959 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2961 work_done = gfar_clean_rx_ring(rx_queue, budget);
2963 if (work_done < budget) {
2965 napi_complete(napi);
2966 /* Clear the halt bit in RSTAT */
2967 gfar_write(&regs->rstat, gfargrp->rstat);
2969 spin_lock_irq(&gfargrp->grplock);
2970 imask = gfar_read(&regs->imask);
2971 imask |= IMASK_RX_DEFAULT;
2972 gfar_write(&regs->imask, imask);
2973 spin_unlock_irq(&gfargrp->grplock);
2979 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2981 struct gfar_priv_grp *gfargrp =
2982 container_of(napi, struct gfar_priv_grp, napi_tx);
2983 struct gfar __iomem *regs = gfargrp->regs;
2984 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2987 /* Clear IEVENT, so the interrupt isn't raised again
2988 * for the packets that have already arrived
2990 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2992 /* run Tx cleanup to completion */
2993 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2994 gfar_clean_tx_ring(tx_queue);
2996 napi_complete(napi);
2998 spin_lock_irq(&gfargrp->grplock);
2999 imask = gfar_read(&regs->imask);
3000 imask |= IMASK_TX_DEFAULT;
3001 gfar_write(&regs->imask, imask);
3002 spin_unlock_irq(&gfargrp->grplock);
3007 static int gfar_poll_rx(struct napi_struct *napi, int budget)
3009 struct gfar_priv_grp *gfargrp =
3010 container_of(napi, struct gfar_priv_grp, napi_rx);
3011 struct gfar_private *priv = gfargrp->priv;
3012 struct gfar __iomem *regs = gfargrp->regs;
3013 struct gfar_priv_rx_q *rx_queue = NULL;
3014 int work_done = 0, work_done_per_q = 0;
3015 int i, budget_per_q = 0;
3016 unsigned long rstat_rxf;
3019 /* Clear IEVENT, so the interrupt isn't raised again
3020 * for the packets that have already arrived
3022 gfar_write(&regs->ievent, IEVENT_RX_MASK);
3024 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
3026 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
3028 budget_per_q = budget/num_act_queues;
3030 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
3031 /* skip queue if not active */
3032 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
3035 rx_queue = priv->rx_queue[i];
3037 gfar_clean_rx_ring(rx_queue, budget_per_q);
3038 work_done += work_done_per_q;
3040 /* finished processing this queue */
3041 if (work_done_per_q < budget_per_q) {
3042 /* clear active queue hw indication */
3043 gfar_write(&regs->rstat,
3044 RSTAT_CLEAR_RXF0 >> i);
3047 if (!num_act_queues)
3052 if (!num_act_queues) {
3054 napi_complete(napi);
3056 /* Clear the halt bit in RSTAT */
3057 gfar_write(&regs->rstat, gfargrp->rstat);
3059 spin_lock_irq(&gfargrp->grplock);
3060 imask = gfar_read(&regs->imask);
3061 imask |= IMASK_RX_DEFAULT;
3062 gfar_write(&regs->imask, imask);
3063 spin_unlock_irq(&gfargrp->grplock);
3069 static int gfar_poll_tx(struct napi_struct *napi, int budget)
3071 struct gfar_priv_grp *gfargrp =
3072 container_of(napi, struct gfar_priv_grp, napi_tx);
3073 struct gfar_private *priv = gfargrp->priv;
3074 struct gfar __iomem *regs = gfargrp->regs;
3075 struct gfar_priv_tx_q *tx_queue = NULL;
3076 int has_tx_work = 0;
3079 /* Clear IEVENT, so the interrupt isn't raised again
3080 * for the packets that have already arrived
3082 gfar_write(&regs->ievent, IEVENT_TX_MASK);
3084 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3085 tx_queue = priv->tx_queue[i];
3086 /* run Tx cleanup to completion */
3087 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3088 gfar_clean_tx_ring(tx_queue);
3095 napi_complete(napi);
3097 spin_lock_irq(&gfargrp->grplock);
3098 imask = gfar_read(&regs->imask);
3099 imask |= IMASK_TX_DEFAULT;
3100 gfar_write(&regs->imask, imask);
3101 spin_unlock_irq(&gfargrp->grplock);
3108 #ifdef CONFIG_NET_POLL_CONTROLLER
3109 /* Polling 'interrupt' - used by things like netconsole to send skbs
3110 * without having to re-enable interrupts. It's not called while
3111 * the interrupt routine is executing.
3113 static void gfar_netpoll(struct net_device *dev)
3115 struct gfar_private *priv = netdev_priv(dev);
3118 /* If the device has multiple interrupts, run tx/rx */
3119 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3120 for (i = 0; i < priv->num_grps; i++) {
3121 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3123 disable_irq(gfar_irq(grp, TX)->irq);
3124 disable_irq(gfar_irq(grp, RX)->irq);
3125 disable_irq(gfar_irq(grp, ER)->irq);
3126 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3127 enable_irq(gfar_irq(grp, ER)->irq);
3128 enable_irq(gfar_irq(grp, RX)->irq);
3129 enable_irq(gfar_irq(grp, TX)->irq);
3132 for (i = 0; i < priv->num_grps; i++) {
3133 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3135 disable_irq(gfar_irq(grp, TX)->irq);
3136 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3137 enable_irq(gfar_irq(grp, TX)->irq);
3143 /* The interrupt handler for devices with one interrupt */
3144 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3146 struct gfar_priv_grp *gfargrp = grp_id;
3148 /* Save ievent for future reference */
3149 u32 events = gfar_read(&gfargrp->regs->ievent);
3151 /* Check for reception */
3152 if (events & IEVENT_RX_MASK)
3153 gfar_receive(irq, grp_id);
3155 /* Check for transmit completion */
3156 if (events & IEVENT_TX_MASK)
3157 gfar_transmit(irq, grp_id);
3159 /* Check for errors */
3160 if (events & IEVENT_ERR_MASK)
3161 gfar_error(irq, grp_id);
3166 /* Called every time the controller might need to be made
3167 * aware of new link state. The PHY code conveys this
3168 * information through variables in the phydev structure, and this
3169 * function converts those variables into the appropriate
3170 * register values, and can bring down the device if needed.
3172 static void adjust_link(struct net_device *dev)
3174 struct gfar_private *priv = netdev_priv(dev);
3175 struct phy_device *phydev = priv->phydev;
3177 if (unlikely(phydev->link != priv->oldlink ||
3178 (phydev->link && (phydev->duplex != priv->oldduplex ||
3179 phydev->speed != priv->oldspeed))))
3180 gfar_update_link_state(priv);
3183 /* Update the hash table based on the current list of multicast
3184 * addresses we subscribe to. Also, change the promiscuity of
3185 * the device based on the flags (this function is called
3186 * whenever dev->flags is changed)
3188 static void gfar_set_multi(struct net_device *dev)
3190 struct netdev_hw_addr *ha;
3191 struct gfar_private *priv = netdev_priv(dev);
3192 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3195 if (dev->flags & IFF_PROMISC) {
3196 /* Set RCTRL to PROM */
3197 tempval = gfar_read(&regs->rctrl);
3198 tempval |= RCTRL_PROM;
3199 gfar_write(&regs->rctrl, tempval);
3201 /* Set RCTRL to not PROM */
3202 tempval = gfar_read(&regs->rctrl);
3203 tempval &= ~(RCTRL_PROM);
3204 gfar_write(&regs->rctrl, tempval);
3207 if (dev->flags & IFF_ALLMULTI) {
3208 /* Set the hash to rx all multicast frames */
3209 gfar_write(&regs->igaddr0, 0xffffffff);
3210 gfar_write(&regs->igaddr1, 0xffffffff);
3211 gfar_write(&regs->igaddr2, 0xffffffff);
3212 gfar_write(&regs->igaddr3, 0xffffffff);
3213 gfar_write(&regs->igaddr4, 0xffffffff);
3214 gfar_write(&regs->igaddr5, 0xffffffff);
3215 gfar_write(&regs->igaddr6, 0xffffffff);
3216 gfar_write(&regs->igaddr7, 0xffffffff);
3217 gfar_write(&regs->gaddr0, 0xffffffff);
3218 gfar_write(&regs->gaddr1, 0xffffffff);
3219 gfar_write(&regs->gaddr2, 0xffffffff);
3220 gfar_write(&regs->gaddr3, 0xffffffff);
3221 gfar_write(&regs->gaddr4, 0xffffffff);
3222 gfar_write(&regs->gaddr5, 0xffffffff);
3223 gfar_write(&regs->gaddr6, 0xffffffff);
3224 gfar_write(&regs->gaddr7, 0xffffffff);
3229 /* zero out the hash */
3230 gfar_write(&regs->igaddr0, 0x0);
3231 gfar_write(&regs->igaddr1, 0x0);
3232 gfar_write(&regs->igaddr2, 0x0);
3233 gfar_write(&regs->igaddr3, 0x0);
3234 gfar_write(&regs->igaddr4, 0x0);
3235 gfar_write(&regs->igaddr5, 0x0);
3236 gfar_write(&regs->igaddr6, 0x0);
3237 gfar_write(&regs->igaddr7, 0x0);
3238 gfar_write(&regs->gaddr0, 0x0);
3239 gfar_write(&regs->gaddr1, 0x0);
3240 gfar_write(&regs->gaddr2, 0x0);
3241 gfar_write(&regs->gaddr3, 0x0);
3242 gfar_write(&regs->gaddr4, 0x0);
3243 gfar_write(&regs->gaddr5, 0x0);
3244 gfar_write(&regs->gaddr6, 0x0);
3245 gfar_write(&regs->gaddr7, 0x0);
3247 /* If we have extended hash tables, we need to
3248 * clear the exact match registers to prepare for
3251 if (priv->extended_hash) {
3252 em_num = GFAR_EM_NUM + 1;
3253 gfar_clear_exact_match(dev);
3260 if (netdev_mc_empty(dev))
3263 /* Parse the list, and set the appropriate bits */
3264 netdev_for_each_mc_addr(ha, dev) {
3266 gfar_set_mac_for_addr(dev, idx, ha->addr);
3269 gfar_set_hash_for_addr(dev, ha->addr);
3275 /* Clears each of the exact match registers to zero, so they
3276 * don't interfere with normal reception
3278 static void gfar_clear_exact_match(struct net_device *dev)
3281 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3283 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3284 gfar_set_mac_for_addr(dev, idx, zero_arr);
3287 /* Set the appropriate hash bit for the given addr */
3288 /* The algorithm works like so:
3289 * 1) Take the Destination Address (i.e. the multicast address), and
3290 * do a CRC on it (little endian), and reverse the bits of the
3292 * 2) Use the 8 most significant bits as a hash into a 256-entry
3293 * table. The table is controlled through 8 32-bit registers:
3294 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
3295 * entry 255. This means that the 3 most significant bits in the
3296 * hash index determine which gaddr register to use, and the 5 other bits
3297 * indicate which bit (assuming an IBM numbering scheme, which
3298 * for PowerPC (tm) is usually the case) in the register holds
3301 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3304 struct gfar_private *priv = netdev_priv(dev);
3305 u32 result = ether_crc(ETH_ALEN, addr);
3306 int width = priv->hash_width;
3307 u8 whichbit = (result >> (32 - width)) & 0x1f;
3308 u8 whichreg = result >> (32 - width + 5);
3309 u32 value = (1 << (31-whichbit));
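/* Worked example (illustrative): with an 8-bit hash_width, a CRC result
 * of 0xd2000000 keeps the top byte 0xd2 = 0b11010010, so whichreg is
 * 0b110 = 6 and whichbit is 0b10010 = 18, setting 1 << (31 - 18) in
 * hash register 6.
 */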
3311 tempval = gfar_read(priv->hash_regs[whichreg]);
3313 gfar_write(priv->hash_regs[whichreg], tempval);
3317 /* There are multiple MAC Address register pairs on some controllers.
3318 * This function sets the num'th pair to a given address
3320 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3323 struct gfar_private *priv = netdev_priv(dev);
3324 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3326 u32 __iomem *macptr = &regs->macstnaddr1;
3330 /* For a station address of 0x12345678ABCD in transmission
3331 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3332 * MACnADDR2 is set to 0x34120000.
3334 tempval = (addr[5] << 24) | (addr[4] << 16) |
3335 (addr[3] << 8) | addr[2];
3337 gfar_write(macptr, tempval);
3339 tempval = (addr[1] << 24) | (addr[0] << 16);
3341 gfar_write(macptr+1, tempval);
3344 /* GFAR error interrupt handler */
3345 static irqreturn_t gfar_error(int irq, void *grp_id)
3347 struct gfar_priv_grp *gfargrp = grp_id;
3348 struct gfar __iomem *regs = gfargrp->regs;
3349 struct gfar_private *priv = gfargrp->priv;
3350 struct net_device *dev = priv->ndev;
3352 /* Save ievent for future reference */
3353 u32 events = gfar_read(&regs->ievent);
3356 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3358 /* Magic Packet is not an error. */
3359 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3360 (events & IEVENT_MAG))
3361 events &= ~IEVENT_MAG;
3364 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3366 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3367 events, gfar_read(&regs->imask));
3369 /* Update the error counters */
3370 if (events & IEVENT_TXE) {
3371 dev->stats.tx_errors++;
3373 if (events & IEVENT_LC)
3374 dev->stats.tx_window_errors++;
3375 if (events & IEVENT_CRL)
3376 dev->stats.tx_aborted_errors++;
3377 if (events & IEVENT_XFUN) {
3378 netif_dbg(priv, tx_err, dev,
3379 "TX FIFO underrun, packet dropped\n");
3380 dev->stats.tx_dropped++;
3381 atomic64_inc(&priv->extra_stats.tx_underrun);
3383 schedule_work(&priv->reset_task);
3385 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3387 if (events & IEVENT_BSY) {
3388 dev->stats.rx_errors++;
3389 atomic64_inc(&priv->extra_stats.rx_bsy);
3391 gfar_receive(irq, grp_id);
3393 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3394 gfar_read(&regs->rstat));
3396 if (events & IEVENT_BABR) {
3397 dev->stats.rx_errors++;
3398 atomic64_inc(&priv->extra_stats.rx_babr);
3400 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3402 if (events & IEVENT_EBERR) {
3403 atomic64_inc(&priv->extra_stats.eberr);
3404 netif_dbg(priv, rx_err, dev, "bus error\n");
3406 if (events & IEVENT_RXC)
3407 netif_dbg(priv, rx_status, dev, "control frame\n");
3409 if (events & IEVENT_BABT) {
3410 atomic64_inc(&priv->extra_stats.tx_babt);
3411 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3416 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3418 struct phy_device *phydev = priv->phydev;
3421 if (!phydev->duplex)
3424 if (!priv->pause_aneg_en) {
3425 if (priv->tx_pause_en)
3426 val |= MACCFG1_TX_FLOW;
3427 if (priv->rx_pause_en)
3428 val |= MACCFG1_RX_FLOW;
3430 u16 lcl_adv, rmt_adv;
3432 /* get link partner capabilities */
3435 rmt_adv = LPA_PAUSE_CAP;
3436 if (phydev->asym_pause)
3437 rmt_adv |= LPA_PAUSE_ASYM;
3440 if (phydev->advertising & ADVERTISED_Pause)
3441 lcl_adv |= ADVERTISE_PAUSE_CAP;
3442 if (phydev->advertising & ADVERTISED_Asym_Pause)
3443 lcl_adv |= ADVERTISE_PAUSE_ASYM;
3445 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3446 if (flowctrl & FLOW_CTRL_TX)
3447 val |= MACCFG1_TX_FLOW;
3448 if (flowctrl & FLOW_CTRL_RX)
3449 val |= MACCFG1_RX_FLOW;
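/* e.g. when both link partners advertise symmetric pause,
 * mii_resolve_flowctrl_fdx() returns FLOW_CTRL_TX | FLOW_CTRL_RX and
 * both MACCFG1 flow bits get set; an asymmetric match enables only one
 * direction (illustrative).
 */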
3455 static noinline void gfar_update_link_state(struct gfar_private *priv)
3457 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3458 struct phy_device *phydev = priv->phydev;
3459 struct gfar_priv_rx_q *rx_queue = NULL;
3463 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3467 u32 tempval1 = gfar_read(&regs->maccfg1);
3468 u32 tempval = gfar_read(&regs->maccfg2);
3469 u32 ecntrl = gfar_read(&regs->ecntrl);
3470 u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
3472 if (phydev->duplex != priv->oldduplex) {
3473 if (!(phydev->duplex))
3474 tempval &= ~(MACCFG2_FULL_DUPLEX);
3476 tempval |= MACCFG2_FULL_DUPLEX;
3478 priv->oldduplex = phydev->duplex;
3481 if (phydev->speed != priv->oldspeed) {
3482 switch (phydev->speed) {
3485 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3487 ecntrl &= ~(ECNTRL_R100);
3492 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3494 /* Reduced mode distinguishes
3495 * between 10 and 100
3497 if (phydev->speed == SPEED_100)
3498 ecntrl |= ECNTRL_R100;
3500 ecntrl &= ~(ECNTRL_R100);
3503 netif_warn(priv, link, priv->ndev,
3504 "Ack! Speed (%d) is not 10/100/1000!\n",
3509 priv->oldspeed = phydev->speed;
3512 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3513 tempval1 |= gfar_get_flowctrl_cfg(priv);
3515 /* Turn last free buffer recording on */
3516 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
3517 for (i = 0; i < priv->num_rx_queues; i++) {
3518 rx_queue = priv->rx_queue[i];
3519 bdp = rx_queue->cur_rx;
3520 /* skip to previous bd */
3521 bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
3522 rx_queue->rx_bd_base,
3523 rx_queue->rx_ring_size);
3525 if (rx_queue->rfbptr)
3526 gfar_write(rx_queue->rfbptr, (u32)bdp);
3529 priv->tx_actual_en = 1;
3532 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
3533 priv->tx_actual_en = 0;
3535 gfar_write(&regs->maccfg1, tempval1);
3536 gfar_write(&regs->maccfg2, tempval);
3537 gfar_write(&regs->ecntrl, ecntrl);
3542 } else if (priv->oldlink) {
3545 priv->oldduplex = -1;
3548 if (netif_msg_link(priv))
3549 phy_print_status(phydev);
3552 static const struct of_device_id gfar_match[] =
3556 .compatible = "gianfar",
3559 .compatible = "fsl,etsec2",
3563 MODULE_DEVICE_TABLE(of, gfar_match);
3565 /* Structure for a device driver */
3566 static struct platform_driver gfar_driver = {
3568 .name = "fsl-gianfar",
3570 .of_match_table = gfar_match,
3572 .probe = gfar_probe,
3573 .remove = gfar_remove,
3576 module_platform_driver(gfar_driver);