/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2015 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "ravb.h"

#define RAVB_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	  | \
		 NETIF_MSG_TIMER  | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

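/* Poll a register until the bits selected by @mask read back as @value.
 * The loop is bounded (10000 polls) so that unresponsive hardware produces
 * a -ETIMEDOUT error instead of an endless busy-wait.
 */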
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
	int i;

	for (i = 0; i < 10000; i++) {
		if ((ravb_read(ndev, reg) & mask) == value)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}

static int ravb_config(struct net_device *ndev)
{
	int error;

	/* Set config mode */
	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
		   CCC);
	/* Check if the operating mode is changed to the config mode */
	error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
	if (error)
		netdev_err(ndev, "failed to switch device to config mode\n");

	return error;
}

static void ravb_set_duplex(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecmr = ravb_read(ndev, ECMR);

	if (priv->duplex)	/* Full */
		ecmr |=  ECMR_DM;
	else			/* Half */
		ecmr &= ~ECMR_DM;
	ravb_write(ndev, ecmr, ECMR);
}

static void ravb_set_rate(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 100:		/* 100BASE */
		ravb_write(ndev, GECMR_SPEED_100, GECMR);
		break;
	case 1000:		/* 1000BASE */
		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
		break;
	}
}

static void ravb_set_buffer_align(struct sk_buff *skb)
{
	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, RAVB_ALIGN - reserve);
}

/* Get MAC address from the MAC address registers
 *
 * Ethernet AVB device doesn't have ROM for MAC address.
 * This function gets the MAC address that was used by a bootloader.
 */
static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
{
	if (mac) {
		ether_addr_copy(ndev->dev_addr, mac);
	} else {
		ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF;
		ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF;
	}
}

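/* The MDIO bus is bit-banged through the PIR register: each helper below
 * drives or samples a single PIR bit (clock, pin direction, data out/in)
 * and the mdio-bitbang library composes them into complete MDIO frames.
 */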
static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);
	u32 pir = ravb_read(priv->ndev, PIR);

	if (set)
		pir |=  mask;
	else
		pir &= ~mask;
	ravb_write(priv->ndev, pir, PIR);
}

/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = ravb_set_mdc,
	.set_mdio_dir = ravb_set_mdio_dir,
	.set_mdio_data = ravb_set_mdio_data,
	.get_mdio_data = ravb_get_mdio_data,
};

/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int ring_size;
	int i;

	/* Free RX skb ringbuffer */
	if (priv->rx_skb[q]) {
		for (i = 0; i < priv->num_rx_ring[q]; i++)
			dev_kfree_skb(priv->rx_skb[q][i]);
	}
	kfree(priv->rx_skb[q]);
	priv->rx_skb[q] = NULL;

	/* Free TX skb ringbuffer */
	if (priv->tx_skb[q]) {
		for (i = 0; i < priv->num_tx_ring[q]; i++)
			dev_kfree_skb(priv->tx_skb[q][i]);
	}
	kfree(priv->tx_skb[q]);
	priv->tx_skb[q] = NULL;

	/* Free aligned TX buffers */
	if (priv->tx_buffers[q]) {
		for (i = 0; i < priv->num_tx_ring[q]; i++)
			kfree(priv->tx_buffers[q][i]);
	}
	kfree(priv->tx_buffers[q]);
	priv->tx_buffers[q] = NULL;

	if (priv->rx_ring[q]) {
		ring_size = sizeof(struct ravb_ex_rx_desc) *
			    (priv->num_rx_ring[q] + 1);
		dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
				  priv->rx_desc_dma[q]);
		priv->rx_ring[q] = NULL;
	}

	if (priv->tx_ring[q]) {
		ring_size = sizeof(struct ravb_tx_desc) *
			    (priv->num_tx_ring[q] + 1);
		dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
				  priv->tx_desc_dma[q]);
		priv->tx_ring[q] = NULL;
	}
}

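/* Note: both descriptor rings are sized with one extra slot; it holds a
 * DT_LINKFIX descriptor pointing back to the start of the ring, so the
 * AVB-DMAC wraps around the ring by itself.
 */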
/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_ex_rx_desc *rx_desc = NULL;
	struct ravb_tx_desc *tx_desc = NULL;
	struct ravb_desc *desc = NULL;
	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
	dma_addr_t dma_addr;
	int i;

	priv->cur_rx[q] = 0;
	priv->cur_tx[q] = 0;
	priv->dirty_rx[q] = 0;
	priv->dirty_tx[q] = 0;

	memset(priv->rx_ring[q], 0, rx_ring_size);
	/* Build RX ring buffer */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		/* RX descriptor */
		rx_desc = &priv->rx_ring[q][i];
		/* The size of the buffer should be on 16-byte boundary. */
		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
		dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
					  ALIGN(PKT_BUF_SZ, 16),
					  DMA_FROM_DEVICE);
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		if (dma_mapping_error(&ndev->dev, dma_addr))
			rx_desc->ds_cc = cpu_to_le16(0);
		rx_desc->dptr = cpu_to_le32(dma_addr);
		rx_desc->die_dt = DT_FEMPTY;
	}
	rx_desc = &priv->rx_ring[q][i];
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */

	memset(priv->tx_ring[q], 0, tx_ring_size);
	/* Build TX ring buffer */
	for (i = 0; i < priv->num_tx_ring[q]; i++) {
		tx_desc = &priv->tx_ring[q][i];
		tx_desc->die_dt = DT_EEMPTY;
	}
	tx_desc = &priv->tx_ring[q][i];
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
	tx_desc->die_dt = DT_LINKFIX; /* type */

	/* RX descriptor base address for best effort */
	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

	/* TX descriptor base address for best effort */
	desc = &priv->desc_bat[q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}

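/* RX skbs and TX buffers are over-allocated by RAVB_ALIGN - 1 bytes so
 * that the data pointer can later be moved up to a RAVB_ALIGN boundary
 * (see ravb_set_buffer_align() and the PTR_ALIGN() in ravb_start_xmit()).
 */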
/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	int ring_size;
	void *buffer;
	int i;

	/* Allocate RX and TX skb rings */
	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
	if (!priv->rx_skb[q] || !priv->tx_skb[q])
		goto error;

	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
		if (!skb)
			goto error;
		ravb_set_buffer_align(skb);
		priv->rx_skb[q][i] = skb;
	}

	/* Allocate rings for the aligned buffers */
	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
	if (!priv->tx_buffers[q])
		goto error;

	for (i = 0; i < priv->num_tx_ring[q]; i++) {
		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
		if (!buffer)
			goto error;
		/* Aligned TX buffer */
		priv->tx_buffers[q][i] = buffer;
	}

	/* Allocate all RX descriptors. */
	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
					      &priv->rx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->rx_ring[q])
		goto error;

	priv->dirty_rx[q] = 0;

	/* Allocate all TX descriptors. */
	ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
	priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
					      &priv->tx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->tx_ring[q])
		goto error;

	return 0;

error:
	ravb_ring_free(ndev, q);

	return -ENOMEM;
}

/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecmr;

	/* Receive frame limit set register */
	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);

	/* PAUSE prohibition */
	ecmr = ravb_read(ndev, ECMR);
	ecmr &= ECMR_DM;
	ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
	ravb_write(ndev, ecmr, ECMR);

	ravb_set_rate(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev,
		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

	ravb_write(ndev, 1, MPR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}

/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
	int error;

	/* Set CONFIG mode */
	error = ravb_config(ndev);
	if (error)
		return error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;
	error = ravb_ring_init(ndev, RAVB_NC);
	if (error) {
		ravb_ring_free(ndev, RAVB_BE);
		return error;
	}

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);
	ravb_ring_format(ndev, RAVB_NC);

#if defined(__LITTLE_ENDIAN)
	ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
#else
	ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
#endif

	/* Set AVB RX */
	ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);

	/* Set FIFO size */
	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);

	/* Timestamp enable */
	ravb_write(ndev, TCCR_TFEN, TCCR);

	/* Interrupt enable: */
	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
	/* Receive FIFO full warning */
	ravb_write(ndev, RIC1_RFWE, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
	/* Frame transmitted, timestamp FIFO updated */
	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

	/* Setting the control will start the AVB-DMAC process. */
	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
		   CCC);

	return 0;
}

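/* The DMAC flags completed TX descriptors by rewriting their type to
 * DT_FEMPTY; anything still marked DT_FSINGLE is in flight, so the
 * reclaim loop below stops at the first such entry.
 */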
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_tx_desc *desc;
	int free_num = 0;
	int entry = 0;
	u32 size;

	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
		entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
		desc = &priv->tx_ring[q][entry];
		if (desc->die_dt != DT_FEMPTY)
			break;
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
		/* Free the original skb. */
		if (priv->tx_skb[q][entry]) {
			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skb[q][entry]);
			priv->tx_skb[q][entry] = NULL;
			free_num++;
		}
		stats->tx_packets++;
		stats->tx_bytes += size;
		desc->die_dt = DT_EEMPTY;
	}
	return free_num;
}

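/* Drain the TX timestamp FIFO: each entry carries a tag that is matched
 * against the tags queued on ts_skb_list by ravb_start_xmit() so that the
 * timestamp can be delivered to the skb which requested it.
 */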
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	struct timespec64 ts;
	u16 tag, tfa_tag;
	int count;
	u32 tfa2;

	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
	while (count--) {
		tfa2 = ravb_read(ndev, TFA2);
		tfa_tag = (tfa2 & TFA2_TST) >> 16;
		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
			    ravb_read(ndev, TFA1);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
					 list) {
			skb = ts_skb->skb;
			tag = ts_skb->tag;
			list_del(&ts_skb->list);
			kfree(ts_skb);
			if (tag == tfa_tag) {
				skb_tstamp_tx(skb, &shhwtstamps);
				break;
			}
		}
		ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
	}
}

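/* boguscnt starts at the number of filled descriptors, capped by the NAPI
 * *quota; it is decremented per processed descriptor, *quota is reduced by
 * the amount of work done, and the function returns true once the quota is
 * exhausted, telling the caller to stop polling for now.
 */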
/* Packet receive function for Ethernet AVB */
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
			priv->cur_rx[q];
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_ex_rx_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	struct timespec64 ts;
	u8 desc_status;
	u16 pkt_len;
	int limit;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	desc = &priv->rx_ring[q][entry];
	while (desc->die_dt != DT_FEMPTY) {
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		if (--boguscnt < 0)
			break;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
				   MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;

			skb = priv->rx_skb[q][entry];
			priv->rx_skb[q][entry] = NULL;
			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
					 ALIGN(PKT_BUF_SZ, 16),
					 DMA_FROM_DEVICE);
			get_ts &= (q == RAVB_NC) ?
					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
			if (get_ts) {
				struct skb_shared_hwtstamps *shhwtstamps;

				shhwtstamps = skb_hwtstamps(skb);
				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
					     32) | le32_to_cpu(desc->ts_sl);
				ts.tv_nsec = le32_to_cpu(desc->ts_n);
				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
			}
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi[q], skb);
			stats->rx_packets++;
			stats->rx_bytes += pkt_len;
		}

		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
	}

	/* Refill the RX ring buffers. */
	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
		/* The size of the buffer should be on 16-byte boundary. */
		desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));

		if (!priv->rx_skb[q][entry]) {
			skb = netdev_alloc_skb(ndev,
					       PKT_BUF_SZ + RAVB_ALIGN - 1);
			if (!skb)
				break;	/* Better luck next round. */
			ravb_set_buffer_align(skb);
			dma_addr = dma_map_single(&ndev->dev, skb->data,
						  le16_to_cpu(desc->ds_cc),
						  DMA_FROM_DEVICE);
			skb_checksum_none_assert(skb);
			/* We just set the data size to 0 for a failed mapping
			 * which should prevent DMA from happening...
			 */
			if (dma_mapping_error(&ndev->dev, dma_addr))
				desc->ds_cc = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			priv->rx_skb[q][entry] = skb;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		desc->die_dt = DT_FEMPTY;
	}

	*quota -= limit - (++boguscnt);

	return boguscnt <= 0;
}

static void ravb_rcv_snd_disable(struct net_device *ndev)
{
	/* Disable TX and RX */
	ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
	/* Enable TX and RX */
	ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
}

/* Wait for the hardware DMA processes to finish and stop the AVB-DMAC */
static int ravb_stop_dma(struct net_device *ndev)
{
	int error;

	/* Wait for stopping the hardware TX process */
	error = ravb_wait(ndev, TCCR,
			  TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
	if (error)
		return error;

	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
			  0);
	if (error)
		return error;

	/* Stop the E-MAC's RX/TX processes. */
	ravb_rcv_snd_disable(ndev);

	/* Wait for stopping the RX DMA process */
	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
	if (error)
		return error;

	/* Stop AVB-DMAC process */
	return ravb_config(ndev);
}

/* E-MAC interrupt handler */
static void ravb_emac_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecsr, psr;

	ecsr = ravb_read(ndev, ECSR);
	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */
	if (ecsr & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (ecsr & ECSR_LCHNG) {
		/* Link changed */
		if (priv->no_avb_link)
			return;
		psr = ravb_read(ndev, PSR);
		if (priv->avb_link_active_low)
			psr ^= PSR_LMON;
		if (!(psr & PSR_LMON)) {
			/* Disable RX and TX */
			ravb_rcv_snd_disable(ndev);
		} else {
			/* Enable RX and TX */
			ravb_rcv_snd_enable(ndev);
		}
	}
}

/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 eis, ris2;

	eis = ravb_read(ndev, EIS);
	ravb_write(ndev, ~EIS_QFS, EIS);
	if (eis & EIS_QFS) {
		ris2 = ravb_read(ndev, RIS2);
		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF0)
			priv->stats[RAVB_BE].rx_over_errors++;

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF1)
			priv->stats[RAVB_NC].rx_over_errors++;

		/* Receive FIFO Overflow int */
		if (ris2 & RIS2_RFFF)
			priv->rx_fifo_errors++;
	}
}

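/* Top-level interrupt handler: ISS summarizes all interrupt sources, and
 * each set summary bit is dispatched to the RX/TX, E-MAC, error or gPTP
 * handler.  RX/TX work itself is deferred to NAPI with the corresponding
 * per-queue interrupts masked until the poll routine re-enables them.
 */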
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Received and transmitted interrupts */
	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
		u32 ris0 = ravb_read(ndev, RIS0);
		u32 ric0 = ravb_read(ndev, RIC0);
		u32 tis = ravb_read(ndev, TIS);
		u32 tic = ravb_read(ndev, TIC);
		int q;

		/* Timestamp updated */
		if (tis & TIS_TFUF) {
			ravb_write(ndev, ~TIS_TFUF, TIS);
			ravb_get_tx_tstamp(ndev);
			result = IRQ_HANDLED;
		}

		/* Network control and best effort queue RX/TX */
		for (q = RAVB_NC; q >= RAVB_BE; q--) {
			if (((ris0 & ric0) & BIT(q)) ||
			    ((tis & tic) & BIT(q))) {
				if (napi_schedule_prep(&priv->napi[q])) {
					/* Mask RX and TX interrupts */
					ravb_write(ndev, ric0 & ~BIT(q), RIC0);
					ravb_write(ndev, tic & ~BIT(q), TIC);
					__napi_schedule(&priv->napi[q]);
				} else {
					netdev_dbg(ndev,
						   "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
						   ris0, ric0);
					netdev_dbg(ndev,
						   " tx status 0x%08x, tx mask 0x%08x.\n",
						   tis, tic);
				}
				result = IRQ_HANDLED;
			}
		}
	}

	/* E-MAC status summary */
	if (iss & ISS_MS) {
		ravb_emac_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	if (iss & ISS_CGIS)
		result = ravb_ptp_interrupt(ndev);

	mmiowb();
	spin_unlock(&priv->lock);
	return result;
}

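/* NAPI poll handler: each queue (best effort and network control) has its
 * own NAPI instance, identified here by its offset from priv->napi.  The
 * loop keeps servicing RX and TX until the queue's status bits clear or
 * the RX quota runs out.
 */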
static int ravb_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	int q = napi - priv->napi;
	int mask = BIT(q);
	int quota = budget;
	u32 ris0, tis;

	for (;;) {
		tis = ravb_read(ndev, TIS);
		ris0 = ravb_read(ndev, RIS0);
		if (!((ris0 & mask) || (tis & mask)))
			break;

		/* Processing RX Descriptor Ring */
		if (ris0 & mask) {
			/* Clear RX interrupt */
			ravb_write(ndev, ~mask, RIS0);
			if (ravb_rx(ndev, &quota, q))
				goto out;
		}
		/* Processing TX Descriptor Ring */
		if (tis & mask) {
			spin_lock_irqsave(&priv->lock, flags);
			/* Clear TX interrupt */
			ravb_write(ndev, ~mask, TIS);
			ravb_tx_free(ndev, q);
			netif_wake_subqueue(ndev, q);
			mmiowb();
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	napi_complete(napi);

	/* Re-enable RX/TX interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
	ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC);
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Receive error message handling */
	priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
	priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
	if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
		ndev->stats.rx_over_errors = priv->rx_over_errors;
		netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
	}
	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
		netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
	}
out:
	return budget - quota;
}

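/* phylib callback, invoked when the PHY reports a state change: it mirrors
 * the new duplex and speed into the E-MAC and, when no AVB link signal is
 * wired up, gates reception/transmission directly on the PHY link state.
 */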
/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = priv->phydev;
	bool new_state = false;

	if (phydev->link) {
		if (phydev->duplex != priv->duplex) {
			new_state = true;
			priv->duplex = phydev->duplex;
			ravb_set_duplex(ndev);
		}

		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
			ravb_set_rate(ndev);
		}
		if (!priv->link) {
			ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
				   ECMR);
			new_state = true;
			priv->link = phydev->link;
			if (priv->no_avb_link)
				ravb_rcv_snd_enable(ndev);
		}
	} else if (priv->link) {
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
		priv->duplex = -1;
		if (priv->no_avb_link)
			ravb_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}

/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev;
	struct device_node *pn;

	priv->link = 0;
	priv->speed = 0;
	priv->duplex = -1;

	/* Try connecting to PHY */
	pn = of_parse_phandle(np, "phy-handle", 0);
	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
				priv->phy_interface);
	if (!phydev) {
		netdev_err(ndev, "failed to connect PHY\n");
		return -ENOENT;
	}

	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
		    phydev->addr, phydev->irq, phydev->drv->name);

	priv->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	error = ravb_phy_init(ndev);
	if (error)
		return error;

	phy_start(priv->phydev);

	return 0;
}

static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error = -ENODEV;
	unsigned long flags;

	if (priv->phydev) {
		spin_lock_irqsave(&priv->lock, flags);
		error = phy_ethtool_gset(priv->phydev, ecmd);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return error;
}

static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	int error;

	if (!priv->phydev)
		return -ENODEV;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable TX and RX */
	ravb_rcv_snd_disable(ndev);

	error = phy_ethtool_sset(priv->phydev, ecmd);
	if (error)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		priv->duplex = 1;
	else
		priv->duplex = 0;

	ravb_set_duplex(ndev);

error_exit:
	mdelay(1);

	/* Enable TX and RX */
	ravb_rcv_snd_enable(ndev);

	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

static int ravb_nway_reset(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error = -ENODEV;
	unsigned long flags;

	if (priv->phydev) {
		spin_lock_irqsave(&priv->lock, flags);
		error = phy_start_aneg(priv->phydev);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return error;
}

static u32 ravb_get_msglevel(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ravb_private *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_missed_errors",
	"rx_queue_0_over_errors",

	"rx_queue_1_current",
	"tx_queue_1_current",
	"rx_queue_1_dirty",
	"tx_queue_1_dirty",
	"rx_queue_1_packets",
	"tx_queue_1_packets",
	"rx_queue_1_bytes",
	"tx_queue_1_bytes",
	"rx_queue_1_mcast_packets",
	"rx_queue_1_errors",
	"rx_queue_1_crc_errors",
	"rx_queue_1_frame_errors",
	"rx_queue_1_length_errors",
	"rx_queue_1_missed_errors",
	"rx_queue_1_over_errors",
};

#define RAVB_STATS_LEN	ARRAY_SIZE(ravb_gstrings_stats)

static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return RAVB_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void ravb_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int i = 0;
	int q;

	/* Device-specific stats */
	for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
		struct net_device_stats *stats = &priv->stats[q];

		data[i++] = priv->cur_rx[q];
		data[i++] = priv->cur_tx[q];
		data[i++] = priv->dirty_rx[q];
		data[i++] = priv->dirty_tx[q];
		data[i++] = stats->rx_packets;
		data[i++] = stats->tx_packets;
		data[i++] = stats->rx_bytes;
		data[i++] = stats->tx_bytes;
		data[i++] = stats->multicast;
		data[i++] = stats->rx_errors;
		data[i++] = stats->rx_crc_errors;
		data[i++] = stats->rx_frame_errors;
		data[i++] = stats->rx_length_errors;
		data[i++] = stats->rx_missed_errors;
		data[i++] = stats->rx_over_errors;
	}
}

static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
		break;
	}
}

static void ravb_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ring->rx_max_pending = BE_RX_RING_MAX;
	ring->tx_max_pending = BE_TX_RING_MAX;
	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}

static int ravb_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	if (ring->tx_pending > BE_TX_RING_MAX ||
	    ring->rx_pending > BE_RX_RING_MAX ||
	    ring->tx_pending < BE_TX_RING_MIN ||
	    ring->rx_pending < BE_RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		/* Stop PTP Clock driver */
		ravb_ptp_stop(ndev);
		/* Wait for DMA stopping */
		error = ravb_stop_dma(ndev);
		if (error) {
			netdev_err(ndev,
				   "cannot set ringparam! Any AVB processes are still running?\n");
			return error;
		}
		synchronize_irq(ndev->irq);

		/* Free all the skb's in the RX queue and the DMA buffers. */
		ravb_ring_free(ndev, RAVB_BE);
		ravb_ring_free(ndev, RAVB_NC);
	}

	/* Set new parameters */
	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

	if (netif_running(ndev)) {
		error = ravb_dmac_init(ndev);
		if (error) {
			netdev_err(ndev,
				   "%s: ravb_dmac_init() failed, error %d\n",
				   __func__, error);
			return error;
		}

		ravb_emac_init(ndev);

		/* Initialise PTP Clock driver */
		ravb_ptp_init(ndev, priv->pdev);

		netif_device_attach(ndev);
	}

	return 0;
}

static int ravb_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	struct ravb_private *priv = netdev_priv(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_ALL);
	info->phc_index = ptp_clock_index(priv->ptp.clock);

	return 0;
}

static const struct ethtool_ops ravb_ethtool_ops = {
	.get_settings		= ravb_get_settings,
	.set_settings		= ravb_set_settings,
	.nway_reset		= ravb_nway_reset,
	.get_msglevel		= ravb_get_msglevel,
	.set_msglevel		= ravb_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ravb_get_strings,
	.get_ethtool_stats	= ravb_get_ethtool_stats,
	.get_sset_count		= ravb_get_sset_count,
	.get_ringparam		= ravb_get_ringparam,
	.set_ringparam		= ravb_set_ringparam,
	.get_ts_info		= ravb_get_ts_info,
};

/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	napi_enable(&priv->napi[RAVB_BE]);
	napi_enable(&priv->napi[RAVB_NC]);

	error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
			    ndev);
	if (error) {
		netdev_err(ndev, "cannot request IRQ\n");
		goto out_napi_off;
	}

	/* Device init */
	error = ravb_dmac_init(ndev);
	if (error)
		goto out_free_irq;
	ravb_emac_init(ndev);

	/* Initialise PTP Clock driver */
	ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);

	/* PHY control start */
	error = ravb_phy_start(ndev);
	if (error)
		goto out_ptp_stop;

	return 0;

out_ptp_stop:
	/* Stop PTP Clock driver */
	ravb_ptp_stop(ndev);
out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	return error;
}

/* Timeout function for Ethernet AVB */
static void ravb_tx_timeout(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	netif_err(priv, tx_err, ndev,
		  "transmit timed out, status %08x, resetting...\n",
		  ravb_read(ndev, ISS));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	schedule_work(&priv->work);
}

static void ravb_tx_timeout_work(struct work_struct *work)
{
	struct ravb_private *priv = container_of(work, struct ravb_private,
						 work);
	struct net_device *ndev = priv->ndev;

	netif_tx_stop_all_queues(ndev);

	/* Stop PTP Clock driver */
	ravb_ptp_stop(ndev);

	/* Wait for DMA stopping */
	ravb_stop_dma(ndev);

	ravb_ring_free(ndev, RAVB_BE);
	ravb_ring_free(ndev, RAVB_NC);

	/* Device init */
	ravb_dmac_init(ndev);
	ravb_emac_init(ndev);

	/* Initialise PTP Clock driver */
	ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);
}

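/* Frames are not DMA-mapped in place: the payload is first copied into a
 * driver-owned buffer already aligned to RAVB_ALIGN, since skb data cannot
 * be assumed to satisfy the DMAC's alignment requirement.  Each frame then
 * occupies a single DT_FSINGLE descriptor.
 */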
/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb = NULL;
	u16 q = skb_get_queue_mapping(skb);
	struct ravb_tx_desc *desc;
	unsigned long flags;
	u32 dma_addr;
	void *buffer;
	u32 entry;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
		netif_err(priv, tx_queued, ndev,
			  "still transmitting with the full ring!\n");
		netif_stop_subqueue(ndev, q);
		spin_unlock_irqrestore(&priv->lock, flags);
		return NETDEV_TX_BUSY;
	}
	entry = priv->cur_tx[q] % priv->num_tx_ring[q];
	priv->tx_skb[q][entry] = skb;

	if (skb_put_padto(skb, ETH_ZLEN))
		goto drop;

	buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
	memcpy(buffer, skb->data, skb->len);
	desc = &priv->tx_ring[q][entry];
	desc->ds_tagl = cpu_to_le16(skb->len);
	dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, dma_addr))
		goto drop;
	desc->dptr = cpu_to_le32(dma_addr);

	/* TX timestamp required */
	if (q == RAVB_NC) {
		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
		if (!ts_skb) {
			dma_unmap_single(&ndev->dev, dma_addr, skb->len,
					 DMA_TO_DEVICE);
			goto drop;
		}
		ts_skb->skb = skb;
		ts_skb->tag = priv->ts_skb_tag++;
		priv->ts_skb_tag &= 0x3ff;
		list_add_tail(&ts_skb->list, &priv->ts_skb_list);

		/* TAG and timestamp required flag */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_tx_timestamp(skb);
		desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
		desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
	}

	/* Descriptor type must be set after all the above writes */
	dma_wmb();
	desc->die_dt = DT_FSINGLE;

	ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);

	priv->cur_tx[q]++;
	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
	    !ravb_tx_free(ndev, q))
		netif_stop_subqueue(ndev, q);

exit:
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);
	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	priv->tx_skb[q][entry] = NULL;
	goto exit;
}

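/* Hardware TX timestamping is only requested for the network control
 * queue (see the q == RAVB_NC branch in ravb_start_xmit()), so skbs that
 * want a timestamp are steered there.
 */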
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	/* If skb needs TX timestamp, it is handled in network control queue */
	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
							       RAVB_BE;
}

static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *nstats, *stats0, *stats1;

	nstats = &ndev->stats;
	stats0 = &priv->stats[RAVB_BE];
	stats1 = &priv->stats[RAVB_NC];

	nstats->tx_dropped += ravb_read(ndev, TROCR);
	ravb_write(ndev, 0, TROCR);	/* (write clear) */
	nstats->collisions += ravb_read(ndev, CDCR);
	ravb_write(ndev, 0, CDCR);	/* (write clear) */
	nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
	ravb_write(ndev, 0, LCCR);	/* (write clear) */

	nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
	ravb_write(ndev, 0, CERCR);	/* (write clear) */
	nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
	ravb_write(ndev, 0, CEECR);	/* (write clear) */

	nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
	nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
	nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
	nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
	nstats->multicast = stats0->multicast + stats1->multicast;
	nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
	nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
	nstats->rx_frame_errors =
		stats0->rx_frame_errors + stats1->rx_frame_errors;
	nstats->rx_length_errors =
		stats0->rx_length_errors + stats1->rx_length_errors;
	nstats->rx_missed_errors =
		stats0->rx_missed_errors + stats1->rx_missed_errors;
	nstats->rx_over_errors =
		stats0->rx_over_errors + stats1->rx_over_errors;

	return nstats;
}

/* Update promiscuous bit */
static void ravb_set_rx_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	u32 ecmr;

	spin_lock_irqsave(&priv->lock, flags);
	ecmr = ravb_read(ndev, ECMR);
	if (ndev->flags & IFF_PROMISC)
		ecmr |=  ECMR_PRM;
	else
		ecmr &= ~ECMR_PRM;
	ravb_write(ndev, ecmr, ECMR);
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Device close function for Ethernet AVB */
static int ravb_close(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;

	netif_tx_stop_all_queues(ndev);

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC1);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* Stop PTP Clock driver */
	ravb_ptp_stop(ndev);

	/* Set the config mode to stop the AVB-DMAC's processes */
	if (ravb_stop_dma(ndev) < 0)
		netdev_err(ndev,
			   "device will be stopped after h/w processes are done.\n");

	/* Clear the timestamp list */
	list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
		list_del(&ts_skb->list);
		kfree(ts_skb);
	}

	/* PHY disconnect */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	free_irq(ndev->irq, ndev);

	napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);

	/* Free all the skb's in the RX queue and the DMA buffers. */
	ravb_ring_free(ndev, RAVB_BE);
	ravb_ring_free(ndev, RAVB_NC);

	return 0;
}

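/* The two helpers below back the SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls,
 * translating between struct hwtstamp_config and the driver's
 * tstamp_rx_ctrl/tstamp_tx_ctrl bits.
 */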
static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						HWTSTAMP_TX_OFF;
	if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
	else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	else
		config.rx_filter = HWTSTAMP_FILTER_NONE;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* Control hardware time stamping */
static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;
	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	/* Reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
	}

	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* ioctl to device function */
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = priv->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return ravb_hwtstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return ravb_hwtstamp_set(ndev, req);
	}

	return phy_mii_ioctl(phydev, req, cmd);
}

static const struct net_device_ops ravb_netdev_ops = {
	.ndo_open		= ravb_open,
	.ndo_stop		= ravb_close,
	.ndo_start_xmit		= ravb_start_xmit,
	.ndo_select_queue	= ravb_select_queue,
	.ndo_get_stats		= ravb_get_stats,
	.ndo_set_rx_mode	= ravb_set_rx_mode,
	.ndo_tx_timeout		= ravb_tx_timeout,
	.ndo_do_ioctl		= ravb_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

/* MDIO bus init function */
static int ravb_mdio_init(struct ravb_private *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int error;

	/* Bitbang init */
	priv->mdiobb.ops = &bb_ops;

	/* MII controller setting */
	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
	if (!priv->mii_bus)
		return -ENOMEM;

	/* Hook up MII support for ethtool */
	priv->mii_bus->name = "ravb_mii";
	priv->mii_bus->parent = dev;
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register MDIO bus */
	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
	if (error)
		goto out_free_bus;

	return 0;

out_free_bus:
	free_mdio_bitbang(priv->mii_bus);
	return error;
}

/* MDIO bus release function */
static int ravb_mdio_release(struct ravb_private *priv)
{
	/* Unregister mdio bus */
	mdiobus_unregister(priv->mii_bus);

	/* Free bitbang info */
	free_mdio_bitbang(priv->mii_bus);

	return 0;
}

static int ravb_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ravb_private *priv;
	struct net_device *ndev;
	int error, irq, q;
	struct resource *res;

	if (!np) {
		dev_err(&pdev->dev,
			"this driver is required to be instantiated from device tree\n");
		return -EINVAL;
	}

	/* Get base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
				  NUM_TX_QUEUE, NUM_RX_QUEUE);
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* The Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		error = irq;
		goto out_release;
	}
	ndev->irq = irq;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
	priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr)) {
		error = PTR_ERR(priv->addr);
		goto out_release;
	}

	spin_lock_init(&priv->lock);
	INIT_WORK(&priv->work, ravb_tx_timeout_work);

	priv->phy_interface = of_get_phy_mode(np);

	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
	priv->avb_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	/* Set function */
	ndev->netdev_ops = &ravb_netdev_ops;
	ndev->ethtool_ops = &ravb_ethtool_ops;

	/* Set AVB config mode */
	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
		   CCC);

	/* Set CSEL value */
	ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
		   CCC);

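	/* Set GTI value: GTI holds the gPTP timer increment in nanoseconds
	 * as a 2^20 fixed-point fraction.  (1000 << 20) / 130 encodes the
	 * ~7.69 ns period of the 130 MHz HPB clock selected via
	 * CCC_CSEL_HPB above; the rate is hardcoded here rather than
	 * queried from the clock framework.
	 */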
	ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI);

	/* Request GTI loading */
	ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);

	/* Allocate descriptor base address table */
	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
	priv->desc_bat = dma_alloc_coherent(NULL, priv->desc_bat_size,
					    &priv->desc_bat_dma, GFP_KERNEL);
	if (!priv->desc_bat) {
		dev_err(&ndev->dev,
			"Cannot allocate desc base address table (size %d bytes)\n",
			priv->desc_bat_size);
		error = -ENOMEM;
		goto out_release;
	}
	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
		priv->desc_bat[q].die_dt = DT_EOS;
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	/* Initialise HW timestamp list */
	INIT_LIST_HEAD(&priv->ts_skb_list);

	/* Debug message level */
	priv->msg_enable = RAVB_DEF_MSG_ENABLE;

	/* Read and set MAC address */
	ravb_read_mac_address(ndev, of_get_mac_address(np));
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one\n");
		eth_hw_addr_random(ndev);
	}

	/* MDIO bus init */
	error = ravb_mdio_init(priv);
	if (error) {
		dev_err(&ndev->dev, "failed to initialize MDIO\n");
		goto out_dma_free;
	}

	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
	netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);

	/* Network device register */
	error = register_netdev(ndev);
	if (error)
		goto out_napi_del;

	/* Print device information */
	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return 0;

out_napi_del:
	netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
out_dma_free:
	dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);
out_release:
	if (ndev)
		free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return error;
}

static int ravb_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ravb_private *priv = netdev_priv(ndev);

	dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);
	/* Set reset mode */
	ravb_write(ndev, CCC_OPC_RESET, CCC);
	pm_runtime_put_sync(&pdev->dev);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

#ifdef CONFIG_PM
static int ravb_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops ravb_dev_pm_ops = {
	.runtime_suspend = ravb_runtime_nop,
	.runtime_resume = ravb_runtime_nop,
};

#define RAVB_PM_OPS (&ravb_dev_pm_ops)
#else
#define RAVB_PM_OPS NULL
#endif

static const struct of_device_id ravb_match_table[] = {
	{ .compatible = "renesas,etheravb-r8a7790" },
	{ .compatible = "renesas,etheravb-r8a7794" },
	{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);

static struct platform_driver ravb_driver = {
	.probe		= ravb_probe,
	.remove		= ravb_remove,
	.driver = {
		.name		= "ravb",
		.pm		= RAVB_PM_OPS,
		.of_match_table	= ravb_match_table,
	},
};

module_platform_driver(ravb_driver);

MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
MODULE_LICENSE("GPL v2");