drivers/net/ethernet/apm/xgene/xgene_enet_main.c (cascardo/linux.git)
1 /* Applied Micro X-Gene SoC Ethernet Driver
2  *
3  * Copyright (c) 2014, Applied Micro Circuits Corporation
4  * Authors: Iyappan Subramanian <isubramanian@apm.com>
5  *          Ravi Patel <rapatel@apm.com>
6  *          Keyur Chudgar <kchudgar@apm.com>
7  *
8  * This program is free software; you can redistribute  it and/or modify it
9  * under  the terms of  the GNU General  Public License as published by the
10  * Free Software Foundation;  either version 2 of the  License, or (at your
11  * option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include <linux/gpio.h>
23 #include "xgene_enet_main.h"
24 #include "xgene_enet_hw.h"
25 #include "xgene_enet_sgmac.h"
26 #include "xgene_enet_xgmac.h"
27
28 #define RES_ENET_CSR    0
29 #define RES_RING_CSR    1
30 #define RES_RING_CMD    2
31
32 static const struct of_device_id xgene_enet_of_match[];
33 static const struct acpi_device_id xgene_enet_acpi_match[];
34
35 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
36 {
37         struct xgene_enet_raw_desc16 *raw_desc;
38         int i;
39
40         for (i = 0; i < buf_pool->slots; i++) {
41                 raw_desc = &buf_pool->raw_desc16[i];
42
43                 /* Hardware expects descriptor in little endian format */
44                 raw_desc->m0 = cpu_to_le64(i |
45                                 SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
46                                 SET_VAL(STASH, 3));
47         }
48 }
49
50 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
51                                      u32 nbuf)
52 {
53         struct sk_buff *skb;
54         struct xgene_enet_raw_desc16 *raw_desc;
55         struct xgene_enet_pdata *pdata;
56         struct net_device *ndev;
57         struct device *dev;
58         dma_addr_t dma_addr;
59         u32 tail = buf_pool->tail;
60         u32 slots = buf_pool->slots - 1;
61         u16 bufdatalen, len;
62         int i;
63
64         ndev = buf_pool->ndev;
65         dev = ndev_to_dev(buf_pool->ndev);
66         pdata = netdev_priv(ndev);
67         bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
68         len = XGENE_ENET_MAX_MTU;
69
70         for (i = 0; i < nbuf; i++) {
71                 raw_desc = &buf_pool->raw_desc16[tail];
72
73                 skb = netdev_alloc_skb_ip_align(ndev, len);
74                 if (unlikely(!skb))
75                         return -ENOMEM;
76
77                 dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
78                 if (dma_mapping_error(dev, dma_addr)) {
79                         netdev_err(ndev, "DMA mapping error\n");
80                         dev_kfree_skb_any(skb);
81                         return -EINVAL;
82                 }
83
84                 buf_pool->rx_skb[tail] = skb;
85
86                 raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
87                                            SET_VAL(BUFDATALEN, bufdatalen) |
88                                            SET_BIT(COHERENT));
89                 tail = (tail + 1) & slots;
90         }
91
92         pdata->ring_ops->wr_cmd(buf_pool, nbuf);
93         buf_pool->tail = tail;
94
95         return 0;
96 }
97
98 static u8 xgene_enet_hdr_len(const void *data)
99 {
100         const struct ethhdr *eth = data;
101
102         return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
103 }
104
105 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
106 {
107         struct device *dev = ndev_to_dev(buf_pool->ndev);
108         struct xgene_enet_raw_desc16 *raw_desc;
109         dma_addr_t dma_addr;
110         int i;
111
112         /* Free up the buffers held by hardware */
113         for (i = 0; i < buf_pool->slots; i++) {
114                 if (buf_pool->rx_skb[i]) {
115                         dev_kfree_skb_any(buf_pool->rx_skb[i]);
116
117                         raw_desc = &buf_pool->raw_desc16[i];
118                         dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
119                         dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
120                                          DMA_FROM_DEVICE);
121                 }
122         }
123 }
124
125 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
126 {
127         struct xgene_enet_desc_ring *rx_ring = data;
128
129         if (napi_schedule_prep(&rx_ring->napi)) {
130                 disable_irq_nosync(irq);
131                 __napi_schedule(&rx_ring->napi);
132         }
133
134         return IRQ_HANDLED;
135 }
136
137 static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
138                                     struct xgene_enet_raw_desc *raw_desc)
139 {
140         struct sk_buff *skb;
141         struct device *dev;
142         skb_frag_t *frag;
143         dma_addr_t *frag_dma_addr;
144         u16 skb_index;
145         u8 status;
146         int i, ret = 0;
147
148         skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
149         skb = cp_ring->cp_skb[skb_index];
150         if (unlikely(!skb)) {
151                 netdev_err(cp_ring->ndev, "completion skb is NULL\n");
152                 return -EIO;
153         }
154
155         frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
156         dev = ndev_to_dev(cp_ring->ndev);
157         dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
158                          skb_headlen(skb),
159                          DMA_TO_DEVICE);
160
161         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
162                 frag = &skb_shinfo(skb)->frags[i];
163                 dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
164                                DMA_TO_DEVICE);
165         }
166
167         /* Checking for error */
168         status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
169         if (unlikely(status > 2)) {
170                 xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
171                                        status);
172                 ret = -EIO;
173         }
174
175         /* free the skb only after all of its DMA mappings are unmapped */
176         dev_kfree_skb_any(skb);
177
178         return ret;
179 }
180
181 static u64 xgene_enet_work_msg(struct sk_buff *skb)
182 {
183         struct net_device *ndev = skb->dev;
184         struct iphdr *iph;
185         u8 l3hlen = 0, l4hlen = 0;
186         u8 ethhdr, proto = 0, csum_enable = 0;
187         u64 hopinfo = 0;
188         u32 hdr_len, mss = 0;
189         u32 i, len, nr_frags;
190
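            /* Build the 64-bit work message for this skb: header lengths plus
             * checksum and TSO hints, merged into the tx descriptor's m3 word
             * by the caller.
             */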
191         ethhdr = xgene_enet_hdr_len(skb->data);
192
193         if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
194             unlikely(skb->protocol != htons(ETH_P_8021Q)))
195                 goto out;
196
197         if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
198                 goto out;
199
200         iph = ip_hdr(skb);
201         if (unlikely(ip_is_fragment(iph)))
202                 goto out;
203
204         if (likely(iph->protocol == IPPROTO_TCP)) {
205                 l4hlen = tcp_hdrlen(skb) >> 2;
206                 csum_enable = 1;
207                 proto = TSO_IPPROTO_TCP;
208                 if (ndev->features & NETIF_F_TSO) {
209                         hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
210                         mss = skb_shinfo(skb)->gso_size;
211
212                         if (skb_is_nonlinear(skb)) {
213                                 len = skb_headlen(skb);
214                                 nr_frags = skb_shinfo(skb)->nr_frags;
215
216                                 for (i = 0; i < 2 && i < nr_frags; i++)
217                                         len += skb_shinfo(skb)->frags[i].size;
218
219                                 /* HW requires the header to reside within the first 3 buffers */
220                                 if (unlikely(hdr_len > len)) {
221                                         if (skb_linearize(skb))
222                                                 return 0;
223                                 }
224                         }
225
226                         if (!mss || ((skb->len - hdr_len) <= mss))
227                                 goto out;
228
229                         hopinfo |= SET_BIT(ET);
230                 }
231         } else if (iph->protocol == IPPROTO_UDP) {
232                 l4hlen = UDP_HDR_SIZE;
233                 csum_enable = 1;
234         }
235 out:
236         l3hlen = ip_hdrlen(skb) >> 2;
237         hopinfo |= SET_VAL(TCPHDR, l4hlen) |
238                   SET_VAL(IPHDR, l3hlen) |
239                   SET_VAL(ETHHDR, ethhdr) |
240                   SET_VAL(EC, csum_enable) |
241                   SET_VAL(IS, proto) |
242                   SET_BIT(IC) |
243                   SET_BIT(TYPE_ETH_WORK_MESSAGE);
244
245         return hopinfo;
246 }
247
248 static u16 xgene_enet_encode_len(u16 len)
249 {
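            /* The BUFDATALEN field encodes a full 16KB buffer as 0 */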
250         return (len == BUFLEN_16K) ? 0 : len;
251 }
252
253 static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
254 {
255         desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
256                                     SET_VAL(BUFDATALEN, len));
257 }
258
259 static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
260 {
261         __le64 *exp_bufs;
262
263         exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
264         memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
265         ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
266
267         return exp_bufs;
268 }
269
270 static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
271 {
272         return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
273 }
274
275 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
276                                     struct sk_buff *skb)
277 {
278         struct device *dev = ndev_to_dev(tx_ring->ndev);
279         struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
280         struct xgene_enet_raw_desc *raw_desc;
281         __le64 *exp_desc = NULL, *exp_bufs = NULL;
282         dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
283         skb_frag_t *frag;
284         u16 tail = tx_ring->tail;
285         u64 hopinfo;
286         u32 len, hw_len;
287         u8 ll = 0, nv = 0, idx = 0;
288         bool split = false;
289         u32 size, offset, ell_bytes = 0;
290         u32 i, fidx, nr_frags, count = 1;
291
292         raw_desc = &tx_ring->raw_desc[tail];
293         tail = (tail + 1) & (tx_ring->slots - 1);
294         memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
295
296         hopinfo = xgene_enet_work_msg(skb);
297         if (!hopinfo)
298                 return -EINVAL;
299         raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
300                                    hopinfo);
301
302         len = skb_headlen(skb);
303         hw_len = xgene_enet_encode_len(len);
304
305         dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
306         if (dma_mapping_error(dev, dma_addr)) {
307                 netdev_err(tx_ring->ndev, "DMA mapping error\n");
308                 return -EINVAL;
309         }
310
311         /* Hardware expects descriptor in little endian format */
312         raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
313                                    SET_VAL(BUFDATALEN, hw_len) |
314                                    SET_BIT(COHERENT));
315
316         if (!skb_is_nonlinear(skb))
317                 goto out;
318
319         /* scatter gather */
320         nv = 1;
321         exp_desc = (void *)&tx_ring->raw_desc[tail];
322         tail = (tail + 1) & (tx_ring->slots - 1);
323         memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
324
325         nr_frags = skb_shinfo(skb)->nr_frags;
326         for (i = nr_frags; i < 4 ; i++)
327                 exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
328
329         frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
330
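            /* Map each fragment; a fragment larger than 16KB is split across
             * multiple entries, and entries that do not fit in the extension
             * descriptor spill over into the exp_bufs list.
             */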
331         for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
332                 if (!split) {
333                         frag = &skb_shinfo(skb)->frags[fidx];
334                         size = skb_frag_size(frag);
335                         offset = 0;
336
337                         pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
338                                                      DMA_TO_DEVICE);
339                         if (dma_mapping_error(dev, pbuf_addr))
340                                 return -EINVAL;
341
342                         frag_dma_addr[fidx] = pbuf_addr;
343                         fidx++;
344
345                         if (size > BUFLEN_16K)
346                                 split = true;
347                 }
348
349                 if (size > BUFLEN_16K) {
350                         len = BUFLEN_16K;
351                         size -= BUFLEN_16K;
352                 } else {
353                         len = size;
354                         split = false;
355                 }
356
357                 dma_addr = pbuf_addr + offset;
358                 hw_len = xgene_enet_encode_len(len);
359
360                 switch (i) {
361                 case 0:
362                 case 1:
363                 case 2:
364                         xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
365                         break;
366                 case 3:
367                         if (split || (fidx != nr_frags)) {
368                                 exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
369                                 xgene_set_addr_len(exp_bufs, idx, dma_addr,
370                                                    hw_len);
371                                 idx++;
372                                 ell_bytes += len;
373                         } else {
374                                 xgene_set_addr_len(exp_desc, i, dma_addr,
375                                                    hw_len);
376                         }
377                         break;
378                 default:
379                         xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
380                         idx++;
381                         ell_bytes += len;
382                         break;
383                 }
384
385                 if (split)
386                         offset += BUFLEN_16K;
387         }
388         count++;
389
390         if (idx) {
391                 ll = 1;
392                 dma_addr = dma_map_single(dev, exp_bufs,
393                                           sizeof(u64) * MAX_EXP_BUFFS,
394                                           DMA_TO_DEVICE);
395                 if (dma_mapping_error(dev, dma_addr)) {
396                         /* the caller frees the skb when we return an error */
397                         return -EINVAL;
398                 }
399                 i = ell_bytes >> LL_BYTES_LSB_LEN;
400                 exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
401                                           SET_VAL(LL_BYTES_MSB, i) |
402                                           SET_VAL(LL_LEN, idx));
403                 raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
404         }
405
406 out:
407         raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
408                                    SET_VAL(USERINFO, tx_ring->tail));
409         tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
410         pdata->tx_level[tx_ring->cp_ring->index] += count;
411         tx_ring->tail = tail;
412
413         return count;
414 }
415
416 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
417                                          struct net_device *ndev)
418 {
419         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
420         struct xgene_enet_desc_ring *tx_ring;
421         int index = skb->queue_mapping;
422         u32 tx_level = pdata->tx_level[index];
423         int count;
424
425         tx_ring = pdata->tx_ring[index];
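        /* tx_level and txc_level are free-running counters; adjust for
         * wraparound so the subtraction below gives the outstanding count.
         */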
426         if (tx_level < pdata->txc_level[index])
427                 tx_level += ((typeof(pdata->tx_level[index]))~0U);
428
429         if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
430                 netif_stop_subqueue(ndev, index);
431                 return NETDEV_TX_BUSY;
432         }
433
434         if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
435                 return NETDEV_TX_OK;
436
437         count = xgene_enet_setup_tx_desc(tx_ring, skb);
438         if (count <= 0) {
439                 dev_kfree_skb_any(skb);
440                 return NETDEV_TX_OK;
441         }
442
443         skb_tx_timestamp(skb);
444
445         tx_ring->tx_packets++;
446         tx_ring->tx_bytes += skb->len;
447
448         pdata->ring_ops->wr_cmd(tx_ring, count);
449         return NETDEV_TX_OK;
450 }
451
452 static void xgene_enet_skip_csum(struct sk_buff *skb)
453 {
454         struct iphdr *iph = ip_hdr(skb);
455
456         if (!ip_is_fragment(iph) ||
457             (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
458                 skb->ip_summed = CHECKSUM_UNNECESSARY;
459         }
460 }
461
462 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
463                                struct xgene_enet_raw_desc *raw_desc)
464 {
465         struct net_device *ndev;
466         struct device *dev;
467         struct xgene_enet_desc_ring *buf_pool;
468         u32 datalen, skb_index;
469         struct sk_buff *skb;
470         u8 status;
471         int ret = 0;
472
473         ndev = rx_ring->ndev;
474         dev = ndev_to_dev(rx_ring->ndev);
475         buf_pool = rx_ring->buf_pool;
476
477         dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
478                          XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
479         skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
480         skb = buf_pool->rx_skb[skb_index];
481         buf_pool->rx_skb[skb_index] = NULL;
482
483         /* checking for error: merge the ELERR and LERR fields into one status */
484         status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
485                   GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
486         if (unlikely(status > 2)) {
487                 dev_kfree_skb_any(skb);
488                 xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
489                                        status);
490                 ret = -EIO;
491                 goto out;
492         }
493
494         /* strip off CRC as HW isn't doing this */
495         datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
496         datalen = (datalen & DATALEN_MASK) - 4;
497         prefetch(skb->data - NET_IP_ALIGN);
498         skb_put(skb, datalen);
499
500         skb_checksum_none_assert(skb);
501         skb->protocol = eth_type_trans(skb, ndev);
502         if (likely((ndev->features & NETIF_F_IP_CSUM) &&
503                    skb->protocol == htons(ETH_P_IP))) {
504                 xgene_enet_skip_csum(skb);
505         }
506
507         rx_ring->rx_packets++;
508         rx_ring->rx_bytes += datalen;
509         napi_gro_receive(&rx_ring->napi, skb);
510 out:
511         if (--rx_ring->nbufpool == 0) {
512                 ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
513                 rx_ring->nbufpool = NUM_BUFPOOL;
514         }
515
516         return ret;
517 }
518
519 static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
520 {
521         return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
522 }
523
524 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
525                                    int budget)
526 {
527         struct net_device *ndev = ring->ndev;
528         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
529         struct xgene_enet_raw_desc *raw_desc, *exp_desc;
530         u16 head = ring->head;
531         u16 slots = ring->slots - 1;
532         int ret, desc_count, count = 0, processed = 0;
533         bool is_completion;
534
535         do {
536                 raw_desc = &ring->raw_desc[head];
537                 desc_count = 0;
538                 is_completion = false;
539                 exp_desc = NULL;
540                 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
541                         break;
542
543                 /* read fpqnum field after dataaddr field */
544                 dma_rmb();
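                    /* NV indicates the work message continues in the next
                     * (extended) descriptor slot, which is consumed here too.
                     */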
545                 if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
546                         head = (head + 1) & slots;
547                         exp_desc = &ring->raw_desc[head];
548
549                         if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
550                                 head = (head - 1) & slots;
551                                 break;
552                         }
553                         dma_rmb();
554                         count++;
555                         desc_count++;
556                 }
557                 if (is_rx_desc(raw_desc)) {
558                         ret = xgene_enet_rx_frame(ring, raw_desc);
559                 } else {
560                         ret = xgene_enet_tx_completion(ring, raw_desc);
561                         is_completion = true;
562                 }
563                 xgene_enet_mark_desc_slot_empty(raw_desc);
564                 if (exp_desc)
565                         xgene_enet_mark_desc_slot_empty(exp_desc);
566
567                 head = (head + 1) & slots;
568                 count++;
569                 desc_count++;
570                 processed++;
571                 if (is_completion)
572                         pdata->txc_level[ring->index] += desc_count;
573
574                 if (ret)
575                         break;
576         } while (--budget);
577
578         if (likely(count)) {
579                 pdata->ring_ops->wr_cmd(ring, -count);
580                 ring->head = head;
581
582                 if (__netif_subqueue_stopped(ndev, ring->index))
583                         netif_start_subqueue(ndev, ring->index);
584         }
585
586         return processed;
587 }
588
589 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
590 {
591         struct xgene_enet_desc_ring *ring;
592         int processed;
593
594         ring = container_of(napi, struct xgene_enet_desc_ring, napi);
595         processed = xgene_enet_process_ring(ring, budget);
596
597         if (processed != budget) {
598                 napi_complete(napi);
599                 enable_irq(ring->irq);
600         }
601
602         return processed;
603 }
604
605 static void xgene_enet_timeout(struct net_device *ndev)
606 {
607         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
608         struct netdev_queue *txq;
609         int i;
610
611         pdata->mac_ops->reset(pdata);
612
613         for (i = 0; i < pdata->txq_cnt; i++) {
614                 txq = netdev_get_tx_queue(ndev, i);
615                 txq->trans_start = jiffies;
616                 netif_tx_start_queue(txq);
617         }
618 }
619
620 static void xgene_enet_set_irq_name(struct net_device *ndev)
621 {
622         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
623         struct xgene_enet_desc_ring *ring;
624         int i;
625
626         for (i = 0; i < pdata->rxq_cnt; i++) {
627                 ring = pdata->rx_ring[i];
628                 if (!pdata->cq_cnt) {
629                         snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
630                                  ndev->name);
631                 } else {
632                         snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
633                                  ndev->name, i);
634                 }
635         }
636
637         for (i = 0; i < pdata->cq_cnt; i++) {
638                 ring = pdata->tx_ring[i]->cp_ring;
639                 snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
640                          ndev->name, i);
641         }
642 }
643
644 static int xgene_enet_register_irq(struct net_device *ndev)
645 {
646         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
647         struct device *dev = ndev_to_dev(ndev);
648         struct xgene_enet_desc_ring *ring;
649         int ret = 0, i;
650
651         xgene_enet_set_irq_name(ndev);
652         for (i = 0; i < pdata->rxq_cnt; i++) {
653                 ring = pdata->rx_ring[i];
654                 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
655                 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
656                                        0, ring->irq_name, ring);
657                 if (ret) {
658                         netdev_err(ndev, "Failed to request irq %s\n",
659                                    ring->irq_name);
660                 }
661         }
662
663         for (i = 0; i < pdata->cq_cnt; i++) {
664                 ring = pdata->tx_ring[i]->cp_ring;
665                 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
666                 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
667                                        0, ring->irq_name, ring);
668                 if (ret) {
669                         netdev_err(ndev, "Failed to request irq %s\n",
670                                    ring->irq_name);
671                 }
672         }
673
674         return ret;
675 }
676
677 static void xgene_enet_free_irq(struct net_device *ndev)
678 {
679         struct xgene_enet_pdata *pdata;
680         struct xgene_enet_desc_ring *ring;
681         struct device *dev;
682         int i;
683
684         pdata = netdev_priv(ndev);
685         dev = ndev_to_dev(ndev);
686
687         for (i = 0; i < pdata->rxq_cnt; i++) {
688                 ring = pdata->rx_ring[i];
689                 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
690                 devm_free_irq(dev, ring->irq, ring);
691         }
692
693         for (i = 0; i < pdata->cq_cnt; i++) {
694                 ring = pdata->tx_ring[i]->cp_ring;
695                 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
696                 devm_free_irq(dev, ring->irq, ring);
697         }
698 }
699
700 static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
701 {
702         struct napi_struct *napi;
703         int i;
704
705         for (i = 0; i < pdata->rxq_cnt; i++) {
706                 napi = &pdata->rx_ring[i]->napi;
707                 napi_enable(napi);
708         }
709
710         for (i = 0; i < pdata->cq_cnt; i++) {
711                 napi = &pdata->tx_ring[i]->cp_ring->napi;
712                 napi_enable(napi);
713         }
714 }
715
716 static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
717 {
718         struct napi_struct *napi;
719         int i;
720
721         for (i = 0; i < pdata->rxq_cnt; i++) {
722                 napi = &pdata->rx_ring[i]->napi;
723                 napi_disable(napi);
724         }
725
726         for (i = 0; i < pdata->cq_cnt; i++) {
727                 napi = &pdata->tx_ring[i]->cp_ring->napi;
728                 napi_disable(napi);
729         }
730 }
731
732 static int xgene_enet_open(struct net_device *ndev)
733 {
734         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
735         const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
736         int ret;
737
738         ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
739         if (ret)
740                 return ret;
741
742         ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
743         if (ret)
744                 return ret;
745
746         xgene_enet_napi_enable(pdata);
747         ret = xgene_enet_register_irq(ndev);
748         if (ret)
749                 return ret;
750
751         if (ndev->phydev) {
752                 phy_start(ndev->phydev);
753         } else {
754                 schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
755                 netif_carrier_off(ndev);
756         }
757
758         mac_ops->tx_enable(pdata);
759         mac_ops->rx_enable(pdata);
760         netif_tx_start_all_queues(ndev);
761
762         return ret;
763 }
764
765 static int xgene_enet_close(struct net_device *ndev)
766 {
767         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
768         const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
769         int i;
770
771         netif_tx_stop_all_queues(ndev);
772         mac_ops->tx_disable(pdata);
773         mac_ops->rx_disable(pdata);
774
775         if (ndev->phydev)
776                 phy_stop(ndev->phydev);
777         else
778                 cancel_delayed_work_sync(&pdata->link_work);
779
780         xgene_enet_free_irq(ndev);
781         xgene_enet_napi_disable(pdata);
782         for (i = 0; i < pdata->rxq_cnt; i++)
783                 xgene_enet_process_ring(pdata->rx_ring[i], -1);
784
785         return 0;
786 }
787 static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
788 {
789         struct xgene_enet_pdata *pdata;
790         struct device *dev;
791
792         pdata = netdev_priv(ring->ndev);
793         dev = ndev_to_dev(ring->ndev);
794
795         pdata->ring_ops->clear(ring);
796         dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
797 }
798
799 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
800 {
801         struct xgene_enet_desc_ring *buf_pool;
802         struct xgene_enet_desc_ring *ring;
803         int i;
804
805         for (i = 0; i < pdata->txq_cnt; i++) {
806                 ring = pdata->tx_ring[i];
807                 if (ring) {
808                         xgene_enet_delete_ring(ring);
809                         pdata->port_ops->clear(pdata, ring);
810                         if (pdata->cq_cnt)
811                                 xgene_enet_delete_ring(ring->cp_ring);
812                         pdata->tx_ring[i] = NULL;
813                 }
814         }
815
816         for (i = 0; i < pdata->rxq_cnt; i++) {
817                 ring = pdata->rx_ring[i];
818                 if (ring) {
819                         buf_pool = ring->buf_pool;
820                         xgene_enet_delete_bufpool(buf_pool);
821                         xgene_enet_delete_ring(buf_pool);
822                         pdata->port_ops->clear(pdata, buf_pool);
823                         xgene_enet_delete_ring(ring);
824                         pdata->rx_ring[i] = NULL;
825                 }
826         }
827 }
828
829 static int xgene_enet_get_ring_size(struct device *dev,
830                                     enum xgene_enet_ring_cfgsize cfgsize)
831 {
832         int size = -EINVAL;
833
834         switch (cfgsize) {
835         case RING_CFGSIZE_512B:
836                 size = 0x200;
837                 break;
838         case RING_CFGSIZE_2KB:
839                 size = 0x800;
840                 break;
841         case RING_CFGSIZE_16KB:
842                 size = 0x4000;
843                 break;
844         case RING_CFGSIZE_64KB:
845                 size = 0x10000;
846                 break;
847         case RING_CFGSIZE_512KB:
848                 size = 0x80000;
849                 break;
850         default:
851                 dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
852                 break;
853         }
854
855         return size;
856 }
857
858 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
859 {
860         struct xgene_enet_pdata *pdata;
861         struct device *dev;
862
863         if (!ring)
864                 return;
865
866         dev = ndev_to_dev(ring->ndev);
867         pdata = netdev_priv(ring->ndev);
868
869         if (ring->desc_addr) {
870                 pdata->ring_ops->clear(ring);
871                 dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
872         }
873         devm_kfree(dev, ring);
874 }
875
876 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
877 {
878         struct device *dev = &pdata->pdev->dev;
879         struct xgene_enet_desc_ring *ring;
880         int i;
881
882         for (i = 0; i < pdata->txq_cnt; i++) {
883                 ring = pdata->tx_ring[i];
884                 if (ring) {
885                         if (ring->cp_ring && ring->cp_ring->cp_skb)
886                                 devm_kfree(dev, ring->cp_ring->cp_skb);
887                         if (ring->cp_ring && pdata->cq_cnt)
888                                 xgene_enet_free_desc_ring(ring->cp_ring);
889                         xgene_enet_free_desc_ring(ring);
890                 }
891         }
892
893         for (i = 0; i < pdata->rxq_cnt; i++) {
894                 ring = pdata->rx_ring[i];
895                 if (ring) {
896                         if (ring->buf_pool) {
897                                 if (ring->buf_pool->rx_skb)
898                                         devm_kfree(dev, ring->buf_pool->rx_skb);
899                                 xgene_enet_free_desc_ring(ring->buf_pool);
900                         }
901                         xgene_enet_free_desc_ring(ring);
902                 }
903         }
904 }
905
906 static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
907                                  struct xgene_enet_desc_ring *ring)
908 {
909         if ((pdata->enet_id == XGENE_ENET2) &&
910             (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
911                 return true;
912         }
913
914         return false;
915 }
916
917 static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
918                                               struct xgene_enet_desc_ring *ring)
919 {
920         u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
921
922         return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
923 }
924
925 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
926                         struct net_device *ndev, u32 ring_num,
927                         enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
928 {
929         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
930         struct device *dev = ndev_to_dev(ndev);
931         struct xgene_enet_desc_ring *ring;
932         void *irq_mbox_addr;
933         int size;
934
935         size = xgene_enet_get_ring_size(dev, cfgsize);
936         if (size < 0)
937                 return NULL;
938
939         ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
940                             GFP_KERNEL);
941         if (!ring)
942                 return NULL;
943
944         ring->ndev = ndev;
945         ring->num = ring_num;
946         ring->cfgsize = cfgsize;
947         ring->id = ring_id;
948
949         ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
950                                               GFP_KERNEL | __GFP_ZERO);
951         if (!ring->desc_addr) {
952                 devm_kfree(dev, ring);
953                 return NULL;
954         }
955         ring->size = size;
956
957         if (is_irq_mbox_required(pdata, ring)) {
958                 irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
959                                                     &ring->irq_mbox_dma,
960                                                     GFP_KERNEL | __GFP_ZERO);
961                 if (!irq_mbox_addr) {
962                         dmam_free_coherent(dev, size, ring->desc_addr,
963                                            ring->dma);
964                         devm_kfree(dev, ring);
965                         return NULL;
966                 }
967                 ring->irq_mbox_addr = irq_mbox_addr;
968         }
969
970         ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
971         ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
972         ring = pdata->ring_ops->setup(ring);
973         netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
974                    ring->num, ring->size, ring->id, ring->slots);
975
976         return ring;
977 }
978
979 static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
980 {
981         return (owner << 6) | (bufnum & GENMASK(5, 0));
982 }
983
984 static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
985 {
986         enum xgene_ring_owner owner;
987
988         if (p->enet_id == XGENE_ENET1) {
989                 switch (p->phy_mode) {
990                 case PHY_INTERFACE_MODE_SGMII:
991                         owner = RING_OWNER_ETH0;
992                         break;
993                 default:
994                         owner = (!p->port_id) ? RING_OWNER_ETH0 :
995                                                 RING_OWNER_ETH1;
996                         break;
997                 }
998         } else {
999                 owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
1000         }
1001
1002         return owner;
1003 }
1004
1005 static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
1006 {
1007         struct device *dev = &pdata->pdev->dev;
1008         u32 cpu_bufnum;
1009         int ret;
1010
1011         ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
1012
1013         return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
1014 }
1015
1016 static int xgene_enet_create_desc_rings(struct net_device *ndev)
1017 {
1018         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1019         struct device *dev = ndev_to_dev(ndev);
1020         struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
1021         struct xgene_enet_desc_ring *buf_pool = NULL;
1022         enum xgene_ring_owner owner;
1023         dma_addr_t dma_exp_bufs;
1024         u8 cpu_bufnum;
1025         u8 eth_bufnum = pdata->eth_bufnum;
1026         u8 bp_bufnum = pdata->bp_bufnum;
1027         u16 ring_num = pdata->ring_num;
1028         __le64 *exp_bufs;
1029         u16 ring_id;
1030         int i, ret, size;
1031
1032         cpu_bufnum = xgene_start_cpu_bufnum(pdata);
1033
1034         for (i = 0; i < pdata->rxq_cnt; i++) {
1035                 /* allocate rx descriptor ring */
1036                 owner = xgene_derive_ring_owner(pdata);
1037                 ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
1038                 rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1039                                                       RING_CFGSIZE_16KB,
1040                                                       ring_id);
1041                 if (!rx_ring) {
1042                         ret = -ENOMEM;
1043                         goto err;
1044                 }
1045
1046                 /* allocate buffer pool for receiving packets */
1047                 owner = xgene_derive_ring_owner(pdata);
1048                 ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1049                 buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1050                                                        RING_CFGSIZE_2KB,
1051                                                        ring_id);
1052                 if (!buf_pool) {
1053                         ret = -ENOMEM;
1054                         goto err;
1055                 }
1056
1057                 rx_ring->nbufpool = NUM_BUFPOOL;
1058                 rx_ring->buf_pool = buf_pool;
1059                 rx_ring->irq = pdata->irqs[i];
1060                 buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
1061                                                 sizeof(struct sk_buff *),
1062                                                 GFP_KERNEL);
1063                 if (!buf_pool->rx_skb) {
1064                         ret = -ENOMEM;
1065                         goto err;
1066                 }
1067
1068                 buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
1069                 rx_ring->buf_pool = buf_pool;
1070                 pdata->rx_ring[i] = rx_ring;
1071         }
1072
1073         for (i = 0; i < pdata->txq_cnt; i++) {
1074                 /* allocate tx descriptor ring */
1075                 owner = xgene_derive_ring_owner(pdata);
1076                 ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
1077                 tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1078                                                       RING_CFGSIZE_16KB,
1079                                                       ring_id);
1080                 if (!tx_ring) {
1081                         ret = -ENOMEM;
1082                         goto err;
1083                 }
1084
1085                 size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
1086                 exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
1087                                                GFP_KERNEL | __GFP_ZERO);
1088                 if (!exp_bufs) {
1089                         ret = -ENOMEM;
1090                         goto err;
1091                 }
1092                 tx_ring->exp_bufs = exp_bufs;
1093
1094                 pdata->tx_ring[i] = tx_ring;
1095
1096                 if (!pdata->cq_cnt) {
1097                         cp_ring = pdata->rx_ring[i];
1098                 } else {
1099                         /* allocate tx completion descriptor ring */
1100                         ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
1101                                                          cpu_bufnum++);
1102                         cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1103                                                               RING_CFGSIZE_16KB,
1104                                                               ring_id);
1105                         if (!cp_ring) {
1106                                 ret = -ENOMEM;
1107                                 goto err;
1108                         }
1109
1110                         cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
1111                         cp_ring->index = i;
1112                 }
1113
1114                 cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
1115                                                sizeof(struct sk_buff *),
1116                                                GFP_KERNEL);
1117                 if (!cp_ring->cp_skb) {
1118                         ret = -ENOMEM;
1119                         goto err;
1120                 }
1121
1122                 size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
1123                 cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
1124                                                       size, GFP_KERNEL);
1125                 if (!cp_ring->frag_dma_addr) {
1126                         devm_kfree(dev, cp_ring->cp_skb);
1127                         ret = -ENOMEM;
1128                         goto err;
1129                 }
1130
1131                 tx_ring->cp_ring = cp_ring;
1132                 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1133         }
1134
1135         pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1136         pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1137
1138         return 0;
1139
1140 err:
1141         xgene_enet_free_desc_rings(pdata);
1142         return ret;
1143 }
1144
1145 static struct rtnl_link_stats64 *xgene_enet_get_stats64(
1146                         struct net_device *ndev,
1147                         struct rtnl_link_stats64 *storage)
1148 {
1149         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1150         struct rtnl_link_stats64 *stats = &pdata->stats;
1151         struct xgene_enet_desc_ring *ring;
1152         int i;
1153
1154         memset(stats, 0, sizeof(struct rtnl_link_stats64));
1155         for (i = 0; i < pdata->txq_cnt; i++) {
1156                 ring = pdata->tx_ring[i];
1157                 if (ring) {
1158                         stats->tx_packets += ring->tx_packets;
1159                         stats->tx_bytes += ring->tx_bytes;
1160                 }
1161         }
1162
1163         for (i = 0; i < pdata->rxq_cnt; i++) {
1164                 ring = pdata->rx_ring[i];
1165                 if (ring) {
1166                         stats->rx_packets += ring->rx_packets;
1167                         stats->rx_bytes += ring->rx_bytes;
1168                         stats->rx_errors += ring->rx_length_errors +
1169                                 ring->rx_crc_errors +
1170                                 ring->rx_frame_errors +
1171                                 ring->rx_fifo_errors;
1172                         stats->rx_dropped += ring->rx_dropped;
1173                 }
1174         }
1175         memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
1176
1177         return storage;
1178 }
1179
1180 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
1181 {
1182         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1183         int ret;
1184
1185         ret = eth_mac_addr(ndev, addr);
1186         if (ret)
1187                 return ret;
1188         pdata->mac_ops->set_mac_addr(pdata);
1189
1190         return ret;
1191 }
1192
1193 static const struct net_device_ops xgene_ndev_ops = {
1194         .ndo_open = xgene_enet_open,
1195         .ndo_stop = xgene_enet_close,
1196         .ndo_start_xmit = xgene_enet_start_xmit,
1197         .ndo_tx_timeout = xgene_enet_timeout,
1198         .ndo_get_stats64 = xgene_enet_get_stats64,
1199         .ndo_change_mtu = eth_change_mtu,
1200         .ndo_set_mac_address = xgene_enet_set_mac_address,
1201 };
1202
1203 #ifdef CONFIG_ACPI
1204 static void xgene_get_port_id_acpi(struct device *dev,
1205                                   struct xgene_enet_pdata *pdata)
1206 {
1207         acpi_status status;
1208         u64 temp;
1209
1210         status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
1211         if (ACPI_FAILURE(status)) {
1212                 pdata->port_id = 0;
1213         } else {
1214                 pdata->port_id = temp;
1215         }
1216
1217         return;
1218 }
1219 #endif
1220
1221 static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
1222 {
1223         u32 id = 0;
1224
1225         of_property_read_u32(dev->of_node, "port-id", &id);
1226
1227         pdata->port_id = id & BIT(0);
1228
1229         return;
1230 }
1231
1232 static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
1233 {
1234         struct device *dev = &pdata->pdev->dev;
1235         int delay, ret;
1236
1237         ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
1238         if (ret) {
1239                 pdata->tx_delay = 4;
1240                 return 0;
1241         }
1242
1243         if (delay < 0 || delay > 7) {
1244                 dev_err(dev, "Invalid tx-delay specified\n");
1245                 return -EINVAL;
1246         }
1247
1248         pdata->tx_delay = delay;
1249
1250         return 0;
1251 }
1252
1253 static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
1254 {
1255         struct device *dev = &pdata->pdev->dev;
1256         int delay, ret;
1257
1258         ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
1259         if (ret) {
1260                 pdata->rx_delay = 2;
1261                 return 0;
1262         }
1263
1264         if (delay < 0 || delay > 7) {
1265                 dev_err(dev, "Invalid rx-delay specified\n");
1266                 return -EINVAL;
1267         }
1268
1269         pdata->rx_delay = delay;
1270
1271         return 0;
1272 }
1273
1274 static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
1275 {
1276         struct platform_device *pdev = pdata->pdev;
1277         struct device *dev = &pdev->dev;
1278         int i, ret, max_irqs;
1279
1280         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1281                 max_irqs = 1;
1282         else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
1283                 max_irqs = 2;
1284         else
1285                 max_irqs = XGENE_MAX_ENET_IRQ;
1286
1287         for (i = 0; i < max_irqs; i++) {
1288                 ret = platform_get_irq(pdev, i);
1289                 if (ret <= 0) {
1290                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1291                                 max_irqs = i;
1292                                 pdata->rxq_cnt = max_irqs / 2;
1293                                 pdata->txq_cnt = max_irqs / 2;
1294                                 pdata->cq_cnt = max_irqs / 2;
1295                                 break;
1296                         }
1297                         dev_err(dev, "Unable to get ENET IRQ\n");
1298                         ret = ret ? : -ENXIO;
1299                         return ret;
1300                 }
1301                 pdata->irqs[i] = ret;
1302         }
1303
1304         return 0;
1305 }
1306
1307 static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
1308 {
1309         int ret;
1310
1311         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
1312                 return 0;
1313
1314         if (!IS_ENABLED(CONFIG_MDIO_XGENE))
1315                 return 0;
1316
1317         ret = xgene_enet_phy_connect(pdata->ndev);
1318         if (!ret)
1319                 pdata->mdio_driver = true;
1320
1321         return 0;
1322 }
1323
1324 static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
1325 {
1326         struct device *dev = &pdata->pdev->dev;
1327
1328         if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
1329                 return;
1330
1331         pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
1332         if (IS_ERR(pdata->sfp_rdy))
1333                 pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
1334 }
1335
1336 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1337 {
1338         struct platform_device *pdev;
1339         struct net_device *ndev;
1340         struct device *dev;
1341         struct resource *res;
1342         void __iomem *base_addr;
1343         u32 offset;
1344         int ret = 0;
1345
1346         pdev = pdata->pdev;
1347         dev = &pdev->dev;
1348         ndev = pdata->ndev;
1349
1350         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
1351         if (!res) {
1352                 dev_err(dev, "Resource enet_csr not defined\n");
1353                 return -ENODEV;
1354         }
1355         pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
1356         if (!pdata->base_addr) {
1357                 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
1358                 return -ENOMEM;
1359         }
1360
1361         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
1362         if (!res) {
1363                 dev_err(dev, "Resource ring_csr not defined\n");
1364                 return -ENODEV;
1365         }
1366         pdata->ring_csr_addr = devm_ioremap(dev, res->start,
1367                                                         resource_size(res));
1368         if (!pdata->ring_csr_addr) {
1369                 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
1370                 return -ENOMEM;
1371         }
1372
1373         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
1374         if (!res) {
1375                 dev_err(dev, "Resource ring_cmd not defined\n");
1376                 return -ENODEV;
1377         }
1378         pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
1379                                                         resource_size(res));
1380         if (!pdata->ring_cmd_addr) {
1381                 dev_err(dev, "Unable to retrieve ENET Ring command region\n");
1382                 return -ENOMEM;
1383         }
1384
1385         if (dev->of_node)
1386                 xgene_get_port_id_dt(dev, pdata);
1387 #ifdef CONFIG_ACPI
1388         else
1389                 xgene_get_port_id_acpi(dev, pdata);
1390 #endif
1391
1392         if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
1393                 eth_hw_addr_random(ndev);
1394
1395         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
1396
1397         pdata->phy_mode = device_get_phy_mode(dev);
1398         if (pdata->phy_mode < 0) {
1399                 dev_err(dev, "Unable to get phy-connection-type\n");
1400                 return pdata->phy_mode;
1401         }
1402         if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
1403             pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
1404             pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
1405                 dev_err(dev, "Incorrect phy-connection-type specified\n");
1406                 return -ENODEV;
1407         }
1408
1409         ret = xgene_get_tx_delay(pdata);
1410         if (ret)
1411                 return ret;
1412
1413         ret = xgene_get_rx_delay(pdata);
1414         if (ret)
1415                 return ret;
1416
1417         ret = xgene_enet_get_irqs(pdata);
1418         if (ret)
1419                 return ret;
1420
1421         ret = xgene_enet_check_phy_handle(pdata);
1422         if (ret)
1423                 return ret;
1424
1425         xgene_enet_gpiod_get(pdata);
1426
1427         pdata->clk = devm_clk_get(&pdev->dev, NULL);
1428         if (IS_ERR(pdata->clk)) {
1429                 /* Firmware may have set up the clock already. */
1430                 dev_info(dev, "clocks have been setup already\n");
1431         }
1432
1433         if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
1434                 base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
1435         else
1436                 base_addr = pdata->base_addr;
1437         pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
1438         pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
1439         pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
1440         pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
1441         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
1442             pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
1443                 pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
1444                 offset = (pdata->enet_id == XGENE_ENET1) ?
1445                           BLOCK_ETH_MAC_CSR_OFFSET :
1446                           X2_BLOCK_ETH_MAC_CSR_OFFSET;
1447                 pdata->mcx_mac_csr_addr = base_addr + offset;
1448         } else {
1449                 pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
1450                 pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
1451                 pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
1452         }
1453         pdata->rx_buff_cnt = NUM_PKT_BUF;
1454
1455         return 0;
1456 }
1457
1458 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
1459 {
1460         struct xgene_enet_cle *enet_cle = &pdata->cle;
1461         struct net_device *ndev = pdata->ndev;
1462         struct xgene_enet_desc_ring *buf_pool;
1463         u16 dst_ring_num;
1464         int i, ret;
1465
1466         ret = pdata->port_ops->reset(pdata);
1467         if (ret)
1468                 return ret;
1469
1470         ret = xgene_enet_create_desc_rings(ndev);
1471         if (ret) {
1472                 netdev_err(ndev, "Error in ring configuration\n");
1473                 return ret;
1474         }
1475
1476         /* setup buffer pool */
1477         for (i = 0; i < pdata->rxq_cnt; i++) {
1478                 buf_pool = pdata->rx_ring[i]->buf_pool;
1479                 xgene_enet_init_bufpool(buf_pool);
1480                 ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
1481                 if (ret)
1482                         goto err;
1483         }
1484
1485         dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1486         buf_pool = pdata->rx_ring[0]->buf_pool;
1487         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1488                 /* Initialize and Enable  PreClassifier Tree */
1489                 enet_cle->max_nodes = 512;
1490                 enet_cle->max_dbptrs = 1024;
1491                 enet_cle->parsers = 3;
1492                 enet_cle->active_parser = PARSER_ALL;
1493                 enet_cle->ptree.start_node = 0;
1494                 enet_cle->ptree.start_dbptr = 0;
1495                 enet_cle->jump_bytes = 8;
1496                 ret = pdata->cle_ops->cle_init(pdata);
1497                 if (ret) {
1498                         netdev_err(ndev, "Preclass Tree init error\n");
1499                         goto err;
1500                 }
1501         } else {
1502                 pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
1503         }
1504
1505         pdata->phy_speed = SPEED_UNKNOWN;
1506         pdata->mac_ops->init(pdata);
1507
1508         return ret;
1509
1510 err:
1511         xgene_enet_delete_desc_rings(pdata);
1512         return ret;
1513 }
1514
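/*
 * Select the MAC, port and ring operations plus the queue counts for the
 * PHY interface mode, then assign the per-port buffer numbers and starting
 * ring number for this ENET generation and port.
 */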
1515 static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
1516 {
1517         switch (pdata->phy_mode) {
1518         case PHY_INTERFACE_MODE_RGMII:
1519                 pdata->mac_ops = &xgene_gmac_ops;
1520                 pdata->port_ops = &xgene_gport_ops;
1521                 pdata->rm = RM3;
1522                 pdata->rxq_cnt = 1;
1523                 pdata->txq_cnt = 1;
1524                 pdata->cq_cnt = 0;
1525                 break;
1526         case PHY_INTERFACE_MODE_SGMII:
1527                 pdata->mac_ops = &xgene_sgmac_ops;
1528                 pdata->port_ops = &xgene_sgport_ops;
1529                 pdata->rm = RM1;
1530                 pdata->rxq_cnt = 1;
1531                 pdata->txq_cnt = 1;
1532                 pdata->cq_cnt = 1;
1533                 break;
1534         default:
1535                 pdata->mac_ops = &xgene_xgmac_ops;
1536                 pdata->port_ops = &xgene_xgport_ops;
1537                 pdata->cle_ops = &xgene_cle3in_ops;
1538                 pdata->rm = RM0;
1539                 if (!pdata->rxq_cnt) {
1540                         pdata->rxq_cnt = XGENE_NUM_RX_RING;
1541                         pdata->txq_cnt = XGENE_NUM_TX_RING;
1542                         pdata->cq_cnt = XGENE_NUM_TXC_RING;
1543                 }
1544                 break;
1545         }
1546
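        /*
         * Buffer numbers and the starting ring number depend on the ENET
         * generation and on which port of that block is being brought up.
         */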
1547         if (pdata->enet_id == XGENE_ENET1) {
1548                 switch (pdata->port_id) {
1549                 case 0:
1550                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1551                                 pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1552                                 pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1553                                 pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1554                                 pdata->ring_num = START_RING_NUM_0;
1555                         } else {
1556                                 pdata->cpu_bufnum = START_CPU_BUFNUM_0;
1557                                 pdata->eth_bufnum = START_ETH_BUFNUM_0;
1558                                 pdata->bp_bufnum = START_BP_BUFNUM_0;
1559                                 pdata->ring_num = START_RING_NUM_0;
1560                         }
1561                         break;
1562                 case 1:
1563                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1564                                 pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
1565                                 pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
1566                                 pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
1567                                 pdata->ring_num = XG_START_RING_NUM_1;
1568                         } else {
1569                                 pdata->cpu_bufnum = START_CPU_BUFNUM_1;
1570                                 pdata->eth_bufnum = START_ETH_BUFNUM_1;
1571                                 pdata->bp_bufnum = START_BP_BUFNUM_1;
1572                                 pdata->ring_num = START_RING_NUM_1;
1573                         }
1574                         break;
1575                 default:
1576                         break;
1577                 }
1578                 pdata->ring_ops = &xgene_ring1_ops;
1579         } else {
1580                 switch (pdata->port_id) {
1581                 case 0:
1582                         pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1583                         pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1584                         pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1585                         pdata->ring_num = X2_START_RING_NUM_0;
1586                         break;
1587                 case 1:
1588                         pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
1589                         pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
1590                         pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
1591                         pdata->ring_num = X2_START_RING_NUM_1;
1592                         break;
1593                 default:
1594                         break;
1595                 }
1596                 pdata->rm = RM0;
1597                 pdata->ring_ops = &xgene_ring2_ops;
1598         }
1599 }
1600
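/* Register a NAPI context for every RX ring and every TX completion ring. */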
1601 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
1602 {
1603         struct napi_struct *napi;
1604         int i;
1605
1606         for (i = 0; i < pdata->rxq_cnt; i++) {
1607                 napi = &pdata->rx_ring[i]->napi;
1608                 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1609                                NAPI_POLL_WEIGHT);
1610         }
1611
1612         for (i = 0; i < pdata->cq_cnt; i++) {
1613                 napi = &pdata->tx_ring[i]->cp_ring->napi;
1614                 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1615                                NAPI_POLL_WEIGHT);
1616         }
1617 }
1618
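/*
 * Probe: allocate the multi-queue net_device, resolve DT/ACPI resources,
 * pick the per-mode operations, initialize the hardware and MDIO/link
 * polling, add the NAPI contexts and register the netdev.
 */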
1619 static int xgene_enet_probe(struct platform_device *pdev)
1620 {
1621         struct net_device *ndev;
1622         struct xgene_enet_pdata *pdata;
1623         struct device *dev = &pdev->dev;
1624         void (*link_state)(struct work_struct *);
1625         const struct of_device_id *of_id;
1626         int ret;
1627
1628         ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
1629                                   XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
1630         if (!ndev)
1631                 return -ENOMEM;
1632
1633         pdata = netdev_priv(ndev);
1634
1635         pdata->pdev = pdev;
1636         pdata->ndev = ndev;
1637         SET_NETDEV_DEV(ndev, dev);
1638         platform_set_drvdata(pdev, pdata);
1639         ndev->netdev_ops = &xgene_ndev_ops;
1640         xgene_enet_set_ethtool_ops(ndev);
1641         ndev->features |= NETIF_F_IP_CSUM |
1642                           NETIF_F_GSO |
1643                           NETIF_F_GRO |
1644                           NETIF_F_SG;
1645
1646         of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
1647         if (of_id) {
1648                 pdata->enet_id = (enum xgene_enet_id)of_id->data;
1649         }
1650 #ifdef CONFIG_ACPI
1651         else {
1652                 const struct acpi_device_id *acpi_id;
1653
1654                 acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
1655                 if (acpi_id)
1656                         pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
1657         }
1658 #endif
1659         if (!pdata->enet_id) {
1660                 ret = -ENODEV;
1661                 goto err;
1662         }
1663
1664         ret = xgene_enet_get_resources(pdata);
1665         if (ret)
1666                 goto err;
1667
1668         xgene_enet_setup_ops(pdata);
1669
1670         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1671                 ndev->features |= NETIF_F_TSO;
1672                 pdata->mss = XGENE_ENET_MSS;
1673         }
1674         ndev->hw_features = ndev->features;
1675
1676         ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
1677         if (ret) {
1678                 netdev_err(ndev, "No usable DMA configuration\n");
1679                 goto err;
1680         }
1681
1682         ret = xgene_enet_init_hw(pdata);
1683         if (ret)
1684                 goto err;
1685
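        /*
         * XGMII links are polled via delayed work.  Otherwise, when no
         * mdio_driver is in use, RGMII ports configure the MDIO bus here
         * and SGMII ports fall back to link polling.
         */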
1686         link_state = pdata->mac_ops->link_state;
1687         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1688                 INIT_DELAYED_WORK(&pdata->link_work, link_state);
1689         } else if (!pdata->mdio_driver) {
1690                 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1691                         ret = xgene_enet_mdio_config(pdata);
1692                 else
1693                         INIT_DELAYED_WORK(&pdata->link_work, link_state);
1694
1695                 if (ret)
1696                         goto err1;
1697         }
1698
1699         xgene_enet_napi_add(pdata);
1700         ret = register_netdev(ndev);
1701         if (ret) {
1702                 netdev_err(ndev, "Failed to register netdev\n");
1703                 goto err2;
1704         }
1705
1706         return 0;
1707
1708 err2:
1709         /*
1710          * If necessary, free_netdev() will call netif_napi_del() and undo
1711          * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
1712          */
1713
1714         if (pdata->mdio_driver)
1715                 xgene_enet_phy_disconnect(pdata);
1716         else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1717                 xgene_enet_mdio_remove(pdata);
1718 err1:
1719         xgene_enet_delete_desc_rings(pdata);
1720 err:
1721         free_netdev(ndev);
1722         return ret;
1723 }
1724
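/*
 * Stop the interface if it is running, detach the PHY or MDIO bus,
 * unregister the netdev, shut down the port, and free the descriptor
 * rings and the net_device.
 */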
1725 static int xgene_enet_remove(struct platform_device *pdev)
1726 {
1727         struct xgene_enet_pdata *pdata;
1728         struct net_device *ndev;
1729
1730         pdata = platform_get_drvdata(pdev);
1731         ndev = pdata->ndev;
1732
1733         rtnl_lock();
1734         if (netif_running(ndev))
1735                 dev_close(ndev);
1736         rtnl_unlock();
1737
1738         if (pdata->mdio_driver)
1739                 xgene_enet_phy_disconnect(pdata);
1740         else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1741                 xgene_enet_mdio_remove(pdata);
1742
1743         unregister_netdev(ndev);
1744         pdata->port_ops->shutdown(pdata);
1745         xgene_enet_delete_desc_rings(pdata);
1746         free_netdev(ndev);
1747
1748         return 0;
1749 }
1750
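/* Shutdown hook: delegate to xgene_enet_remove() once probe has completed. */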
1751 static void xgene_enet_shutdown(struct platform_device *pdev)
1752 {
1753         struct xgene_enet_pdata *pdata;
1754
1755         pdata = platform_get_drvdata(pdev);
1756         if (!pdata)
1757                 return;
1758
1759         if (!pdata->ndev)
1760                 return;
1761
1762         xgene_enet_remove(pdev);
1763 }
1764
1765 #ifdef CONFIG_ACPI
1766 static const struct acpi_device_id xgene_enet_acpi_match[] = {
1767         { "APMC0D05", XGENE_ENET1},
1768         { "APMC0D30", XGENE_ENET1},
1769         { "APMC0D31", XGENE_ENET1},
1770         { "APMC0D3F", XGENE_ENET1},
1771         { "APMC0D26", XGENE_ENET2},
1772         { "APMC0D25", XGENE_ENET2},
1773         { }
1774 };
1775 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
1776 #endif
1777
1778 #ifdef CONFIG_OF
1779 static const struct of_device_id xgene_enet_of_match[] = {
1780         {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
1781         {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
1782         {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
1783         {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
1784         {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
1785         {},
1786 };
1787
1788 MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
1789 #endif
1790
1791 static struct platform_driver xgene_enet_driver = {
1792         .driver = {
1793                    .name = "xgene-enet",
1794                    .of_match_table = of_match_ptr(xgene_enet_of_match),
1795                    .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
1796         },
1797         .probe = xgene_enet_probe,
1798         .remove = xgene_enet_remove,
1799         .shutdown = xgene_enet_shutdown,
1800 };
1801
1802 module_platform_driver(xgene_enet_driver);
1803
1804 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
1805 MODULE_VERSION(XGENE_DRV_VERSION);
1806 MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
1807 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
1808 MODULE_LICENSE("GPL");