drivers/net/ethernet/apm/xgene/xgene_enet_main.c
1 /* Applied Micro X-Gene SoC Ethernet Driver
2  *
3  * Copyright (c) 2014, Applied Micro Circuits Corporation
4  * Authors: Iyappan Subramanian <isubramanian@apm.com>
5  *          Ravi Patel <rapatel@apm.com>
6  *          Keyur Chudgar <kchudgar@apm.com>
7  *
8  * This program is free software; you can redistribute  it and/or modify it
9  * under  the terms of  the GNU General  Public License as published by the
10  * Free Software Foundation;  either version 2 of the  License, or (at your
11  * option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "xgene_enet_main.h"
23 #include "xgene_enet_hw.h"
24 #include "xgene_enet_sgmac.h"
25 #include "xgene_enet_xgmac.h"
26
27 #define RES_ENET_CSR    0
28 #define RES_RING_CSR    1
29 #define RES_RING_CMD    2
30
31 static const struct of_device_id xgene_enet_of_match[];
32 static const struct acpi_device_id xgene_enet_acpi_match[];
33
34 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
35 {
36         struct xgene_enet_raw_desc16 *raw_desc;
37         int i;
38
39         for (i = 0; i < buf_pool->slots; i++) {
40                 raw_desc = &buf_pool->raw_desc16[i];
41
42                 /* Hardware expects descriptor in little endian format */
43                 raw_desc->m0 = cpu_to_le64(i |
44                                 SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
45                                 SET_VAL(STASH, 3));
46         }
47 }
48
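/* Allocate nbuf fresh RX skbs, DMA-map their data buffers and post them to
 * the hardware buffer pool ring starting at buf_pool->tail.  Descriptors
 * are written in little endian, as the hardware expects.
 */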
49 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
50                                      u32 nbuf)
51 {
52         struct sk_buff *skb;
53         struct xgene_enet_raw_desc16 *raw_desc;
54         struct xgene_enet_pdata *pdata;
55         struct net_device *ndev;
56         struct device *dev;
57         dma_addr_t dma_addr;
58         u32 tail = buf_pool->tail;
59         u32 slots = buf_pool->slots - 1;
60         u16 bufdatalen, len;
61         int i;
62
63         ndev = buf_pool->ndev;
64         dev = ndev_to_dev(buf_pool->ndev);
65         pdata = netdev_priv(ndev);
66         bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
67         len = XGENE_ENET_MAX_MTU;
68
69         for (i = 0; i < nbuf; i++) {
70                 raw_desc = &buf_pool->raw_desc16[tail];
71
72                 skb = netdev_alloc_skb_ip_align(ndev, len);
73                 if (unlikely(!skb))
74                         return -ENOMEM;
75                 buf_pool->rx_skb[tail] = skb;
76
77                 dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
78                 if (dma_mapping_error(dev, dma_addr)) {
79                         netdev_err(ndev, "DMA mapping error\n");
80                         dev_kfree_skb_any(skb);
81                         return -EINVAL;
82                 }
83
84                 raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
85                                            SET_VAL(BUFDATALEN, bufdatalen) |
86                                            SET_BIT(COHERENT));
87                 tail = (tail + 1) & slots;
88         }
89
90         pdata->ring_ops->wr_cmd(buf_pool, nbuf);
91         buf_pool->tail = tail;
92
93         return 0;
94 }
95
96 static u8 xgene_enet_hdr_len(const void *data)
97 {
98         const struct ethhdr *eth = data;
99
100         return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
101 }
102
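/* Free every skb still posted in the buffer pool ring and return the
 * corresponding number of slots to the hardware.
 */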
103 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
104 {
105         struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
106         struct xgene_enet_raw_desc16 *raw_desc;
107         u32 slots = buf_pool->slots - 1;
108         u32 tail = buf_pool->tail;
109         u32 userinfo;
110         int i, len;
111
112         len = pdata->ring_ops->len(buf_pool);
113         for (i = 0; i < len; i++) {
114                 tail = (tail - 1) & slots;
115                 raw_desc = &buf_pool->raw_desc16[tail];
116
117                 /* Hardware stores descriptor in little endian format */
118                 userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
119                 dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
120         }
121
122         pdata->ring_ops->wr_cmd(buf_pool, -len);
123         buf_pool->tail = tail;
124 }
125
126 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
127 {
128         struct xgene_enet_desc_ring *rx_ring = data;
129
130         if (napi_schedule_prep(&rx_ring->napi)) {
131                 disable_irq_nosync(irq);
132                 __napi_schedule(&rx_ring->napi);
133         }
134
135         return IRQ_HANDLED;
136 }
137
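/* Handle one TX completion descriptor: unmap the linear part and all
 * fragments of the transmitted skb, check the LERR status reported by the
 * hardware and free the skb.  Returns 0 on success, or -EIO on a reported
 * error or a missing completion skb.
 */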
138 static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
139                                     struct xgene_enet_raw_desc *raw_desc)
140 {
141         struct sk_buff *skb;
142         struct device *dev;
143         skb_frag_t *frag;
144         dma_addr_t *frag_dma_addr;
145         u16 skb_index;
146         u8 status;
147         int i, ret = 0;
148
149         skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
150         skb = cp_ring->cp_skb[skb_index];
151         frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
152
153         dev = ndev_to_dev(cp_ring->ndev);
154         dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
155                          skb_headlen(skb),
156                          DMA_TO_DEVICE);
157
158         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
159                 frag = &skb_shinfo(skb)->frags[i];
160                 dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
161                                DMA_TO_DEVICE);
162         }
163
164         /* Checking for error */
165         status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
166         if (unlikely(status > 2)) {
167                 xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
168                                        status);
169                 ret = -EIO;
170         }
171
172         if (likely(skb)) {
173                 dev_kfree_skb_any(skb);
174         } else {
175                 netdev_err(cp_ring->ndev, "completion skb is NULL\n");
176                 ret = -EIO;
177         }
178
179         return ret;
180 }
181
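/* Build the 64-bit TX "work message" (hopinfo): header lengths, checksum
 * offload enables and, for TSO-eligible TCP frames, the segmentation bit.
 * A return value of 0 tells the caller to drop the packet (only possible
 * when skb_linearize() fails).
 */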
182 static u64 xgene_enet_work_msg(struct sk_buff *skb)
183 {
184         struct net_device *ndev = skb->dev;
185         struct iphdr *iph;
186         u8 l3hlen = 0, l4hlen = 0;
187         u8 ethhdr, proto = 0, csum_enable = 0;
188         u64 hopinfo = 0;
189         u32 hdr_len, mss = 0;
190         u32 i, len, nr_frags;
191
192         ethhdr = xgene_enet_hdr_len(skb->data);
193
194         if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
195             unlikely(skb->protocol != htons(ETH_P_8021Q)))
196                 goto out;
197
198         if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
199                 goto out;
200
201         iph = ip_hdr(skb);
202         if (unlikely(ip_is_fragment(iph)))
203                 goto out;
204
205         if (likely(iph->protocol == IPPROTO_TCP)) {
206                 l4hlen = tcp_hdrlen(skb) >> 2;
207                 csum_enable = 1;
208                 proto = TSO_IPPROTO_TCP;
209                 if (ndev->features & NETIF_F_TSO) {
210                         hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
211                         mss = skb_shinfo(skb)->gso_size;
212
213                         if (skb_is_nonlinear(skb)) {
214                                 len = skb_headlen(skb);
215                                 nr_frags = skb_shinfo(skb)->nr_frags;
216
217                                 for (i = 0; i < 2 && i < nr_frags; i++)
218                                         len += skb_shinfo(skb)->frags[i].size;
219
220                                 /* HW requires the header to reside within the first 3 buffers */
221                                 if (unlikely(hdr_len > len)) {
222                                         if (skb_linearize(skb))
223                                                 return 0;
224                                 }
225                         }
226
227                         if (!mss || ((skb->len - hdr_len) <= mss))
228                                 goto out;
229
230                         hopinfo |= SET_BIT(ET);
231                 }
232         } else if (iph->protocol == IPPROTO_UDP) {
233                 l4hlen = UDP_HDR_SIZE;
234                 csum_enable = 1;
235         }
236 out:
237         l3hlen = ip_hdrlen(skb) >> 2;
238         hopinfo |= SET_VAL(TCPHDR, l4hlen) |
239                   SET_VAL(IPHDR, l3hlen) |
240                   SET_VAL(ETHHDR, ethhdr) |
241                   SET_VAL(EC, csum_enable) |
242                   SET_VAL(IS, proto) |
243                   SET_BIT(IC) |
244                   SET_BIT(TYPE_ETH_WORK_MESSAGE);
245
246         return hopinfo;
247 }
248
249 static u16 xgene_enet_encode_len(u16 len)
250 {
251         return (len == BUFLEN_16K) ? 0 : len;
252 }
253
254 static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
255 {
256         desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
257                                     SET_VAL(BUFDATALEN, len));
258 }
259
260 static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
261 {
262         __le64 *exp_bufs;
263
264         exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
265         memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
266         ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
267
268         return exp_bufs;
269 }
270
271 static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
272 {
273         return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
274 }
275
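/* Fill in the TX descriptor(s) for an skb.  The linear part goes in the
 * primary descriptor; fragment buffers go in the expanded descriptor, and
 * additional (or split, >16K) fragment buffers spill into the per-ring
 * exp_bufs area which the expanded descriptor then points to.  Returns the
 * number of ring slots consumed, or a negative errno on failure.
 */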
276 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
277                                     struct sk_buff *skb)
278 {
279         struct device *dev = ndev_to_dev(tx_ring->ndev);
280         struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
281         struct xgene_enet_raw_desc *raw_desc;
282         __le64 *exp_desc = NULL, *exp_bufs = NULL;
283         dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
284         skb_frag_t *frag;
285         u16 tail = tx_ring->tail;
286         u64 hopinfo;
287         u32 len, hw_len;
288         u8 ll = 0, nv = 0, idx = 0;
289         bool split = false;
290         u32 size, offset, ell_bytes = 0;
291         u32 i, fidx, nr_frags, count = 1;
292
293         raw_desc = &tx_ring->raw_desc[tail];
294         tail = (tail + 1) & (tx_ring->slots - 1);
295         memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
296
297         hopinfo = xgene_enet_work_msg(skb);
298         if (!hopinfo)
299                 return -EINVAL;
300         raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
301                                    hopinfo);
302
303         len = skb_headlen(skb);
304         hw_len = xgene_enet_encode_len(len);
305
306         dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
307         if (dma_mapping_error(dev, dma_addr)) {
308                 netdev_err(tx_ring->ndev, "DMA mapping error\n");
309                 return -EINVAL;
310         }
311
312         /* Hardware expects descriptor in little endian format */
313         raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
314                                    SET_VAL(BUFDATALEN, hw_len) |
315                                    SET_BIT(COHERENT));
316
317         if (!skb_is_nonlinear(skb))
318                 goto out;
319
320         /* scatter gather */
321         nv = 1;
322         exp_desc = (void *)&tx_ring->raw_desc[tail];
323         tail = (tail + 1) & (tx_ring->slots - 1);
324         memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
325
326         nr_frags = skb_shinfo(skb)->nr_frags;
327         for (i = nr_frags; i < 4 ; i++)
328                 exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
329
330         frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
331
332         for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
333                 if (!split) {
334                         frag = &skb_shinfo(skb)->frags[fidx];
335                         size = skb_frag_size(frag);
336                         offset = 0;
337
338                         pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
339                                                      DMA_TO_DEVICE);
340                         if (dma_mapping_error(dev, pbuf_addr))
341                                 return -EINVAL;
342
343                         frag_dma_addr[fidx] = pbuf_addr;
344                         fidx++;
345
346                         if (size > BUFLEN_16K)
347                                 split = true;
348                 }
349
350                 if (size > BUFLEN_16K) {
351                         len = BUFLEN_16K;
352                         size -= BUFLEN_16K;
353                 } else {
354                         len = size;
355                         split = false;
356                 }
357
358                 dma_addr = pbuf_addr + offset;
359                 hw_len = xgene_enet_encode_len(len);
360
361                 switch (i) {
362                 case 0:
363                 case 1:
364                 case 2:
365                         xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
366                         break;
367                 case 3:
368                         if (split || (fidx != nr_frags)) {
369                                 exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
370                                 xgene_set_addr_len(exp_bufs, idx, dma_addr,
371                                                    hw_len);
372                                 idx++;
373                                 ell_bytes += len;
374                         } else {
375                                 xgene_set_addr_len(exp_desc, i, dma_addr,
376                                                    hw_len);
377                         }
378                         break;
379                 default:
380                         xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
381                         idx++;
382                         ell_bytes += len;
383                         break;
384                 }
385
386                 if (split)
387                         offset += BUFLEN_16K;
388         }
389         count++;
390
391         if (idx) {
392                 ll = 1;
393                 dma_addr = dma_map_single(dev, exp_bufs,
394                                           sizeof(u64) * MAX_EXP_BUFFS,
395                                           DMA_TO_DEVICE);
396                 if (dma_mapping_error(dev, dma_addr)) {
397                         dev_kfree_skb_any(skb);
398                         return -EINVAL;
399                 }
400                 i = ell_bytes >> LL_BYTES_LSB_LEN;
401                 exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
402                                           SET_VAL(LL_BYTES_MSB, i) |
403                                           SET_VAL(LL_LEN, idx));
404                 raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
405         }
406
407 out:
408         raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
409                                    SET_VAL(USERINFO, tx_ring->tail));
410         tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
411         pdata->tx_level[tx_ring->cp_ring->index] += count;
412         tx_ring->tail = tail;
413
414         return count;
415 }
416
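/* ndo_start_xmit handler: back-pressure the subqueue when the number of
 * outstanding descriptors exceeds tx_qcnt_hi, pad short frames to the
 * minimum frame size, set up the TX descriptor(s) and kick the ring.
 */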
417 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
418                                          struct net_device *ndev)
419 {
420         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
421         struct xgene_enet_desc_ring *tx_ring;
422         int index = skb->queue_mapping;
423         u32 tx_level = pdata->tx_level[index];
424         int count;
425
426         tx_ring = pdata->tx_ring[index];
427         if (tx_level < pdata->txc_level[index])
428                 tx_level += ((typeof(pdata->tx_level[index]))~0U);
429
430         if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
431                 netif_stop_subqueue(ndev, index);
432                 return NETDEV_TX_BUSY;
433         }
434
435         if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
436                 return NETDEV_TX_OK;
437
438         count = xgene_enet_setup_tx_desc(tx_ring, skb);
439         if (count <= 0) {
440                 dev_kfree_skb_any(skb);
441                 return NETDEV_TX_OK;
442         }
443
444         skb_tx_timestamp(skb);
445
446         tx_ring->tx_packets++;
447         tx_ring->tx_bytes += skb->len;
448
449         pdata->ring_ops->wr_cmd(tx_ring, count);
450         return NETDEV_TX_OK;
451 }
452
453 static void xgene_enet_skip_csum(struct sk_buff *skb)
454 {
455         struct iphdr *iph = ip_hdr(skb);
456
457         if (!ip_is_fragment(iph) ||
458             (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
459                 skb->ip_summed = CHECKSUM_UNNECESSARY;
460         }
461 }
462
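/* Process one RX descriptor: unmap the buffer, drop the frame on a
 * reported ELERR/LERR status, strip the CRC, hand the skb to GRO and
 * refill the buffer pool once every NUM_BUFPOOL frames.
 */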
463 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
464                                struct xgene_enet_raw_desc *raw_desc)
465 {
466         struct net_device *ndev;
467         struct xgene_enet_pdata *pdata;
468         struct device *dev;
469         struct xgene_enet_desc_ring *buf_pool;
470         u32 datalen, skb_index;
471         struct sk_buff *skb;
472         u8 status;
473         int ret = 0;
474
475         ndev = rx_ring->ndev;
476         pdata = netdev_priv(ndev);
477         dev = ndev_to_dev(rx_ring->ndev);
478         buf_pool = rx_ring->buf_pool;
479
480         dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
481                          XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
482         skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
483         skb = buf_pool->rx_skb[skb_index];
484
485         /* checking for error */
486         status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
487                   GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
488         if (unlikely(status > 2)) {
489                 dev_kfree_skb_any(skb);
490                 xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
491                                        status);
492                 ret = -EIO;
493                 goto out;
494         }
495
496         /* strip off CRC as HW isn't doing this */
497         datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
498         datalen = (datalen & DATALEN_MASK) - 4;
499         prefetch(skb->data - NET_IP_ALIGN);
500         skb_put(skb, datalen);
501
502         skb_checksum_none_assert(skb);
503         skb->protocol = eth_type_trans(skb, ndev);
504         if (likely((ndev->features & NETIF_F_IP_CSUM) &&
505                    skb->protocol == htons(ETH_P_IP))) {
506                 xgene_enet_skip_csum(skb);
507         }
508
509         rx_ring->rx_packets++;
510         rx_ring->rx_bytes += datalen;
511         napi_gro_receive(&rx_ring->napi, skb);
512 out:
513         if (--rx_ring->nbufpool == 0) {
514                 ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
515                 rx_ring->nbufpool = NUM_BUFPOOL;
516         }
517
518         return ret;
519 }
520
521 static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
522 {
523         return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
524 }
525
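/* NAPI worker: walk the ring from 'head', dispatching each descriptor to
 * the RX or TX-completion handler (NV descriptors occupy an extra slot),
 * then return the consumed slots to the hardware and restart the subqueue
 * if it was stopped.  Returns the number of frames processed.
 */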
526 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
527                                    int budget)
528 {
529         struct net_device *ndev = ring->ndev;
530         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
531         struct xgene_enet_raw_desc *raw_desc, *exp_desc;
532         u16 head = ring->head;
533         u16 slots = ring->slots - 1;
534         int ret, desc_count, count = 0, processed = 0;
535         bool is_completion;
536
537         do {
538                 raw_desc = &ring->raw_desc[head];
539                 desc_count = 0;
540                 is_completion = false;
541                 exp_desc = NULL;
542                 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
543                         break;
544
545                 /* read fpqnum field after dataaddr field */
546                 dma_rmb();
547                 if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
548                         head = (head + 1) & slots;
549                         exp_desc = &ring->raw_desc[head];
550
551                         if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
552                                 head = (head - 1) & slots;
553                                 break;
554                         }
555                         dma_rmb();
556                         count++;
557                         desc_count++;
558                 }
559                 if (is_rx_desc(raw_desc)) {
560                         ret = xgene_enet_rx_frame(ring, raw_desc);
561                 } else {
562                         ret = xgene_enet_tx_completion(ring, raw_desc);
563                         is_completion = true;
564                 }
565                 xgene_enet_mark_desc_slot_empty(raw_desc);
566                 if (exp_desc)
567                         xgene_enet_mark_desc_slot_empty(exp_desc);
568
569                 head = (head + 1) & slots;
570                 count++;
571                 desc_count++;
572                 processed++;
573                 if (is_completion)
574                         pdata->txc_level[ring->index] += desc_count;
575
576                 if (ret)
577                         break;
578         } while (--budget);
579
580         if (likely(count)) {
581                 pdata->ring_ops->wr_cmd(ring, -count);
582                 ring->head = head;
583
584                 if (__netif_subqueue_stopped(ndev, ring->index))
585                         netif_start_subqueue(ndev, ring->index);
586         }
587
588         return processed;
589 }
590
591 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
592 {
593         struct xgene_enet_desc_ring *ring;
594         int processed;
595
596         ring = container_of(napi, struct xgene_enet_desc_ring, napi);
597         processed = xgene_enet_process_ring(ring, budget);
598
599         if (processed != budget) {
600                 napi_complete(napi);
601                 enable_irq(ring->irq);
602         }
603
604         return processed;
605 }
606
607 static void xgene_enet_timeout(struct net_device *ndev)
608 {
609         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
610         struct netdev_queue *txq;
611         int i;
612
613         pdata->mac_ops->reset(pdata);
614
615         for (i = 0; i < pdata->txq_cnt; i++) {
616                 txq = netdev_get_tx_queue(ndev, i);
617                 txq->trans_start = jiffies;
618                 netif_tx_start_queue(txq);
619         }
620 }
621
622 static int xgene_enet_register_irq(struct net_device *ndev)
623 {
624         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
625         struct device *dev = ndev_to_dev(ndev);
626         struct xgene_enet_desc_ring *ring;
627         int ret = 0, i;
628
629         for (i = 0; i < pdata->rxq_cnt; i++) {
630                 ring = pdata->rx_ring[i];
631                 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
632                 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
633                                        0, ring->irq_name, ring);
634                 if (ret) {
635                         netdev_err(ndev, "Failed to request irq %s\n",
636                                    ring->irq_name);
637                 }
638         }
639
640         for (i = 0; i < pdata->cq_cnt; i++) {
641                 ring = pdata->tx_ring[i]->cp_ring;
642                 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
643                 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
644                                        0, ring->irq_name, ring);
645                 if (ret) {
646                         netdev_err(ndev, "Failed to request irq %s\n",
647                                    ring->irq_name);
648                 }
649         }
650
651         return ret;
652 }
653
654 static void xgene_enet_free_irq(struct net_device *ndev)
655 {
656         struct xgene_enet_pdata *pdata;
657         struct xgene_enet_desc_ring *ring;
658         struct device *dev;
659         int i;
660
661         pdata = netdev_priv(ndev);
662         dev = ndev_to_dev(ndev);
663
664         for (i = 0; i < pdata->rxq_cnt; i++) {
665                 ring = pdata->rx_ring[i];
666                 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
667                 devm_free_irq(dev, ring->irq, ring);
668         }
669
670         for (i = 0; i < pdata->cq_cnt; i++) {
671                 ring = pdata->tx_ring[i]->cp_ring;
672                 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
673                 devm_free_irq(dev, ring->irq, ring);
674         }
675 }
676
677 static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
678 {
679         struct napi_struct *napi;
680         int i;
681
682         for (i = 0; i < pdata->rxq_cnt; i++) {
683                 napi = &pdata->rx_ring[i]->napi;
684                 napi_enable(napi);
685         }
686
687         for (i = 0; i < pdata->cq_cnt; i++) {
688                 napi = &pdata->tx_ring[i]->cp_ring->napi;
689                 napi_enable(napi);
690         }
691 }
692
693 static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
694 {
695         struct napi_struct *napi;
696         int i;
697
698         for (i = 0; i < pdata->rxq_cnt; i++) {
699                 napi = &pdata->rx_ring[i]->napi;
700                 napi_disable(napi);
701         }
702
703         for (i = 0; i < pdata->cq_cnt; i++) {
704                 napi = &pdata->tx_ring[i]->cp_ring->napi;
705                 napi_disable(napi);
706         }
707 }
708
709 static int xgene_enet_open(struct net_device *ndev)
710 {
711         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
712         const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
713         int ret;
714
715         ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
716         if (ret)
717                 return ret;
718
719         ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
720         if (ret)
721                 return ret;
722
723         mac_ops->tx_enable(pdata);
724         mac_ops->rx_enable(pdata);
725
726         xgene_enet_napi_enable(pdata);
727         ret = xgene_enet_register_irq(ndev);
728         if (ret)
729                 return ret;
730
731         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
732                 phy_start(pdata->phy_dev);
733         else
734                 schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
735
736         netif_start_queue(ndev);
737
738         return ret;
739 }
740
741 static int xgene_enet_close(struct net_device *ndev)
742 {
743         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
744         const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
745         int i;
746
747         netif_stop_queue(ndev);
748
749         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
750                 phy_stop(pdata->phy_dev);
751         else
752                 cancel_delayed_work_sync(&pdata->link_work);
753
754         mac_ops->tx_disable(pdata);
755         mac_ops->rx_disable(pdata);
756
757         xgene_enet_free_irq(ndev);
758         xgene_enet_napi_disable(pdata);
759         for (i = 0; i < pdata->rxq_cnt; i++)
760                 xgene_enet_process_ring(pdata->rx_ring[i], -1);
761
762         return 0;
763 }
764
765 static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
766 {
767         struct xgene_enet_pdata *pdata;
768         struct device *dev;
769
770         pdata = netdev_priv(ring->ndev);
771         dev = ndev_to_dev(ring->ndev);
772
773         pdata->ring_ops->clear(ring);
774         dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
775 }
776
777 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
778 {
779         struct xgene_enet_desc_ring *buf_pool;
780         struct xgene_enet_desc_ring *ring;
781         int i;
782
783         for (i = 0; i < pdata->txq_cnt; i++) {
784                 ring = pdata->tx_ring[i];
785                 if (ring) {
786                         xgene_enet_delete_ring(ring);
787                         pdata->tx_ring[i] = NULL;
788                 }
789         }
790
791         for (i = 0; i < pdata->rxq_cnt; i++) {
792                 ring = pdata->rx_ring[i];
793                 if (ring) {
794                         buf_pool = ring->buf_pool;
795                         xgene_enet_delete_bufpool(buf_pool);
796                         xgene_enet_delete_ring(buf_pool);
797                         xgene_enet_delete_ring(ring);
798                         pdata->rx_ring[i] = NULL;
799                 }
800         }
801 }
802
803 static int xgene_enet_get_ring_size(struct device *dev,
804                                     enum xgene_enet_ring_cfgsize cfgsize)
805 {
806         int size = -EINVAL;
807
808         switch (cfgsize) {
809         case RING_CFGSIZE_512B:
810                 size = 0x200;
811                 break;
812         case RING_CFGSIZE_2KB:
813                 size = 0x800;
814                 break;
815         case RING_CFGSIZE_16KB:
816                 size = 0x4000;
817                 break;
818         case RING_CFGSIZE_64KB:
819                 size = 0x10000;
820                 break;
821         case RING_CFGSIZE_512KB:
822                 size = 0x80000;
823                 break;
824         default:
825                 dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
826                 break;
827         }
828
829         return size;
830 }
831
832 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
833 {
834         struct xgene_enet_pdata *pdata;
835         struct device *dev;
836
837         if (!ring)
838                 return;
839
840         dev = ndev_to_dev(ring->ndev);
841         pdata = netdev_priv(ring->ndev);
842
843         if (ring->desc_addr) {
844                 pdata->ring_ops->clear(ring);
845                 dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
846         }
847         devm_kfree(dev, ring);
848 }
849
850 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
851 {
852         struct device *dev = &pdata->pdev->dev;
853         struct xgene_enet_desc_ring *ring;
854         int i;
855
856         for (i = 0; i < pdata->txq_cnt; i++) {
857                 ring = pdata->tx_ring[i];
858                 if (ring) {
859                         if (ring->cp_ring && ring->cp_ring->cp_skb)
860                                 devm_kfree(dev, ring->cp_ring->cp_skb);
861                         if (ring->cp_ring && pdata->cq_cnt)
862                                 xgene_enet_free_desc_ring(ring->cp_ring);
863                         xgene_enet_free_desc_ring(ring);
864                 }
865         }
866
867         for (i = 0; i < pdata->rxq_cnt; i++) {
868                 ring = pdata->rx_ring[i];
869                 if (ring) {
870                         if (ring->buf_pool) {
871                                 if (ring->buf_pool->rx_skb)
872                                         devm_kfree(dev, ring->buf_pool->rx_skb);
873                                 xgene_enet_free_desc_ring(ring->buf_pool);
874                         }
875                         xgene_enet_free_desc_ring(ring);
876                 }
877         }
878 }
879
880 static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
881                                  struct xgene_enet_desc_ring *ring)
882 {
883         if ((pdata->enet_id == XGENE_ENET2) &&
884             (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
885                 return true;
886         }
887
888         return false;
889 }
890
891 static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
892                                               struct xgene_enet_desc_ring *ring)
893 {
894         u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
895
896         return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
897 }
898
899 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
900                         struct net_device *ndev, u32 ring_num,
901                         enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
902 {
903         struct xgene_enet_desc_ring *ring;
904         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
905         struct device *dev = ndev_to_dev(ndev);
906         int size;
907
908         size = xgene_enet_get_ring_size(dev, cfgsize);
909         if (size < 0)
910                 return NULL;
911
912         ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
913                             GFP_KERNEL);
914         if (!ring)
915                 return NULL;
916
917         ring->ndev = ndev;
918         ring->num = ring_num;
919         ring->cfgsize = cfgsize;
920         ring->id = ring_id;
921
922         ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
923                                               GFP_KERNEL);
924         if (!ring->desc_addr) {
925                 devm_kfree(dev, ring);
926                 return NULL;
927         }
928         ring->size = size;
929
930         if (is_irq_mbox_required(pdata, ring)) {
931                 ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
932                                 &ring->irq_mbox_dma, GFP_KERNEL);
933                 if (!ring->irq_mbox_addr) {
934                         dma_free_coherent(dev, size, ring->desc_addr,
935                                           ring->dma);
936                         devm_kfree(dev, ring);
937                         return NULL;
938                 }
939         }
940
941         ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
942         ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
943         ring = pdata->ring_ops->setup(ring);
944         netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
945                    ring->num, ring->size, ring->id, ring->slots);
946
947         return ring;
948 }
949
950 static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
951 {
952         return (owner << 6) | (bufnum & GENMASK(5, 0));
953 }
954
955 static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
956 {
957         enum xgene_ring_owner owner;
958
959         if (p->enet_id == XGENE_ENET1) {
960                 switch (p->phy_mode) {
961                 case PHY_INTERFACE_MODE_SGMII:
962                         owner = RING_OWNER_ETH0;
963                         break;
964                 default:
965                         owner = (!p->port_id) ? RING_OWNER_ETH0 :
966                                                 RING_OWNER_ETH1;
967                         break;
968                 }
969         } else {
970                 owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
971         }
972
973         return owner;
974 }
975
976 static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
977 {
978         struct device *dev = &pdata->pdev->dev;
979         u32 cpu_bufnum;
980         int ret;
981
982         ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
983
984         return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
985 }
986
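/* Allocate and set up all descriptor rings for the port: an RX ring plus
 * buffer pool per RX queue, and a TX ring (with its exp_bufs area) per TX
 * queue, each paired with either a dedicated completion ring or, when
 * cq_cnt is zero, the matching RX ring.
 */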
987 static int xgene_enet_create_desc_rings(struct net_device *ndev)
988 {
989         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
990         struct device *dev = ndev_to_dev(ndev);
991         struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
992         struct xgene_enet_desc_ring *buf_pool = NULL;
993         enum xgene_ring_owner owner;
994         dma_addr_t dma_exp_bufs;
995         u8 cpu_bufnum;
996         u8 eth_bufnum = pdata->eth_bufnum;
997         u8 bp_bufnum = pdata->bp_bufnum;
998         u16 ring_num = pdata->ring_num;
999         u16 ring_id;
1000         int i, ret, size;
1001
1002         cpu_bufnum = xgene_start_cpu_bufnum(pdata);
1003
1004         for (i = 0; i < pdata->rxq_cnt; i++) {
1005                 /* allocate rx descriptor ring */
1006                 owner = xgene_derive_ring_owner(pdata);
1007                 ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
1008                 rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1009                                                       RING_CFGSIZE_16KB,
1010                                                       ring_id);
1011                 if (!rx_ring) {
1012                         ret = -ENOMEM;
1013                         goto err;
1014                 }
1015
1016                 /* allocate buffer pool for receiving packets */
1017                 owner = xgene_derive_ring_owner(pdata);
1018                 ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1019                 buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1020                                                        RING_CFGSIZE_2KB,
1021                                                        ring_id);
1022                 if (!buf_pool) {
1023                         ret = -ENOMEM;
1024                         goto err;
1025                 }
1026
1027                 rx_ring->nbufpool = NUM_BUFPOOL;
1028                 rx_ring->buf_pool = buf_pool;
1029                 rx_ring->irq = pdata->irqs[i];
1030                 if (!pdata->cq_cnt) {
1031                         snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
1032                                  ndev->name);
1033                 } else {
1034                         snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d",
1035                                  ndev->name, i);
1036                 }
1037                 buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
1038                                                 sizeof(struct sk_buff *),
1039                                                 GFP_KERNEL);
1040                 if (!buf_pool->rx_skb) {
1041                         ret = -ENOMEM;
1042                         goto err;
1043                 }
1044
1045                 buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
1046                 rx_ring->buf_pool = buf_pool;
1047                 pdata->rx_ring[i] = rx_ring;
1048         }
1049
1050         for (i = 0; i < pdata->txq_cnt; i++) {
1051                 /* allocate tx descriptor ring */
1052                 owner = xgene_derive_ring_owner(pdata);
1053                 ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
1054                 tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1055                                                       RING_CFGSIZE_16KB,
1056                                                       ring_id);
1057                 if (!tx_ring) {
1058                         ret = -ENOMEM;
1059                         goto err;
1060                 }
1061
1062                 size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
1063                 tx_ring->exp_bufs = dma_zalloc_coherent(dev, size,
1064                                                         &dma_exp_bufs,
1065                                                         GFP_KERNEL);
1066                 if (!tx_ring->exp_bufs) {
1067                         ret = -ENOMEM;
1068                         goto err;
1069                 }
1070
1071                 pdata->tx_ring[i] = tx_ring;
1072
1073                 if (!pdata->cq_cnt) {
1074                         cp_ring = pdata->rx_ring[i];
1075                 } else {
1076                         /* allocate tx completion descriptor ring */
1077                         ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
1078                                                          cpu_bufnum++);
1079                         cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1080                                                               RING_CFGSIZE_16KB,
1081                                                               ring_id);
1082                         if (!cp_ring) {
1083                                 ret = -ENOMEM;
1084                                 goto err;
1085                         }
1086
1087                         cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
1088                         cp_ring->index = i;
1089                         snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d",
1090                                  ndev->name, i);
1091                 }
1092
1093                 cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
1094                                                sizeof(struct sk_buff *),
1095                                                GFP_KERNEL);
1096                 if (!cp_ring->cp_skb) {
1097                         ret = -ENOMEM;
1098                         goto err;
1099                 }
1100
1101                 size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
1102                 cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
1103                                                       size, GFP_KERNEL);
1104                 if (!cp_ring->frag_dma_addr) {
1105                         devm_kfree(dev, cp_ring->cp_skb);
1106                         ret = -ENOMEM;
1107                         goto err;
1108                 }
1109
1110                 tx_ring->cp_ring = cp_ring;
1111                 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1112         }
1113
1114         pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1115         pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1116
1117         return 0;
1118
1119 err:
1120         xgene_enet_free_desc_rings(pdata);
1121         return ret;
1122 }
1123
1124 static struct rtnl_link_stats64 *xgene_enet_get_stats64(
1125                         struct net_device *ndev,
1126                         struct rtnl_link_stats64 *storage)
1127 {
1128         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1129         struct rtnl_link_stats64 *stats = &pdata->stats;
1130         struct xgene_enet_desc_ring *ring;
1131         int i;
1132
1133         memset(stats, 0, sizeof(struct rtnl_link_stats64));
1134         for (i = 0; i < pdata->txq_cnt; i++) {
1135                 ring = pdata->tx_ring[i];
1136                 if (ring) {
1137                         stats->tx_packets += ring->tx_packets;
1138                         stats->tx_bytes += ring->tx_bytes;
1139                 }
1140         }
1141
1142         for (i = 0; i < pdata->rxq_cnt; i++) {
1143                 ring = pdata->rx_ring[i];
1144                 if (ring) {
1145                         stats->rx_packets += ring->rx_packets;
1146                         stats->rx_bytes += ring->rx_bytes;
1147                         stats->rx_errors += ring->rx_length_errors +
1148                                 ring->rx_crc_errors +
1149                                 ring->rx_frame_errors +
1150                                 ring->rx_fifo_errors;
1151                         stats->rx_dropped += ring->rx_dropped;
1152                 }
1153         }
1154         memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
1155
1156         return storage;
1157 }
1158
1159 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
1160 {
1161         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1162         int ret;
1163
1164         ret = eth_mac_addr(ndev, addr);
1165         if (ret)
1166                 return ret;
1167         pdata->mac_ops->set_mac_addr(pdata);
1168
1169         return ret;
1170 }
1171
1172 static const struct net_device_ops xgene_ndev_ops = {
1173         .ndo_open = xgene_enet_open,
1174         .ndo_stop = xgene_enet_close,
1175         .ndo_start_xmit = xgene_enet_start_xmit,
1176         .ndo_tx_timeout = xgene_enet_timeout,
1177         .ndo_get_stats64 = xgene_enet_get_stats64,
1178         .ndo_change_mtu = eth_change_mtu,
1179         .ndo_set_mac_address = xgene_enet_set_mac_address,
1180 };
1181
1182 #ifdef CONFIG_ACPI
1183 static void xgene_get_port_id_acpi(struct device *dev,
1184                                   struct xgene_enet_pdata *pdata)
1185 {
1186         acpi_status status;
1187         u64 temp;
1188
1189         status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
1190         if (ACPI_FAILURE(status)) {
1191                 pdata->port_id = 0;
1192         } else {
1193                 pdata->port_id = temp;
1194         }
1195
1196         return;
1197 }
1198 #endif
1199
1200 static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
1201 {
1202         u32 id = 0;
1203
1204         of_property_read_u32(dev->of_node, "port-id", &id);
1205
1206         pdata->port_id = id & BIT(0);
1207
1208         return;
1209 }
1210
1211 static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
1212 {
1213         struct device *dev = &pdata->pdev->dev;
1214         int delay, ret;
1215
1216         ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
1217         if (ret) {
1218                 pdata->tx_delay = 4;
1219                 return 0;
1220         }
1221
1222         if (delay < 0 || delay > 7) {
1223                 dev_err(dev, "Invalid tx-delay specified\n");
1224                 return -EINVAL;
1225         }
1226
1227         pdata->tx_delay = delay;
1228
1229         return 0;
1230 }
1231
1232 static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
1233 {
1234         struct device *dev = &pdata->pdev->dev;
1235         int delay, ret;
1236
1237         ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
1238         if (ret) {
1239                 pdata->rx_delay = 2;
1240                 return 0;
1241         }
1242
1243         if (delay < 0 || delay > 7) {
1244                 dev_err(dev, "Invalid rx-delay specified\n");
1245                 return -EINVAL;
1246         }
1247
1248         pdata->rx_delay = delay;
1249
1250         return 0;
1251 }
1252
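/* Fetch the platform IRQs for the port.  RGMII uses one IRQ, SGMII two and
 * XGMII up to XGENE_MAX_ENET_IRQ; for XGMII the queue counts are scaled
 * down to match the number of IRQs actually provided.
 */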
1253 static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
1254 {
1255         struct platform_device *pdev = pdata->pdev;
1256         struct device *dev = &pdev->dev;
1257         int i, ret, max_irqs;
1258
1259         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1260                 max_irqs = 1;
1261         else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
1262                 max_irqs = 2;
1263         else
1264                 max_irqs = XGENE_MAX_ENET_IRQ;
1265
1266         for (i = 0; i < max_irqs; i++) {
1267                 ret = platform_get_irq(pdev, i);
1268                 if (ret <= 0) {
1269                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1270                                 max_irqs = i;
1271                                 pdata->rxq_cnt = max_irqs / 2;
1272                                 pdata->txq_cnt = max_irqs / 2;
1273                                 pdata->cq_cnt = max_irqs / 2;
1274                                 break;
1275                         }
1276                         dev_err(dev, "Unable to get ENET IRQ\n");
1277                         ret = ret ? : -ENXIO;
1278                         return ret;
1279                 }
1280                 pdata->irqs[i] = ret;
1281         }
1282
1283         return 0;
1284 }
1285
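/* Gather platform resources: map the ENET CSR, ring CSR and ring command
 * regions, read the port id, MAC address, PHY mode, TX/RX delays, IRQs and
 * clock, then derive the per-block CSR base addresses.
 */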
1286 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1287 {
1288         struct platform_device *pdev;
1289         struct net_device *ndev;
1290         struct device *dev;
1291         struct resource *res;
1292         void __iomem *base_addr;
1293         u32 offset;
1294         int ret = 0;
1295
1296         pdev = pdata->pdev;
1297         dev = &pdev->dev;
1298         ndev = pdata->ndev;
1299
1300         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
1301         if (!res) {
1302                 dev_err(dev, "Resource enet_csr not defined\n");
1303                 return -ENODEV;
1304         }
1305         pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
1306         if (!pdata->base_addr) {
1307                 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
1308                 return -ENOMEM;
1309         }
1310
1311         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
1312         if (!res) {
1313                 dev_err(dev, "Resource ring_csr not defined\n");
1314                 return -ENODEV;
1315         }
1316         pdata->ring_csr_addr = devm_ioremap(dev, res->start,
1317                                                         resource_size(res));
1318         if (!pdata->ring_csr_addr) {
1319                 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
1320                 return -ENOMEM;
1321         }
1322
1323         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
1324         if (!res) {
1325                 dev_err(dev, "Resource ring_cmd not defined\n");
1326                 return -ENODEV;
1327         }
1328         pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
1329                                                         resource_size(res));
1330         if (!pdata->ring_cmd_addr) {
1331                 dev_err(dev, "Unable to retrieve ENET Ring command region\n");
1332                 return -ENOMEM;
1333         }
1334
1335         if (dev->of_node)
1336                 xgene_get_port_id_dt(dev, pdata);
1337 #ifdef CONFIG_ACPI
1338         else
1339                 xgene_get_port_id_acpi(dev, pdata);
1340 #endif
1341
1342         if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
1343                 eth_hw_addr_random(ndev);
1344
1345         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
1346
1347         pdata->phy_mode = device_get_phy_mode(dev);
1348         if (pdata->phy_mode < 0) {
1349                 dev_err(dev, "Unable to get phy-connection-type\n");
1350                 return pdata->phy_mode;
1351         }
1352         if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
1353             pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
1354             pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
1355                 dev_err(dev, "Incorrect phy-connection-type specified\n");
1356                 return -ENODEV;
1357         }
1358
1359         ret = xgene_get_tx_delay(pdata);
1360         if (ret)
1361                 return ret;
1362
1363         ret = xgene_get_rx_delay(pdata);
1364         if (ret)
1365                 return ret;
1366
1367         ret = xgene_enet_get_irqs(pdata);
1368         if (ret)
1369                 return ret;
1370
1371         pdata->clk = devm_clk_get(&pdev->dev, NULL);
1372         if (IS_ERR(pdata->clk)) {
1373                 /* Firmware may have set up the clock already. */
1374                 dev_info(dev, "clocks have been setup already\n");
1375         }
1376
1377         if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
1378                 base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
1379         else
1380                 base_addr = pdata->base_addr;
1381         pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
1382         pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
1383         pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
1384         pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
1385         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
1386             pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
1387                 pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
1388                 offset = (pdata->enet_id == XGENE_ENET1) ?
1389                           BLOCK_ETH_MAC_CSR_OFFSET :
1390                           X2_BLOCK_ETH_MAC_CSR_OFFSET;
1391                 pdata->mcx_mac_csr_addr = base_addr + offset;
1392         } else {
1393                 pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
1394                 pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
1395         }
1396         pdata->rx_buff_cnt = NUM_PKT_BUF;
1397
1398         return 0;
1399 }
1400
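/* Bring up the hardware: reset the port, create the descriptor rings, seed
 * the buffer pools, configure either the preclassifier tree (XGMII) or CLE
 * bypass, and initialize the MAC.
 */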
1401 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
1402 {
1403         struct xgene_enet_cle *enet_cle = &pdata->cle;
1404         struct net_device *ndev = pdata->ndev;
1405         struct xgene_enet_desc_ring *buf_pool;
1406         u16 dst_ring_num;
1407         int i, ret;
1408
1409         ret = pdata->port_ops->reset(pdata);
1410         if (ret)
1411                 return ret;
1412
1413         ret = xgene_enet_create_desc_rings(ndev);
1414         if (ret) {
1415                 netdev_err(ndev, "Error in ring configuration\n");
1416                 return ret;
1417         }
1418
1419         /* setup buffer pool */
1420         for (i = 0; i < pdata->rxq_cnt; i++) {
1421                 buf_pool = pdata->rx_ring[i]->buf_pool;
1422                 xgene_enet_init_bufpool(buf_pool);
1423                 ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
1424                 if (ret) {
1425                         xgene_enet_delete_desc_rings(pdata);
1426                         return ret;
1427                 }
1428         }
1429
1430         dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1431         buf_pool = pdata->rx_ring[0]->buf_pool;
1432         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1433                 /* Initialize and enable the PreClassifier tree */
1434                 enet_cle->max_nodes = 512;
1435                 enet_cle->max_dbptrs = 1024;
1436                 enet_cle->parsers = 3;
1437                 enet_cle->active_parser = PARSER_ALL;
1438                 enet_cle->ptree.start_node = 0;
1439                 enet_cle->ptree.start_dbptr = 0;
1440                 enet_cle->jump_bytes = 8;
1441                 ret = pdata->cle_ops->cle_init(pdata);
1442                 if (ret) {
1443                         netdev_err(ndev, "Preclass Tree init error\n");
1444                         return ret;
1445                 }
1446         } else {
1447                 pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
1448         }
1449
1450         pdata->mac_ops->init(pdata);
1451
1452         return ret;
1453 }
1454
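/* Select the MAC, port, CLE and ring operation tables plus the queue
 * counts and buffer/ring numbering, based on PHY mode, ENET generation and
 * port id.
 */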
1455 static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
1456 {
1457         switch (pdata->phy_mode) {
1458         case PHY_INTERFACE_MODE_RGMII:
1459                 pdata->mac_ops = &xgene_gmac_ops;
1460                 pdata->port_ops = &xgene_gport_ops;
1461                 pdata->rm = RM3;
1462                 pdata->rxq_cnt = 1;
1463                 pdata->txq_cnt = 1;
1464                 pdata->cq_cnt = 0;
1465                 break;
1466         case PHY_INTERFACE_MODE_SGMII:
1467                 pdata->mac_ops = &xgene_sgmac_ops;
1468                 pdata->port_ops = &xgene_sgport_ops;
1469                 pdata->rm = RM1;
1470                 pdata->rxq_cnt = 1;
1471                 pdata->txq_cnt = 1;
1472                 pdata->cq_cnt = 1;
1473                 break;
1474         default:
1475                 pdata->mac_ops = &xgene_xgmac_ops;
1476                 pdata->port_ops = &xgene_xgport_ops;
1477                 pdata->cle_ops = &xgene_cle3in_ops;
1478                 pdata->rm = RM0;
1479                 if (!pdata->rxq_cnt) {
1480                         pdata->rxq_cnt = XGENE_NUM_RX_RING;
1481                         pdata->txq_cnt = XGENE_NUM_TX_RING;
1482                         pdata->cq_cnt = XGENE_NUM_TXC_RING;
1483                 }
1484                 break;
1485         }
1486
1487         if (pdata->enet_id == XGENE_ENET1) {
1488                 switch (pdata->port_id) {
1489                 case 0:
1490                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1491                                 pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1492                                 pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1493                                 pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1494                                 pdata->ring_num = START_RING_NUM_0;
1495                         } else {
1496                                 pdata->cpu_bufnum = START_CPU_BUFNUM_0;
1497                                 pdata->eth_bufnum = START_ETH_BUFNUM_0;
1498                                 pdata->bp_bufnum = START_BP_BUFNUM_0;
1499                                 pdata->ring_num = START_RING_NUM_0;
1500                         }
1501                         break;
1502                 case 1:
1503                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1504                                 pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
1505                                 pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
1506                                 pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
1507                                 pdata->ring_num = XG_START_RING_NUM_1;
1508                         } else {
1509                                 pdata->cpu_bufnum = START_CPU_BUFNUM_1;
1510                                 pdata->eth_bufnum = START_ETH_BUFNUM_1;
1511                                 pdata->bp_bufnum = START_BP_BUFNUM_1;
1512                                 pdata->ring_num = START_RING_NUM_1;
1513                         }
1514                         break;
1515                 default:
1516                         break;
1517                 }
1518                 pdata->ring_ops = &xgene_ring1_ops;
1519         } else {
1520                 switch (pdata->port_id) {
1521                 case 0:
1522                         pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1523                         pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1524                         pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1525                         pdata->ring_num = X2_START_RING_NUM_0;
1526                         break;
1527                 case 1:
1528                         pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
1529                         pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
1530                         pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
1531                         pdata->ring_num = X2_START_RING_NUM_1;
1532                         break;
1533                 default:
1534                         break;
1535                 }
1536                 pdata->rm = RM0;
1537                 pdata->ring_ops = &xgene_ring2_ops;
1538         }
1539 }
1540
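/* Register one NAPI context per RX ring and one per TX completion ring */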
1541 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
1542 {
1543         struct napi_struct *napi;
1544         int i;
1545
1546         for (i = 0; i < pdata->rxq_cnt; i++) {
1547                 napi = &pdata->rx_ring[i]->napi;
1548                 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1549                                NAPI_POLL_WEIGHT);
1550         }
1551
1552         for (i = 0; i < pdata->cq_cnt; i++) {
1553                 napi = &pdata->tx_ring[i]->cp_ring->napi;
1554                 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1555                                NAPI_POLL_WEIGHT);
1556         }
1557 }
1558
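/* Tear down the NAPI contexts registered by xgene_enet_napi_add() */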
1559 static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
1560 {
1561         struct napi_struct *napi;
1562         int i;
1563
1564         for (i = 0; i < pdata->rxq_cnt; i++) {
1565                 napi = &pdata->rx_ring[i]->napi;
1566                 netif_napi_del(napi);
1567         }
1568
1569         for (i = 0; i < pdata->cq_cnt; i++) {
1570                 napi = &pdata->tx_ring[i]->cp_ring->napi;
1571                 netif_napi_del(napi);
1572         }
1573 }
1574
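/* Probe: allocate the net_device, derive the ENET generation from the
 * OF/ACPI match data, acquire CSR and ring resources, select the per-mode
 * operations, register the netdev and bring up the hardware.  RGMII ports
 * get an MDIO bus; the others poll link state from a delayed work item.
 */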
1575 static int xgene_enet_probe(struct platform_device *pdev)
1576 {
1577         struct net_device *ndev;
1578         struct xgene_enet_pdata *pdata;
1579         struct device *dev = &pdev->dev;
1580         const struct xgene_mac_ops *mac_ops;
1581         const struct of_device_id *of_id;
1582         int ret;
1583
1584         ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
1585                                   XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
1586         if (!ndev)
1587                 return -ENOMEM;
1588
1589         pdata = netdev_priv(ndev);
1590
1591         pdata->pdev = pdev;
1592         pdata->ndev = ndev;
1593         SET_NETDEV_DEV(ndev, dev);
1594         platform_set_drvdata(pdev, pdata);
1595         ndev->netdev_ops = &xgene_ndev_ops;
1596         xgene_enet_set_ethtool_ops(ndev);
1597         ndev->features |= NETIF_F_IP_CSUM |
1598                           NETIF_F_GSO |
1599                           NETIF_F_GRO |
1600                           NETIF_F_SG;
1601
1602         of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
1603         if (of_id) {
1604                 pdata->enet_id = (enum xgene_enet_id)of_id->data;
1605         }
1606 #ifdef CONFIG_ACPI
1607         else {
1608                 const struct acpi_device_id *acpi_id;
1609
1610                 acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
1611                 if (acpi_id)
1612                         pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
1613         }
1614 #endif
1615         if (!pdata->enet_id) {
1616                 free_netdev(ndev);
1617                 return -ENODEV;
1618         }
1619
1620         ret = xgene_enet_get_resources(pdata);
1621         if (ret)
1622                 goto err;
1623
1624         xgene_enet_setup_ops(pdata);
1625
1626         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1627                 ndev->features |= NETIF_F_TSO;
1628                 pdata->mss = XGENE_ENET_MSS;
1629         }
1630         ndev->hw_features = ndev->features;
1631
1632         ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
1633         if (ret) {
1634                 netdev_err(ndev, "No usable DMA configuration\n");
1635                 goto err;
1636         }
1637
1638         ret = register_netdev(ndev);
1639         if (ret) {
1640                 netdev_err(ndev, "Failed to register netdev\n");
1641                 goto err;
1642         }
1643
1644         ret = xgene_enet_init_hw(pdata);
1645         if (ret)
1646                 goto err_netdev;
1647
1648         mac_ops = pdata->mac_ops;
1649         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
1650                 ret = xgene_enet_mdio_config(pdata);
1651                 if (ret)
1652                         goto err_netdev;
1653         } else {
1654                 INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
1655         }
1656
1657         xgene_enet_napi_add(pdata);
1658         return 0;
1659 err_netdev:
1660         unregister_netdev(ndev);
1661 err:
1662         free_netdev(ndev);
1663         return ret;
1664 }
1665
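/* Remove: quiesce the MAC, drop the NAPI contexts and the MDIO bus (RGMII
 * only), unregister the netdev, free the descriptor rings and shut down
 * the port.
 */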
1666 static int xgene_enet_remove(struct platform_device *pdev)
1667 {
1668         struct xgene_enet_pdata *pdata;
1669         const struct xgene_mac_ops *mac_ops;
1670         struct net_device *ndev;
1671
1672         pdata = platform_get_drvdata(pdev);
1673         mac_ops = pdata->mac_ops;
1674         ndev = pdata->ndev;
1675
1676         mac_ops->rx_disable(pdata);
1677         mac_ops->tx_disable(pdata);
1678
1679         xgene_enet_napi_del(pdata);
1680         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1681                 xgene_enet_mdio_remove(pdata);
1682         unregister_netdev(ndev);
1683         xgene_enet_delete_desc_rings(pdata);
1684         pdata->port_ops->shutdown(pdata);
1685         free_netdev(ndev);
1686
1687         return 0;
1688 }
1689
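/* The ACPI and OF ID tables below map each supported device to its ENET
 * generation (XGENE_ENET1 or XGENE_ENET2) through the driver data field.
 */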
1690 #ifdef CONFIG_ACPI
1691 static const struct acpi_device_id xgene_enet_acpi_match[] = {
1692         { "APMC0D05", XGENE_ENET1},
1693         { "APMC0D30", XGENE_ENET1},
1694         { "APMC0D31", XGENE_ENET1},
1695         { "APMC0D3F", XGENE_ENET1},
1696         { "APMC0D26", XGENE_ENET2},
1697         { "APMC0D25", XGENE_ENET2},
1698         { }
1699 };
1700 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
1701 #endif
1702
1703 #ifdef CONFIG_OF
1704 static const struct of_device_id xgene_enet_of_match[] = {
1705         {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
1706         {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
1707         {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
1708         {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
1709         {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
1710         {},
1711 };
1712
1713 MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
1714 #endif
1715
1716 static struct platform_driver xgene_enet_driver = {
1717         .driver = {
1718                    .name = "xgene-enet",
1719                    .of_match_table = of_match_ptr(xgene_enet_of_match),
1720                    .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
1721         },
1722         .probe = xgene_enet_probe,
1723         .remove = xgene_enet_remove,
1724 };
1725
1726 module_platform_driver(xgene_enet_driver);
1727
1728 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
1729 MODULE_VERSION(XGENE_DRV_VERSION);
1730 MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
1731 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
1732 MODULE_LICENSE("GPL");