drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include "en.h"

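/* Allocate an skb for the RX WQE at index @ix, DMA-map its data buffer
 * for the device, and stash the mapping in skb->cb so the completion
 * path can unmap it.  Returns 0 on success or -ENOMEM on failure.
 */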
static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
                                     struct mlx5e_rx_wqe *wqe, u16 ix)
{
        struct sk_buff *skb;
        dma_addr_t dma_addr;

        skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
        if (unlikely(!skb))
                return -ENOMEM;

        dma_addr = dma_map_single(rq->pdev,
                                  /* hw start padding */
                                  skb->data,
                                  /* hw end padding */
                                  rq->wqe_sz,
                                  DMA_FROM_DEVICE);

        if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
                goto err_free_skb;

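        /* keep MLX5E_NET_IP_ALIGN bytes of headroom so the IP header is
         * naturally aligned; the address given to HW is offset to match
         */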
        skb_reserve(skb, MLX5E_NET_IP_ALIGN);

        *((dma_addr_t *)skb->cb) = dma_addr;
        wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);

        rq->skb[ix] = skb;

        return 0;

err_free_skb:
        dev_kfree_skb(skb);

        return -ENOMEM;
}

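/* Refill the RQ with fresh receive WQEs until the ring is full or an
 * allocation fails, then update the doorbell record.  Returns true if
 * the ring still has room, i.e. the caller should try again later.
 */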
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->wq;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
                return false;

        while (!mlx5_wq_ll_is_full(wq)) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

                if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
                        break;

                mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
        }

        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();

        mlx5_wq_ll_update_db_record(wq);

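        /* not full here means an allocation failed above, so report that
         * there is still work left for a later pass
         */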
        return !mlx5_wq_ll_is_full(wq);
}

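/* An LRO CQE describes several TCP segments coalesced into one skb.
 * Rewrite the Ethernet/IP/TCP headers of the merged packet so they are
 * consistent with the totals and flags the HW reports in the CQE.
 */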
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
{
        struct ethhdr   *eth    = (struct ethhdr *)(skb->data);
        struct iphdr    *ipv4   = (struct iphdr *)(skb->data + ETH_HLEN);
        struct ipv6hdr  *ipv6   = (struct ipv6hdr *)(skb->data + ETH_HLEN);
        struct tcphdr   *tcp;

        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
        int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
                       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

        u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;

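        /* locate the TCP header and keep only the L3 header pointer that
         * matches the packet's actual network protocol
         */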
        if (eth->h_proto == htons(ETH_P_IP)) {
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct iphdr));
                ipv6 = NULL;
        } else {
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct ipv6hdr));
                ipv4 = NULL;
        }

        if (get_cqe_lro_tcppsh(cqe))
                tcp->psh                = 1;

        if (tcp_ack) {
                tcp->ack                = 1;
                tcp->ack_seq            = cqe->lro_ack_seq_num;
                tcp->window             = cqe->lro_tcp_win;
        }

        if (ipv4) {
                ipv4->ttl               = cqe->lro_min_ttl;
                ipv4->tot_len           = cpu_to_be16(tot_len);
                ipv4->check             = 0;
                ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
                                                       ipv4->ihl);
        } else {
                ipv6->hop_limit         = cqe->lro_min_ttl;
                ipv6->payload_len       = cpu_to_be16(tot_len -
                                                      sizeof(struct ipv6hdr));
        }
}

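/* Propagate the HW RSS hash into the skb, mapping the CQE hash type
 * onto the kernel's PKT_HASH_TYPE_* levels.
 */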
static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
                                      struct sk_buff *skb)
{
        u8 cht = cqe->rss_hash_type;
        int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
                 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
                                            PKT_HASH_TYPE_NONE;
        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

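/* Fill in skb metadata from the completed CQE: packet length, LRO/GSO
 * info, checksum status, protocol, RX queue, RSS hash and VLAN tag.
 */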
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                      struct mlx5e_rq *rq,
                                      struct sk_buff *skb)
{
        struct net_device *netdev = rq->netdev;
        u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
        int lro_num_seg;

        skb_put(skb, cqe_bcnt);

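        /* the LRO segment count lives in the top byte of the CQE srqn
         * field; more than one segment means this skb was coalesced
         */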
        lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe);
                skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
                rq->stats.lro_packets++;
                rq->stats.lro_bytes += cqe_bcnt;
        }

        if (likely(netdev->features & NETIF_F_RXCSUM) &&
            (cqe->hds_ip_ext & CQE_L2_OK) &&
            (cqe->hds_ip_ext & CQE_L3_OK) &&
            (cqe->hds_ip_ext & CQE_L4_OK)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else {
                skb->ip_summed = CHECKSUM_NONE;
                rq->stats.csum_none++;
        }

        skb->protocol = eth_type_trans(skb, netdev);

        skb_record_rx_queue(skb, rq->ix);

        if (likely(netdev->features & NETIF_F_RXHASH))
                mlx5e_skb_set_hash(cqe, skb);

        if (cqe_has_vlan(cqe))
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(cqe->vlan_info));
}

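/* NAPI poll handler for the RX CQ: consume up to @budget completions,
 * unmap and hand the skbs up the stack, and recycle the WQE slots.
 * Returns true if the budget was exhausted and more work may remain.
 */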
bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        int i;

        /* avoid accessing cq (dma coherent memory) if not needed */
        if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
                return false;

        for (i = 0; i < budget; i++) {
                struct mlx5e_rx_wqe *wqe;
                struct mlx5_cqe64 *cqe;
                struct sk_buff *skb;
                __be16 wqe_counter_be;
                u16 wqe_counter;

                cqe = mlx5e_get_cqe(cq);
                if (!cqe)
                        break;

                mlx5_cqwq_pop(&cq->wq);

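                /* the CQE's wqe_counter identifies which WQE, and hence
                 * which skb, this completion belongs to
                 */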
                wqe_counter_be = cqe->wqe_counter;
                wqe_counter    = be16_to_cpu(wqe_counter_be);
                wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
                skb            = rq->skb[wqe_counter];
                rq->skb[wqe_counter] = NULL;

                dma_unmap_single(rq->pdev,
                                 *((dma_addr_t *)skb->cb),
                                 rq->wqe_sz,
                                 DMA_FROM_DEVICE);

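                /* the high nibble of op_own carries the CQE opcode; anything
                 * other than RESP_SEND indicates a completion error
                 */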
                if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
                        rq->stats.wqe_err++;
                        dev_kfree_skb(skb);
                        goto wq_ll_pop;
                }

                mlx5e_build_rx_skb(cqe, rq, skb);
                rq->stats.packets++;
                napi_gro_receive(cq->napi, skb);

wq_ll_pop:
                mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
                               &wqe->next.next_wqe_index);
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

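        /* budget exhausted: more CQEs may be pending, so re-set the flag
         * and ask to be polled again
         */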
        if (i == budget) {
                set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
                return true;
        }

        return false;
}