1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2016 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
27 #include <linux/prefetch.h>
28 #include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
36 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
37 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
38 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
42 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
43 #define I40E_FD_CLEAN_DELAY 10
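
/* Sideband Flow Director filters are programmed through the Tx path:
 * i40e_program_fdir_filter() below queues a filter program descriptor
 * followed by a dummy data descriptor that points at a pre-built raw
 * packet for the flow type being matched (see the dummy packet arrays
 * further down in this file).
 */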
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
51 int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
54 struct i40e_filter_program_desc *fdir_desc;
55 struct i40e_tx_buffer *tx_buf, *first;
56 struct i40e_tx_desc *tx_desc;
57 struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;
	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;
	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;
88 dma = dma_map_single(dev, raw_packet,
89 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;
93 /* grab the next descriptor */
94 i = tx_ring->next_to_use;
95 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
96 first = &tx_ring->tx_bi[i];
97 memset(first, 0, sizeof(struct i40e_tx_buffer));
99 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
101 fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
102 I40E_TXD_FLTR_QW0_QINDEX_MASK;
104 fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
105 I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
107 fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
108 I40E_TXD_FLTR_QW0_PCTYPE_MASK;
110 /* Use LAN VSI Id if not programmed by user */
111 if (fdir_data->dest_vsi == 0)
112 fpt |= (pf->vsi[pf->lan_vsi]->id) <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
116 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
117 I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
119 dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
126 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
128 dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
129 I40E_TXD_FLTR_QW1_DEST_MASK;
131 dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
132 I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
134 if (fdir_data->cnt_index != 0) {
135 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
136 dcc |= ((u32)fdir_data->cnt_index <<
137 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}
141 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
142 fdir_desc->rsvd = cpu_to_le32(0);
143 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
144 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
146 /* Now program a dummy descriptor */
147 i = tx_ring->next_to_use;
148 tx_desc = I40E_TX_DESC(tx_ring, i);
149 tx_buf = &tx_ring->tx_bi[i];
151 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
153 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
155 /* record length, and DMA address */
156 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
157 dma_unmap_addr_set(tx_buf, dma, dma);
159 tx_desc->buffer_addr = cpu_to_le64(dma);
160 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
162 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
163 tx_buf->raw_buf = (void *)raw_packet;
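
	/* The data descriptor below carries the raw packet with the DUMMY bit
	 * set (plus RS | EOP from I40E_TXD_CMD), so nothing goes on the wire
	 * but the descriptor write-back still signals that the filter
	 * programming was fetched by hardware.
	 */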
165 tx_desc->cmd_type_offset_bsz =
166 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
168 /* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();
173 /* Mark the data descriptor to be watched */
174 first->next_to_watch = tx_desc;
	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
183 #define IP_HEADER_OFFSET 14
184 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
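/* 42-byte dummy frame: 14-byte Ethernet + 20-byte IPv4 + 8-byte UDP header */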
186 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
187 * @vsi: pointer to the targeted VSI
188 * @fd_data: the flow director data required for the FDir descriptor
189 * @add: true adds a filter, false removes it
191 * Returns 0 if the filters were successfully added or removed
193 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
197 struct i40e_pf *pf = vsi->back;
203 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
204 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
205 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
210 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
212 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
213 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
214 + sizeof(struct iphdr));
216 ip->daddr = fd_data->dst_ip[0];
217 udp->dest = fd_data->dst_port;
218 ip->saddr = fd_data->src_ip[0];
219 udp->source = fd_data->src_port;
221 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
225 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
241 return err ? -EOPNOTSUPP : 0;
244 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
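/* 54-byte dummy frame: 14-byte Ethernet + 20-byte IPv4 + 20-byte TCP header */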
246 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
247 * @vsi: pointer to the targeted VSI
248 * @fd_data: the flow director data required for the FDir descriptor
249 * @add: true adds a filter, false removes it
251 * Returns 0 if the filters were successfully added or removed
253 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
254 struct i40e_fdir_filter *fd_data,
257 struct i40e_pf *pf = vsi->back;
264 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
265 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
266 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
267 0x0, 0x72, 0, 0, 0, 0};
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
272 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
274 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
275 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
276 + sizeof(struct iphdr));
278 ip->daddr = fd_data->dst_ip[0];
279 tcp->dest = fd_data->dst_port;
280 ip->saddr = fd_data->src_ip[0];
281 tcp->source = fd_data->src_port;
285 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
286 if (I40E_DEBUG_FD & pf->hw.debug_mask)
287 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
288 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
291 pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
292 (pf->fd_tcp_rule - 1) : 0;
293 if (pf->fd_tcp_rule == 0) {
294 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
295 if (I40E_DEBUG_FD & pf->hw.debug_mask)
296 dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
300 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
301 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
304 dev_info(&pf->pdev->dev,
305 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
306 fd_data->pctype, fd_data->fd_id, ret);
308 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
311 fd_data->pctype, fd_data->fd_id);
313 dev_info(&pf->pdev->dev,
314 "Filter deleted for PCTYPE %d loc = %d\n",
315 fd_data->pctype, fd_data->fd_id);
321 return err ? -EOPNOTSUPP : 0;
325 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
326 * a specific flow spec
327 * @vsi: pointer to the targeted VSI
328 * @fd_data: the flow director data required for the FDir descriptor
329 * @add: true adds a filter, false removes it
331 * Returns 0 if the filters were successfully added or removed
333 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}

340 #define I40E_IP_DUMMY_PACKET_LEN 34
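/* 34-byte dummy frame: 14-byte Ethernet + 20-byte IPv4 header, no L4 payload */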
342 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
343 * a specific flow spec
344 * @vsi: pointer to the targeted VSI
345 * @fd_data: the flow director data required for the FDir descriptor
346 * @add: true adds a filter, false removes it
348 * Returns 0 if the filters were successfully added or removed
350 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
351 struct i40e_fdir_filter *fd_data,
354 struct i40e_pf *pf = vsi->back;
360 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};
364 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
365 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
369 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
370 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
372 ip->saddr = fd_data->src_ip[0];
373 ip->daddr = fd_data->dst_ip[0];
377 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
380 dev_info(&pf->pdev->dev,
381 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
382 fd_data->pctype, fd_data->fd_id, ret);
384 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
386 dev_info(&pf->pdev->dev,
387 "Filter OK for PCTYPE %d loc = %d\n",
388 fd_data->pctype, fd_data->fd_id);
390 dev_info(&pf->pdev->dev,
391 "Filter deleted for PCTYPE %d loc = %d\n",
392 fd_data->pctype, fd_data->fd_id);
399 return err ? -EOPNOTSUPP : 0;
403 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
404 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or delete
406 * @add: true adds a filter, false removes it
409 int i40e_add_del_fdir(struct i40e_vsi *vsi,
410 struct i40e_fdir_filter *input, bool add)
412 struct i40e_pf *pf = vsi->back;
	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}
	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}
455 * i40e_fd_handle_status - check the Programming Status for FD
456 * @rx_ring: the Rx ring for this descriptor
457 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
458 * @prog_id: the id originally used for programming
460 * This is used to verify if the FD programming or invalidation
461 * requested by SW to the HW is successful or not and take actions accordingly.
463 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
464 union i40e_rx_desc *rx_desc, u8 prog_id)
466 struct i40e_pf *pf = rx_ring->vsi->back;
467 struct pci_dev *pdev = pf->pdev;
468 u32 fcnt_prog, fcnt_avail;
472 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
473 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
474 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
476 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
477 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
478 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
479 (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);
483 /* Check if the programming error is for ATR.
484 * If so, auto disable ATR and set a state for
485 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;
493 /* store the current atr filter count */
494 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
496 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
497 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
498 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
499 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
502 /* filter programming failed most likely due to table full */
503 fcnt_prog = i40e_get_global_fd_count(pf);
504 fcnt_avail = pf->fdir_pf_filter_count;
505 /* If ATR is running fcnt_prog can quickly change,
506 * if we are very close to full, it makes sense to disable
507 * FD ATR/SB and then re-enable it when there is room.
509 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
510 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
511 !(pf->auto_disable_flags &
512 I40E_FLAG_FD_SB_ENABLED)) {
513 if (I40E_DEBUG_FD & pf->hw.debug_mask)
514 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
515 pf->auto_disable_flags |=
516 I40E_FLAG_FD_SB_ENABLED;
519 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
520 if (I40E_DEBUG_FD & pf->hw.debug_mask)
521 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
522 rx_desc->wb.qword0.hi_dword.fd_id);
527 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
528 * @ring: the ring that owns the buffer
529 * @tx_buffer: the buffer to free
531 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
532 struct i40e_tx_buffer *tx_buffer)
534 if (tx_buffer->skb) {
535 dev_kfree_skb_any(tx_buffer->skb);
536 if (dma_unmap_len(tx_buffer, len))
537 dma_unmap_single(ring->dev,
538 dma_unmap_addr(tx_buffer, dma),
539 dma_unmap_len(tx_buffer, len),
541 } else if (dma_unmap_len(tx_buffer, len)) {
542 dma_unmap_page(ring->dev,
543 dma_unmap_addr(tx_buffer, dma),
544 dma_unmap_len(tx_buffer, len),
548 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
549 kfree(tx_buffer->raw_buf);
551 tx_buffer->next_to_watch = NULL;
552 tx_buffer->skb = NULL;
553 dma_unmap_len_set(tx_buffer, len, 0);
554 /* tx_buffer must be completely set up in the transmit path */
558 * i40e_clean_tx_ring - Free any empty Tx buffers
559 * @tx_ring: ring to be cleaned
561 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
563 unsigned long bi_size;
	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;
570 /* Free all the Tx ring sk_buffs */
571 for (i = 0; i < tx_ring->count; i++)
572 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
574 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
575 memset(tx_ring->tx_bi, 0, bi_size);
577 /* Zero out the descriptor ring */
578 memset(tx_ring->desc, 0, tx_ring->size);
580 tx_ring->next_to_use = 0;
581 tx_ring->next_to_clean = 0;
	if (!tx_ring->netdev)
		return;
586 /* cleanup Tx queue statistics */
587 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
588 tx_ring->queue_index));
592 * i40e_free_tx_resources - Free Tx resources per queue
593 * @tx_ring: Tx descriptor ring for a specific queue
595 * Free all transmit software resources
597 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
599 i40e_clean_tx_ring(tx_ring);
600 kfree(tx_ring->tx_bi);
601 tx_ring->tx_bi = NULL;
	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
605 tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
611 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
613 * @in_sw: is tx_pending being checked in SW or HW
615 * Since there is no access to the ring head register
616 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
635 #define WB_STRIDE 0x3
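/* With WB_ON_ITR, a descriptor write-back is forced once fewer than
 * (WB_STRIDE + 1) descriptors remain pending (see i40e_clean_tx_irq).
 */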
638 * i40e_clean_tx_irq - Reclaim resources after transmit completes
639 * @vsi: the VSI we care about
640 * @tx_ring: Tx ring to clean
641 * @napi_budget: Used to determine if we are in netpoll
643 * Returns true if there's any budget left (e.g. the clean is finished)
645 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
646 struct i40e_ring *tx_ring, int napi_budget)
648 u16 i = tx_ring->next_to_clean;
649 struct i40e_tx_buffer *tx_buf;
650 struct i40e_tx_desc *tx_head;
651 struct i40e_tx_desc *tx_desc;
652 unsigned int total_bytes = 0, total_packets = 0;
653 unsigned int budget = vsi->work_limit;
655 tx_buf = &tx_ring->tx_bi[i];
656 tx_desc = I40E_TX_DESC(tx_ring, i);
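	/* i40e_get_head() reads the head value the hardware writes back into
	 * the extra u32 reserved past the end of the descriptor ring (see
	 * i40e_setup_tx_descriptors), so completions are detected by comparing
	 * against that head rather than checking per-descriptor DD bits.
	 */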
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
662 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;
668 /* prevent any other reads prior to eop_desc */
669 read_barrier_depends();
671 /* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;
675 /* clear next_to_watch to prevent false hangs */
676 tx_buf->next_to_watch = NULL;
678 /* update the statistics for this packet */
679 total_bytes += tx_buf->bytecount;
680 total_packets += tx_buf->gso_segs;
683 napi_consume_skb(tx_buf->skb, napi_budget);
685 /* unmap skb header data */
686 dma_unmap_single(tx_ring->dev,
687 dma_unmap_addr(tx_buf, dma),
688 dma_unmap_len(tx_buf, len),
691 /* clear tx_buffer data */
693 dma_unmap_len_set(tx_buf, len, 0);
695 /* unmap remaining buffers */
696 while (tx_desc != eop_desc) {
703 tx_buf = tx_ring->tx_bi;
704 tx_desc = I40E_TX_DESC(tx_ring, 0);
707 /* unmap any remaining paged data */
708 if (dma_unmap_len(tx_buf, len)) {
709 dma_unmap_page(tx_ring->dev,
710 dma_unmap_addr(tx_buf, dma),
711 dma_unmap_len(tx_buf, len),
713 dma_unmap_len_set(tx_buf, len, 0);
717 /* move us one more past the eop_desc for start of next pkt */
723 tx_buf = tx_ring->tx_bi;
724 tx_desc = I40E_TX_DESC(tx_ring, 0);
		/* update budget accounting */
		budget--;
731 } while (likely(budget));
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
735 u64_stats_update_begin(&tx_ring->syncp);
736 tx_ring->stats.bytes += total_bytes;
737 tx_ring->stats.packets += total_packets;
738 u64_stats_update_end(&tx_ring->syncp);
739 tx_ring->q_vector->tx.total_bytes += total_bytes;
740 tx_ring->q_vector->tx.total_packets += total_packets;
742 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
745 /* check to see if there are < 4 descriptors
746 * waiting to be written back, then kick the hardware to force
747 * them to be written back in case we stay in NAPI.
748 * In this mode on X722 we do not enable Interrupt.
750 j = i40e_get_tx_pending(tx_ring, false);
		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
754 !test_bit(__I40E_DOWN, &vsi->state) &&
755 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
756 tx_ring->arm_wb = true;
759 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
760 tx_ring->queue_index),
761 total_packets, total_bytes);
763 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
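	/* Restart a stopped queue only once at least TX_WAKE_THRESHOLD
	 * descriptors have been reclaimed, to avoid thrashing between
	 * stopping and waking the queue.
	 */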
764 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
765 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
766 /* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
770 if (__netif_subqueue_stopped(tx_ring->netdev,
771 tx_ring->queue_index) &&
772 !test_bit(__I40E_DOWN, &vsi->state)) {
773 netif_wake_subqueue(tx_ring->netdev,
774 tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
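
/* WB_ON_ITR mode: instead of raising an interrupt per completion, the
 * hardware is armed (below) to write back finished Tx descriptors when the
 * ITR timer expires; i40e_force_wb() triggers a software interrupt to get
 * the same effect immediately.
 */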
783 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
784 * @vsi: the VSI we care about
785 * @q_vector: the vector on which to enable writeback
788 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
789 struct i40e_q_vector *q_vector)
791 u16 flags = q_vector->tx.ring[0].flags;
794 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
797 if (q_vector->arm_wb_state)
800 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
801 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
802 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
809 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
811 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
813 q_vector->arm_wb_state = true;
817 * i40e_force_wb - Issue SW Interrupt so HW does a wb
818 * @vsi: the VSI we care about
819 * @q_vector: the vector on which to force writeback
822 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
824 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
825 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
826 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
827 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
828 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
829 /* allow 00 to be written to the index */
		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
833 vsi->base_vector - 1), val);
835 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
836 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
837 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
838 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
839 /* allow 00 to be written to the index */
841 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
846 * i40e_set_new_dynamic_itr - Find new ITR level
847 * @rc: structure containing ring performance data
849 * Returns true if ITR changed, false if not
851 * Stores a new ITR value based on packets and byte counts during
852 * the last interrupt. The advantage of per interrupt computation
853 * is faster updates and more accurate ITR for the current traffic
854 * pattern. Constants in this function were computed based on
855 * theoretical maximum wire speed and thresholds were set based on
856 * testing data as well as attempting to minimize response time
857 * while increasing bulk throughput.
859 static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
861 enum i40e_latency_range new_latency_range = rc->latency_range;
862 struct i40e_q_vector *qv = rc->ring->q_vector;
863 u32 new_itr = rc->itr;
	if (rc->total_packets == 0 || !rc->itr)
		return false;
870 /* simple throttlerate management
871 * 0-10MB/s lowest (50000 ints/s)
872 * 10-20MB/s low (20000 ints/s)
873 * 20-1249MB/s bulk (18000 ints/s)
874 * > 40000 Rx packets per second (8000 ints/s)
876 * The math works out because the divisor is in 10^(-6) which
877 * turns the bytes/us input value into MB/s values, but
878 * make sure to use usecs, as the register values written
879 * are in 2 usec increments in the ITR registers, and make sure
880 * to use the smoothed values that the countdown timer gives us.
882 usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
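	/* rc->itr is in 2 usec units, so (rc->itr << 1) is the interrupt
	 * interval in usecs; multiplying by ITR_COUNTDOWN_START approximates
	 * the time window the byte/packet counters below were accumulated over.
	 */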
883 bytes_per_int = rc->total_bytes / usecs;
885 switch (new_latency_range) {
886 case I40E_LOWEST_LATENCY:
887 if (bytes_per_int > 10)
888 new_latency_range = I40E_LOW_LATENCY;
890 case I40E_LOW_LATENCY:
891 if (bytes_per_int > 20)
892 new_latency_range = I40E_BULK_LATENCY;
893 else if (bytes_per_int <= 10)
894 new_latency_range = I40E_LOWEST_LATENCY;
896 case I40E_BULK_LATENCY:
897 case I40E_ULTRA_LATENCY:
899 if (bytes_per_int <= 20)
900 new_latency_range = I40E_LOW_LATENCY;
904 /* this is to adjust RX more aggressively when streaming small
905 * packets. The value of 40000 was picked as it is just beyond
906 * what the hardware can receive per second if in low latency
909 #define RX_ULTRA_PACKET_RATE 40000
	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;
915 rc->latency_range = new_latency_range;
917 switch (new_latency_range) {
918 case I40E_LOWEST_LATENCY:
919 new_itr = I40E_ITR_50K;
921 case I40E_LOW_LATENCY:
922 new_itr = I40E_ITR_20K;
924 case I40E_BULK_LATENCY:
925 new_itr = I40E_ITR_18K;
927 case I40E_ULTRA_LATENCY:
928 new_itr = I40E_ITR_8K;
935 rc->total_packets = 0;
	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}
946 * i40e_clean_programming_status - clean the programming status descriptor
947 * @rx_ring: the rx ring that has this descriptor
948 * @rx_desc: the rx descriptor written back by HW
950 * Flow director should handle FD_FILTER_STATUS to check its filter programming
951 * status being successful or not and take actions accordingly. FCoE should
952 * handle its context/filter programming/invalidation status and take actions.
955 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
956 union i40e_rx_desc *rx_desc)
961 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
962 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
963 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
965 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
966 i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}
975 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
976 * @tx_ring: the tx ring to set up
978 * Return 0 on success, negative on error
980 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
982 struct device *dev = tx_ring->dev;
988 /* warn if we are about to overwrite the pointer */
989 WARN_ON(tx_ring->tx_bi);
990 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
991 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
995 /* round up to nearest 4K */
996 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
997 /* add u32 for head writeback, align after this takes care of
998 * guaranteeing this is at least one cache line in size
1000 tx_ring->size += sizeof(u32);
1001 tx_ring->size = ALIGN(tx_ring->size, 4096);
1002 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1003 &tx_ring->dma, GFP_KERNEL);
1004 if (!tx_ring->desc) {
1005 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1010 tx_ring->next_to_use = 0;
1011 tx_ring->next_to_clean = 0;
1015 kfree(tx_ring->tx_bi);
1016 tx_ring->tx_bi = NULL;
1021 * i40e_clean_rx_ring - Free Rx buffers
1022 * @rx_ring: ring to be cleaned
1024 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1026 struct device *dev = rx_ring->dev;
1027 struct i40e_rx_buffer *rx_bi;
1028 unsigned long bi_size;
1031 /* ring already cleared, nothing to do */
1032 if (!rx_ring->rx_bi)
1035 if (ring_is_ps_enabled(rx_ring)) {
1036 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
1038 rx_bi = &rx_ring->rx_bi[0];
1039 if (rx_bi->hdr_buf) {
1040 dma_free_coherent(dev,
1044 for (i = 0; i < rx_ring->count; i++) {
1045 rx_bi = &rx_ring->rx_bi[i];
1047 rx_bi->hdr_buf = NULL;
1051 /* Free all the Rx ring sk_buffs */
1052 for (i = 0; i < rx_ring->count; i++) {
1053 rx_bi = &rx_ring->rx_bi[i];
1055 dma_unmap_single(dev,
1057 rx_ring->rx_buf_len,
1062 dev_kfree_skb(rx_bi->skb);
1066 if (rx_bi->page_dma) {
1071 rx_bi->page_dma = 0;
1073 __free_page(rx_bi->page);
1075 rx_bi->page_offset = 0;
1079 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1080 memset(rx_ring->rx_bi, 0, bi_size);
1082 /* Zero out the descriptor ring */
1083 memset(rx_ring->desc, 0, rx_ring->size);
1085 rx_ring->next_to_clean = 0;
1086 rx_ring->next_to_use = 0;
1090 * i40e_free_rx_resources - Free Rx resources
1091 * @rx_ring: ring to clean the resources from
1093 * Free all receive software resources
1095 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1097 i40e_clean_rx_ring(rx_ring);
1098 kfree(rx_ring->rx_bi);
1099 rx_ring->rx_bi = NULL;
1101 if (rx_ring->desc) {
1102 dma_free_coherent(rx_ring->dev, rx_ring->size,
1103 rx_ring->desc, rx_ring->dma);
1104 rx_ring->desc = NULL;
1109 * i40e_alloc_rx_headers - allocate rx header buffers
1110 * @rx_ring: ring to alloc buffers
1112 * Allocate rx header buffers for the entire ring. As these are static,
1113 * this is only called when setting up a new ring.
1115 void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
1117 struct device *dev = rx_ring->dev;
1118 struct i40e_rx_buffer *rx_bi;
1124 if (rx_ring->rx_bi[0].hdr_buf)
1126 /* Make sure the buffers don't cross cache line boundaries. */
1127 buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
1128 buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
1132 for (i = 0; i < rx_ring->count; i++) {
1133 rx_bi = &rx_ring->rx_bi[i];
1134 rx_bi->dma = dma + (i * buf_size);
1135 rx_bi->hdr_buf = buffer + (i * buf_size);
1140 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1141 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1143 * Returns 0 on success, negative on failure
1145 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1147 struct device *dev = rx_ring->dev;
1150 /* warn if we are about to overwrite the pointer */
1151 WARN_ON(rx_ring->rx_bi);
1152 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1153 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1154 if (!rx_ring->rx_bi)
1157 u64_stats_init(&rx_ring->syncp);
1159 /* Round up to nearest 4K */
1160 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1161 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
1162 : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1163 rx_ring->size = ALIGN(rx_ring->size, 4096);
1164 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1165 &rx_ring->dma, GFP_KERNEL);
1167 if (!rx_ring->desc) {
1168 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1173 rx_ring->next_to_clean = 0;
1174 rx_ring->next_to_use = 0;
1178 kfree(rx_ring->rx_bi);
1179 rx_ring->rx_bi = NULL;
1184 * i40e_release_rx_desc - Store the new tail and head values
1185 * @rx_ring: ring to bump
1186 * @val: new head index
1188 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1190 rx_ring->next_to_use = val;
1191 /* Force memory writes to complete before letting h/w
1192 * know there are new descriptors to fetch. (Only
1193 * applicable for weak-ordered memory model archs,
1197 writel(val, rx_ring->tail);
1201 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
1202 * @rx_ring: ring to place buffers on
1203 * @cleaned_count: number of buffers to replace
1205 * Returns true if any errors on allocation
1207 bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
1209 u16 i = rx_ring->next_to_use;
1210 union i40e_rx_desc *rx_desc;
1211 struct i40e_rx_buffer *bi;
1212 const int current_node = numa_node_id();
1214 /* do nothing if no valid netdev defined */
1215 if (!rx_ring->netdev || !cleaned_count)
1218 while (cleaned_count--) {
1219 rx_desc = I40E_RX_DESC(rx_ring, i);
1220 bi = &rx_ring->rx_bi[i];
1222 if (bi->skb) /* desc is in use */
1225 /* If we've been moved to a different NUMA node, release the
1226 * page so we can get a new one on the current node.
1228 if (bi->page && page_to_nid(bi->page) != current_node) {
1229 dma_unmap_page(rx_ring->dev,
1233 __free_page(bi->page);
1236 rx_ring->rx_stats.realloc_count++;
1237 } else if (bi->page) {
1238 rx_ring->rx_stats.page_reuse_count++;
1242 bi->page = alloc_page(GFP_ATOMIC);
1244 rx_ring->rx_stats.alloc_page_failed++;
1247 bi->page_dma = dma_map_page(rx_ring->dev,
1252 if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
1253 rx_ring->rx_stats.alloc_page_failed++;
1254 __free_page(bi->page);
1257 bi->page_offset = 0;
1260 bi->page_offset = 0;
1263 /* Refresh the desc even if buffer_addrs didn't change
1264 * because each write-back erases this info.
1266 rx_desc->read.pkt_addr =
1267 cpu_to_le64(bi->page_dma + bi->page_offset);
1268 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1270 if (i == rx_ring->count)
1274 if (rx_ring->next_to_use != i)
1275 i40e_release_rx_desc(rx_ring, i);
1280 if (rx_ring->next_to_use != i)
1281 i40e_release_rx_desc(rx_ring, i);
1283 /* make sure to come back via polling to try again after
1284 * allocation failure
1290 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
1291 * @rx_ring: ring to place buffers on
1292 * @cleaned_count: number of buffers to replace
1294 * Returns true if any errors on allocation
1296 bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
1298 u16 i = rx_ring->next_to_use;
1299 union i40e_rx_desc *rx_desc;
1300 struct i40e_rx_buffer *bi;
1301 struct sk_buff *skb;
1303 /* do nothing if no valid netdev defined */
1304 if (!rx_ring->netdev || !cleaned_count)
1307 while (cleaned_count--) {
1308 rx_desc = I40E_RX_DESC(rx_ring, i);
1309 bi = &rx_ring->rx_bi[i];
1313 skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
1314 rx_ring->rx_buf_len,
1318 rx_ring->rx_stats.alloc_buff_failed++;
1321 /* initialize queue mapping */
1322 skb_record_rx_queue(skb, rx_ring->queue_index);
1327 bi->dma = dma_map_single(rx_ring->dev,
1329 rx_ring->rx_buf_len,
1331 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1332 rx_ring->rx_stats.alloc_buff_failed++;
1334 dev_kfree_skb(bi->skb);
1340 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1341 rx_desc->read.hdr_addr = 0;
1343 if (i == rx_ring->count)
1347 if (rx_ring->next_to_use != i)
1348 i40e_release_rx_desc(rx_ring, i);
1353 if (rx_ring->next_to_use != i)
1354 i40e_release_rx_desc(rx_ring, i);
1356 /* make sure to come back via polling to try again after
1357 * allocation failure
1363 * i40e_receive_skb - Send a completed packet up the stack
1364 * @rx_ring: rx ring in play
1365 * @skb: packet to send up
1366 * @vlan_tag: vlan tag for packet
1368 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1369 struct sk_buff *skb, u16 vlan_tag)
1371 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1373 if (vlan_tag & VLAN_VID_MASK)
1374 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1376 napi_gro_receive(&q_vector->napi, skb);
1380 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1381 * @vsi: the VSI we care about
1382 * @skb: skb currently being received and modified
1383 * @rx_status: status value of last descriptor in packet
1384 * @rx_error: error value of last descriptor in packet
1385 * @rx_ptype: ptype value of last descriptor in packet
1387 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1388 struct sk_buff *skb,
1393 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1394 bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
1396 skb->ip_summed = CHECKSUM_NONE;
1398 /* Rx csum enabled and ip headers found? */
1399 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1402 /* did the hardware decode the packet and checksum? */
1403 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1406 /* both known and outer_ip must be set for the below code to work */
1407 if (!(decoded.known && decoded.outer_ip))
1410 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1411 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1412 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1413 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;
1420 /* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;
1426 /* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;
1430 /* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;
1437 /* The hardware supported by this driver does not validate outer
1438 * checksums for tunneled VXLAN or GENEVE frames. I don't agree
1439 * with it but the specification states that you "MAY validate", it
1440 * doesn't make it a hard requirement so if we have validated the
1441 * inner checksum report CHECKSUM_UNNECESSARY.
1444 ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1445 (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1446 ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1447 (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1449 skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ipv4_tunnel || ipv6_tunnel;

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
1459 * i40e_ptype_to_htype - get a hash type
1460 * @ptype: the ptype value from the descriptor
1462 * Returns a hash type to be used by skb_set_hash
1464 static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
1466 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;
1471 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1472 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1473 return PKT_HASH_TYPE_L4;
1474 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1475 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1476 return PKT_HASH_TYPE_L3;
1478 return PKT_HASH_TYPE_L2;
1482 * i40e_rx_hash - set the hash value in the skb
1483 * @ring: descriptor ring
1484 * @rx_desc: specific descriptor
1486 static inline void i40e_rx_hash(struct i40e_ring *ring,
1487 union i40e_rx_desc *rx_desc,
1488 struct sk_buff *skb,
1492 const __le64 rss_mask =
1493 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1494 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;
1499 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1500 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1501 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1506 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
1507 * @rx_ring: rx ring to clean
1508 * @budget: how many cleans we're allowed
1510 * Returns true if there's any budget left (e.g. the clean is finished)
1512 static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
1514 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1515 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1516 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1517 struct i40e_vsi *vsi = rx_ring->vsi;
1518 u16 i = rx_ring->next_to_clean;
1519 union i40e_rx_desc *rx_desc;
1520 u32 rx_error, rx_status;
1521 bool failure = false;
1530 struct i40e_rx_buffer *rx_bi;
1531 struct sk_buff *skb;
1533 /* return some buffers to hardware, one at a time is too slow */
1534 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1535 failure = failure ||
1536 i40e_alloc_rx_buffers_ps(rx_ring,
1541 i = rx_ring->next_to_clean;
1542 rx_desc = I40E_RX_DESC(rx_ring, i);
1543 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1544 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1545 I40E_RXD_QW1_STATUS_SHIFT;
1547 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1550 /* This memory barrier is needed to keep us from reading
1551 * any other fields out of the rx_desc until we know the
1555 /* sync header buffer for reading */
1556 dma_sync_single_range_for_cpu(rx_ring->dev,
1557 rx_ring->rx_bi[0].dma,
1558 i * rx_ring->rx_hdr_len,
1559 rx_ring->rx_hdr_len,
1561 if (i40e_rx_is_programming_status(qword)) {
1562 i40e_clean_programming_status(rx_ring, rx_desc);
1563 I40E_RX_INCREMENT(rx_ring, i);
1566 rx_bi = &rx_ring->rx_bi[i];
1569 skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
1570 rx_ring->rx_hdr_len,
1574 rx_ring->rx_stats.alloc_buff_failed++;
1579 /* initialize queue mapping */
1580 skb_record_rx_queue(skb, rx_ring->queue_index);
1581 /* we are reusing so sync this buffer for CPU use */
1582 dma_sync_single_range_for_cpu(rx_ring->dev,
1583 rx_ring->rx_bi[0].dma,
1584 i * rx_ring->rx_hdr_len,
1585 rx_ring->rx_hdr_len,
1588 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1589 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1590 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1591 I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1592 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1593 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
1595 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1596 I40E_RXD_QW1_ERROR_SHIFT;
1597 rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1598 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1600 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1601 I40E_RXD_QW1_PTYPE_SHIFT;
1602 /* sync half-page for reading */
1603 dma_sync_single_range_for_cpu(rx_ring->dev,
1608 prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
1612 if (rx_hbo || rx_sph) {
1616 len = I40E_RX_HDR_SIZE;
1618 len = rx_header_len;
1619 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
1620 } else if (skb->len == 0) {
1622 unsigned char *va = page_address(rx_bi->page) +
1625 len = min(rx_packet_len, rx_ring->rx_hdr_len);
1626 memcpy(__skb_put(skb, len), va, len);
1628 rx_packet_len -= len;
1630 /* Get the rest of the data if this was a header split */
1631 if (rx_packet_len) {
1632 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1634 rx_bi->page_offset + copysize,
1635 rx_packet_len, I40E_RXBUFFER_2048);
1637 /* If the page count is more than 2, then both halves
1638 * of the page are used and we need to free it. Do it
1639 * here instead of in the alloc code. Otherwise one
1640 * of the half-pages might be released between now and
1641 * then, and we wouldn't know which one to use.
1642 * Don't call get_page and free_page since those are
1643 * both expensive atomic operations that just change
1644 * the refcount in opposite directions. Just give the
1645 * page to the stack; he can have our refcount.
1647 if (page_count(rx_bi->page) > 2) {
1648 dma_unmap_page(rx_ring->dev,
1653 rx_bi->page_dma = 0;
1654 rx_ring->rx_stats.realloc_count++;
1656 get_page(rx_bi->page);
1657 /* switch to the other half-page here; the
1658 * allocation code programs the right addr
1659 * into HW. If we haven't used this half-page,
1660 * the address won't be changed, and HW can
1661 * just use it next time through.
1663 rx_bi->page_offset ^= PAGE_SIZE / 2;
1667 I40E_RX_INCREMENT(rx_ring, i);
1670 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1671 struct i40e_rx_buffer *next_buffer;
1673 next_buffer = &rx_ring->rx_bi[i];
1674 next_buffer->skb = skb;
1675 rx_ring->rx_stats.non_eop_descs++;
1679 /* ERR_MASK will only have valid bits if EOP set */
1680 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1681 dev_kfree_skb_any(skb);
1685 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1687 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1688 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1689 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1690 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1691 rx_ring->last_rx_timestamp = jiffies;
1694 /* probably a little skewed due to removing CRC */
1695 total_rx_bytes += skb->len;
1698 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1700 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1702 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1703 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1706 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1707 dev_kfree_skb_any(skb);
1711 i40e_receive_skb(rx_ring, skb, vlan_tag);
1713 rx_desc->wb.qword1.status_error_len = 0;
1715 } while (likely(total_rx_packets < budget));
1717 u64_stats_update_begin(&rx_ring->syncp);
1718 rx_ring->stats.packets += total_rx_packets;
1719 rx_ring->stats.bytes += total_rx_bytes;
1720 u64_stats_update_end(&rx_ring->syncp);
1721 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1722 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1724 return failure ? budget : total_rx_packets;
1728 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1729 * @rx_ring: rx ring to clean
1730 * @budget: how many cleans we're allowed
1732 * Returns number of packets cleaned
1734 static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1736 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1737 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1738 struct i40e_vsi *vsi = rx_ring->vsi;
1739 union i40e_rx_desc *rx_desc;
1740 u32 rx_error, rx_status;
1742 bool failure = false;
1748 struct i40e_rx_buffer *rx_bi;
1749 struct sk_buff *skb;
1751 /* return some buffers to hardware, one at a time is too slow */
1752 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1753 failure = failure ||
1754 i40e_alloc_rx_buffers_1buf(rx_ring,
1759 i = rx_ring->next_to_clean;
1760 rx_desc = I40E_RX_DESC(rx_ring, i);
1761 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1762 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1763 I40E_RXD_QW1_STATUS_SHIFT;
1765 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1768 /* This memory barrier is needed to keep us from reading
1769 * any other fields out of the rx_desc until we know the
1774 if (i40e_rx_is_programming_status(qword)) {
1775 i40e_clean_programming_status(rx_ring, rx_desc);
1776 I40E_RX_INCREMENT(rx_ring, i);
1779 rx_bi = &rx_ring->rx_bi[i];
1781 prefetch(skb->data);
1783 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1784 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1786 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1787 I40E_RXD_QW1_ERROR_SHIFT;
1788 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1790 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1791 I40E_RXD_QW1_PTYPE_SHIFT;
1795 /* Get the header and possibly the whole packet
1796 * If this is an skb from previous receive dma will be 0
1798 skb_put(skb, rx_packet_len);
1799 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1803 I40E_RX_INCREMENT(rx_ring, i);
1806 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1807 rx_ring->rx_stats.non_eop_descs++;
1811 /* ERR_MASK will only have valid bits if EOP set */
1812 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1813 dev_kfree_skb_any(skb);
1817 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1818 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1819 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1820 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1821 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1822 rx_ring->last_rx_timestamp = jiffies;
1825 /* probably a little skewed due to removing CRC */
1826 total_rx_bytes += skb->len;
1829 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1831 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1833 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1834 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1837 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1838 dev_kfree_skb_any(skb);
1842 i40e_receive_skb(rx_ring, skb, vlan_tag);
1844 rx_desc->wb.qword1.status_error_len = 0;
1845 } while (likely(total_rx_packets < budget));
1847 u64_stats_update_begin(&rx_ring->syncp);
1848 rx_ring->stats.packets += total_rx_packets;
1849 rx_ring->stats.bytes += total_rx_bytes;
1850 u64_stats_update_end(&rx_ring->syncp);
1851 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1852 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1854 return failure ? budget : total_rx_packets;
1857 static u32 i40e_buildreg_itr(const int type, const u16 itr)
1861 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1862 /* Don't clear PBA because that can cause lost interrupts that
1863 * came in while we were cleaning/polling
1865 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1866 (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1871 /* a small macro to shorten up some long lines */
1872 #define INTREG I40E_PFINT_DYN_CTLN
1875 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1876 * @vsi: the VSI we care about
1877 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1880 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1881 struct i40e_q_vector *q_vector)
1883 struct i40e_hw *hw = &vsi->back->hw;
1884 bool rx = false, tx = false;
1887 int idx = q_vector->v_idx;
1889 vector = (q_vector->v_idx + vsi->base_vector);
1891 /* avoid dynamic calculation if in countdown mode OR if
1892 * all dynamic is disabled
1894 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1896 if (q_vector->itr_countdown > 0 ||
1897 (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) &&
1898 !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) {
1902 if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) {
1903 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
1904 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
1907 if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) {
1908 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
1909 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
1913 /* get the higher of the two ITR adjustments and
1914 * use the same value for both ITR registers
1915 * when in adaptive mode (Rx and/or Tx)
1917 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
1919 q_vector->tx.itr = q_vector->rx.itr = itr;
1920 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
1922 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
1926 /* only need to enable the interrupt once, but need
1927 * to possibly update both ITR values
1930 /* set the INTENA_MSK_MASK so that this first write
1931 * won't actually enable the interrupt, instead just
		 * updating the ITR (it's bit 31 PF and VF)
		 */
		rxval |= BIT(31);
		/* don't check _DOWN because interrupt isn't being enabled */
1936 wr32(hw, INTREG(vector - 1), rxval);
1940 if (!test_bit(__I40E_DOWN, &vsi->state))
1941 wr32(hw, INTREG(vector - 1), txval);
1943 if (q_vector->itr_countdown)
		q_vector->itr_countdown--;
	else
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
}
1950 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1951 * @napi: napi struct with our devices info in it
1952 * @budget: amount of work driver is allowed to do this pass, in packets
1954 * This function will clean all queues associated with a q_vector.
1956 * Returns the amount of work done
1958 int i40e_napi_poll(struct napi_struct *napi, int budget)
1960 struct i40e_q_vector *q_vector =
1961 container_of(napi, struct i40e_q_vector, napi);
1962 struct i40e_vsi *vsi = q_vector->vsi;
1963 struct i40e_ring *ring;
1964 bool clean_complete = true;
1965 bool arm_wb = false;
1966 int budget_per_ring;
1969 if (test_bit(__I40E_DOWN, &vsi->state)) {
1970 napi_complete(napi);
1974 /* Clear hung_detected bit */
1975 clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
1976 /* Since the actual Tx work is minimal, we can give the Tx a larger
1977 * budget and be more aggressive about cleaning up the Tx descriptors.
1979 i40e_for_each_ring(ring, q_vector->tx) {
1980 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
1981 clean_complete = false;
1984 arm_wb |= ring->arm_wb;
1985 ring->arm_wb = false;
	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;
1992 /* We attempt to distribute budget to each Rx queue fairly, but don't
1993 * allow the budget to go below 1 because that would exit polling early.
1995 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1997 i40e_for_each_ring(ring, q_vector->rx) {
2000 if (ring_is_ps_enabled(ring))
2001 cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
2003 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
2005 work_done += cleaned;
2006 /* if we clean as many as budgeted, we must not be done */
2007 if (cleaned >= budget_per_ring)
2008 clean_complete = false;
2011 /* If work not completed, return budget and polling will return */
	if (!clean_complete) {
tx_only:
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			i40e_enable_wb_on_itr(vsi, q_vector);
		}
		return budget;
	}
2021 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2022 q_vector->arm_wb_state = false;
2024 /* Work is done so exit the polling mode and re-enable the interrupt */
2025 napi_complete_done(napi, work_done);
2026 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
2027 i40e_update_enable_itr(vsi, q_vector);
2028 } else { /* Legacy mode */
		i40e_irq_dynamic_enable_icr0(vsi->back, false);
	}
	return 0;
}
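
/* ATR (Application Targeted Routing): i40e_atr() below samples outgoing TCP
 * traffic (all SYN/FIN/RST packets, otherwise once per atr_sample_rate
 * packets) and programs a Flow Director filter so that matching return
 * traffic is steered to the queue the flow is transmitting on.
 */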
2035 * i40e_atr - Add a Flow Director ATR filter
2036 * @tx_ring: ring to add programming descriptor to
2038 * @tx_flags: send tx flags
2040 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2043 struct i40e_filter_program_desc *fdir_desc;
2044 struct i40e_pf *pf = tx_ring->vsi->back;
2046 unsigned char *network;
2048 struct ipv6hdr *ipv6;
2052 u32 flex_ptype, dtype_cmd;
2056 /* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;
2063 /* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;
2067 /* Currently only IPv4/IPv6 with TCP is supported */
	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
		return;
2071 /* snag network header to get L4 type and address */
2072 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2073 skb_inner_network_header(skb) : skb_network_header(skb);
2075 /* Note: tx_flags gets modified to reflect inner protocols in
2076 * tx_enable_csum function if encap is enabled.
2078 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2079 /* access ihl as u8 to avoid unaligned access on ia64 */
2080 hlen = (hdr.network[0] & 0x0F) << 2;
2081 l4_proto = hdr.ipv4->protocol;
2083 hlen = hdr.network - skb->data;
2084 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
2085 hlen -= hdr.network - skb->data;
	}

	if (l4_proto != IPPROTO_TCP)
		return;
2091 th = (struct tcphdr *)(hdr.network + hlen);
2093 /* Due to lack of space, no more new filters can be programmed */
	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;
2096 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2097 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
2098 /* HW ATR eviction will take care of removing filters on FIN
		 * and RST packets.
		 */
		if (th->fin || th->rst)
			return;
	}
2105 tx_ring->atr_count++;
2107 /* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;
2114 tx_ring->atr_count = 0;
2116 /* grab the next descriptor */
2117 i = tx_ring->next_to_use;
2118 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2123 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2124 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2125 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2126 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2127 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2128 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2129 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2131 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2133 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2135 dtype_cmd |= (th->fin || th->rst) ?
2136 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2137 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2138 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2139 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2141 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2142 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2144 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2145 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2147 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2148 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
		dtype_cmd |=
			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2151 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2152 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	else
		dtype_cmd |=
			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2156 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2157 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2159 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2160 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
2161 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2163 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2164 fdir_desc->rsvd = cpu_to_le32(0);
2165 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2166 fdir_desc->fd_id = cpu_to_le32(0);
2170 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2172 * @tx_ring: ring to send buffer on
2173 * @flags: the tx flags to be set
2175 * Checks the skb and set up correspondingly several generic transmit flags
2176 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 * Returns an error code if the frame should be dropped, otherwise
 * returns 0 to indicate the flags have been set properly.
2181 #ifdef I40E_FCOE
2182 inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2183 struct i40e_ring *tx_ring,
2184 u32 *flags)
2185 #else
2186 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2187 struct i40e_ring *tx_ring,
2188 u32 *flags)
2189 #endif
2190 {
2191 __be16 protocol = skb->protocol;
2192 u32 tx_flags = 0;
2194 if (protocol == htons(ETH_P_8021Q) &&
2195 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2196 /* When HW VLAN acceleration is turned off by the user the
2197 * stack sets the protocol to 8021q so that the driver
2198 * can take any steps required to support the SW only
2199 * VLAN handling. In our case the driver doesn't need
2200 * to take any further steps so just set the protocol
2201 * to the encapsulated ethertype.
2202 */
2203 skb->protocol = vlan_get_protocol(skb);
2204 goto out;
2205 }
2207 /* if we have a HW VLAN tag being added, default to the HW one */
2208 if (skb_vlan_tag_present(skb)) {
2209 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2210 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2211 /* else if it is a SW VLAN, check the next protocol and store the tag */
2212 } else if (protocol == htons(ETH_P_8021Q)) {
2213 struct vlan_hdr *vhdr, _vhdr;
2215 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2216 if (!vhdr)
2217 return -EINVAL;
2219 protocol = vhdr->h_vlan_encapsulated_proto;
2220 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2221 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2222 }
2224 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2225 goto out;
2227 /* Insert 802.1p priority into VLAN header */
2228 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2229 (skb->priority != TC_PRIO_CONTROL)) {
2230 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2231 tx_flags |= (skb->priority & 0x7) <<
2232 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
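/* For a SW VLAN the new priority has to be written back into the tag
* already present in the packet data; for any other frame we simply ask
* the hardware to insert the tag carried in tx_flags.
*/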
2233 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2234 struct vlan_ethhdr *vhdr;
2235 int rc;
2237 rc = skb_cow_head(skb, 0);
2238 if (rc < 0)
2239 return rc;
2240 vhdr = (struct vlan_ethhdr *)skb->data;
2241 vhdr->h_vlan_TCI = htons(tx_flags >>
2242 I40E_TX_FLAGS_VLAN_SHIFT);
2243 } else {
2244 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2245 }
2246 }
2248 out:
2249 *flags = tx_flags;
2250 return 0;
2251 }
2253 /**
2254 * i40e_tso - set up the tso context descriptor
2255 * @tx_ring: ptr to the ring to send
2256 * @skb: ptr to the skb we're sending
2257 * @hdr_len: ptr to the size of the packet header
2258 * @cd_type_cmd_tso_mss: Quad Word 1
2260 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2261 **/
2262 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
2263 u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
2264 {
2265 u64 cd_cmd, cd_tso_len, cd_mss;
2266 union {
2267 struct iphdr *v4;
2268 struct ipv6hdr *v6;
2269 unsigned char *hdr;
2270 } ip;
2271 union {
2272 struct tcphdr *tcp;
2273 struct udphdr *udp;
2274 unsigned char *hdr;
2275 } l4;
2276 u32 paylen, l4_offset;
2277 int err;
2279 if (skb->ip_summed != CHECKSUM_PARTIAL)
2280 return 0;
2282 if (!skb_is_gso(skb))
2283 return 0;
2285 err = skb_cow_head(skb, 0);
2286 if (err < 0)
2287 return err;
2289 ip.hdr = skb_network_header(skb);
2290 l4.hdr = skb_transport_header(skb);
2292 /* initialize outer IP header fields */
2293 if (ip.v4->version == 4) {
2294 ip.v4->tot_len = 0;
2295 ip.v4->check = 0;
2296 } else {
2297 ip.v6->payload_len = 0;
2298 }
2300 if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
2301 SKB_GSO_UDP_TUNNEL_CSUM)) {
2302 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
2303 /* determine offset of outer transport header */
2304 l4_offset = l4.hdr - skb->data;
2306 /* remove payload length from outer checksum */
2307 paylen = skb->len - l4_offset;
2308 csum_replace_by_diff(&l4.udp->check, htonl(paylen));
2309 }
2311 /* reset pointers to inner headers */
2312 ip.hdr = skb_inner_network_header(skb);
2313 l4.hdr = skb_inner_transport_header(skb);
2315 /* initialize inner IP header fields */
2316 if (ip.v4->version == 4) {
2317 ip.v4->tot_len = 0;
2318 ip.v4->check = 0;
2319 } else {
2320 ip.v6->payload_len = 0;
2321 }
2322 }
2324 /* determine offset of inner transport header */
2325 l4_offset = l4.hdr - skb->data;
2327 /* remove payload length from inner checksum */
2328 paylen = skb->len - l4_offset;
2329 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
2331 /* compute length of segmentation header */
2332 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2334 /* find the field values */
2335 cd_cmd = I40E_TX_CTX_DESC_TSO;
2336 cd_tso_len = skb->len - *hdr_len;
2337 cd_mss = skb_shinfo(skb)->gso_size;
2338 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2339 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2340 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2341 return 1;
2342 }
2344 /**
2345 * i40e_tsyn - set up the tsyn context descriptor
2346 * @tx_ring: ptr to the ring to send
2347 * @skb: ptr to the skb we're sending
2348 * @tx_flags: the collected send information
2349 * @cd_type_cmd_tso_mss: Quad Word 1
2351 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2352 **/
2353 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2354 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2355 {
2356 struct i40e_pf *pf;
2358 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2359 return 0;
2361 /* Tx timestamps cannot be sampled when doing TSO */
2362 if (tx_flags & I40E_TX_FLAGS_TSO)
2363 return 0;
2365 /* only timestamp the outbound packet if the user has requested it and
2366 * we are not already transmitting a packet to be timestamped
2367 */
2368 pf = i40e_netdev_to_pf(tx_ring->netdev);
2369 if (!(pf->flags & I40E_FLAG_PTP))
2370 return 0;
2372 if (pf->ptp_tx &&
2373 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
2374 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2375 pf->ptp_tx_skb = skb_get(skb);
2376 } else {
2377 return 0;
2378 }
2380 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2381 I40E_TXD_CTX_QW1_CMD_SHIFT;
2383 return 1;
2384 }
2386 /**
2387 * i40e_tx_enable_csum - Enable Tx checksum offloads
2388 * @skb: send buffer
2389 * @tx_flags: pointer to Tx flags currently set
2390 * @td_cmd: Tx descriptor command bits to set
2391 * @td_offset: Tx descriptor header offsets to set
2392 * @tx_ring: Tx descriptor ring
2393 * @cd_tunneling: ptr to context desc bits
2395 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2396 u32 *td_cmd, u32 *td_offset,
2397 struct i40e_ring *tx_ring,
2398 u32 *cd_tunneling)
2399 {
2400 union {
2401 struct iphdr *v4;
2402 struct ipv6hdr *v6;
2403 unsigned char *hdr;
2404 } ip;
2405 union {
2406 struct tcphdr *tcp;
2407 struct udphdr *udp;
2408 unsigned char *hdr;
2409 } l4;
2410 unsigned char *exthdr;
2411 u32 offset, cmd = 0, tunnel = 0;
2412 __be16 frag_off;
2413 u8 l4_proto = 0;
2415 if (skb->ip_summed != CHECKSUM_PARTIAL)
2416 return 0;
2418 ip.hdr = skb_network_header(skb);
2419 l4.hdr = skb_transport_header(skb);
2421 /* compute outer L2 header size */
2422 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
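/* Note: the MACLEN field filled in above is in 2-byte words, e.g. a plain
* 14-byte Ethernet header programs a value of 7.
*/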
2424 if (skb->encapsulation) {
2425 /* define outer network header type */
2426 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2427 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2428 I40E_TX_CTX_EXT_IP_IPV4 :
2429 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2431 l4_proto = ip.v4->protocol;
2432 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2433 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
2435 exthdr = ip.hdr + sizeof(*ip.v6);
2436 l4_proto = ip.v6->nexthdr;
2437 if (l4.hdr != exthdr)
2438 ipv6_skip_exthdr(skb, exthdr - skb->data,
2439 &l4_proto, &frag_off);
2440 }
2442 /* compute outer L3 header size */
2443 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2444 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2446 /* switch IP header pointer from outer to inner header */
2447 ip.hdr = skb_inner_network_header(skb);
2449 /* define outer transport */
2450 switch (l4_proto) {
2451 case IPPROTO_UDP:
2452 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
2453 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2454 break;
2455 case IPPROTO_GRE:
2456 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
2457 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2458 break;
2459 default:
2460 if (*tx_flags & I40E_TX_FLAGS_TSO)
2461 return -1;
2463 skb_checksum_help(skb);
2464 return 0;
2465 }
2467 /* compute tunnel header size */
2468 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2469 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2471 /* indicate if we need to offload outer UDP header */
2472 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
2473 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2474 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2476 /* record tunnel offload values */
2477 *cd_tunneling |= tunnel;
2479 /* switch L4 header pointer from outer to inner */
2480 l4.hdr = skb_inner_transport_header(skb);
2483 /* reset type as we transition from outer to inner headers */
2484 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2485 if (ip.v4->version == 4)
2486 *tx_flags |= I40E_TX_FLAGS_IPV4;
2487 if (ip.v6->version == 6)
2488 *tx_flags |= I40E_TX_FLAGS_IPV6;
2489 }
2491 /* Enable IP checksum offloads */
2492 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2493 l4_proto = ip.v4->protocol;
2494 /* the stack computes the IP header already, the only time we
2495 * need the hardware to recompute it is in the case of TSO.
2496 */
2497 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2498 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2499 I40E_TX_DESC_CMD_IIPT_IPV4;
2500 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2501 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2503 exthdr = ip.hdr + sizeof(*ip.v6);
2504 l4_proto = ip.v6->nexthdr;
2505 if (l4.hdr != exthdr)
2506 ipv6_skip_exthdr(skb, exthdr - skb->data,
2507 &l4_proto, &frag_off);
2508 }
2510 /* compute inner L3 header size */
2511 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
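/* The L4 length fields below are in 4-byte words: TCP programs its doff
* (5 for a header with no options), while UDP and SCTP use their fixed
* header sizes of 8 and 12 bytes respectively.
*/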
2513 /* Enable L4 checksum offloads */
2514 switch (l4_proto) {
2515 case IPPROTO_TCP:
2516 /* enable checksum offloads */
2517 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2518 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2519 break;
2520 case IPPROTO_SCTP:
2521 /* enable SCTP checksum offload */
2522 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2523 offset |= (sizeof(struct sctphdr) >> 2) <<
2524 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2525 break;
2526 case IPPROTO_UDP:
2527 /* enable UDP checksum offload */
2528 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2529 offset |= (sizeof(struct udphdr) >> 2) <<
2530 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2531 break;
2532 default:
2533 if (*tx_flags & I40E_TX_FLAGS_TSO)
2534 return -1;
2535 skb_checksum_help(skb);
2536 return 0;
2537 }
2539 *td_cmd |= cmd;
2540 *td_offset |= offset;
2542 return 1;
2543 }
2545 /**
2546 * i40e_create_tx_ctx - Build the Tx context descriptor
2547 * @tx_ring: ring to create the descriptor on
2548 * @cd_type_cmd_tso_mss: Quad Word 1
2549 * @cd_tunneling: Quad Word 0 - bits 0-31
2550 * @cd_l2tag2: Quad Word 0 - bits 32-63
2551 **/
2552 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2553 const u64 cd_type_cmd_tso_mss,
2554 const u32 cd_tunneling, const u32 cd_l2tag2)
2555 {
2556 struct i40e_tx_context_desc *context_desc;
2557 int i = tx_ring->next_to_use;
2559 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2560 !cd_tunneling && !cd_l2tag2)
2561 return;
2563 /* grab the next descriptor */
2564 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2566 i++;
2567 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2569 /* cpu_to_le32 and assign to struct fields */
2570 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2571 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2572 context_desc->rsvd = cpu_to_le16(0);
2573 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2574 }
2576 /**
2577 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2578 * @tx_ring: the ring to be checked
2579 * @size: the size buffer we want to assure is available
2581 * Returns -EBUSY if a stop is needed, else 0
2582 **/
2583 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2584 {
2585 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2586 /* Memory barrier before checking head and tail */
2587 smp_mb();
2589 /* Check again in a case another CPU has just made room available. */
2590 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2591 return -EBUSY;
2593 /* A reprieve! - use start_queue because it doesn't call schedule */
2594 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2595 ++tx_ring->tx_stats.restart_queue;
2596 return 0;
2597 }
2599 /**
2600 * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
2601 * @skb: send buffer
2603 * Note: Our HW can't scatter-gather more than 8 fragments to build
2604 * a packet on the wire and so we need to figure out the cases where we
2605 * need to linearize the skb.
2606 **/
2607 bool __i40e_chk_linearize(struct sk_buff *skb)
2608 {
2609 const struct skb_frag_struct *frag, *stale;
2610 int gso_size, nr_frags, sum;
2612 /* check to see if TSO is enabled, if so we may get a reprieve */
2613 gso_size = skb_shinfo(skb)->gso_size;
2614 if (unlikely(!gso_size))
2615 return true;
2617 /* no need to check if number of frags is less than 8 */
2618 nr_frags = skb_shinfo(skb)->nr_frags;
2619 if (nr_frags < I40E_MAX_BUFFER_TXD)
2620 return false;
2622 /* We need to walk through the list and validate that each group
2623 * of 6 fragments totals at least gso_size. However we don't need
2624 * to perform such validation on the first or last 6 since the first
2625 * 6 cannot inherit any data from a descriptor before them, and the
2626 * last 6 cannot inherit any data from a descriptor after them.
2627 */
2628 nr_frags -= I40E_MAX_BUFFER_TXD - 1;
2629 frag = &skb_shinfo(skb)->frags[0];
2631 /* Initialize size to the negative value of gso_size minus 1. We
2632 * use this as the worst case scenario in which the frag ahead
2633 * of us only provides one byte which is why we are limited to 6
2634 * descriptors for a single transmit as the header and previous
2635 * fragment are already consuming 2 descriptors.
2636 */
2637 sum = 1 - gso_size;
2639 /* Add size of frags 1 through 5 to create our initial sum */
2640 sum += skb_frag_size(++frag);
2641 sum += skb_frag_size(++frag);
2642 sum += skb_frag_size(++frag);
2643 sum += skb_frag_size(++frag);
2644 sum += skb_frag_size(++frag);
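/* Illustrative walk-through: with a gso_size of 1400 the sum starts at
* -1399, so the first window of six fragments has to carry roughly a full
* segment of data; each loop iteration below then adds the newest fragment
* and subtracts the oldest, applying the same test to every window.
*/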
2646 /* Walk through fragments adding latest fragment, testing it, and
2647 * then removing stale fragments from the sum.
2648 */
2649 stale = &skb_shinfo(skb)->frags[0];
2650 for (;;) {
2651 sum += skb_frag_size(++frag);
2653 /* if sum is negative we failed to make sufficient progress */
2654 if (sum < 0)
2655 return true;
2657 /* use pre-decrement to avoid processing last fragment */
2658 if (!--nr_frags)
2659 break;
2661 sum -= skb_frag_size(++stale);
2662 }
2664 return false;
2665 }
2667 /**
2668 * i40e_tx_map - Build the Tx descriptor
2669 * @tx_ring: ring to send buffer on
2670 * @skb: send buffer
2671 * @first: first buffer info buffer to use
2672 * @tx_flags: collected send information
2673 * @hdr_len: size of the packet header
2674 * @td_cmd: the command field in the descriptor
2675 * @td_offset: offset for checksum or crc
2676 **/
2677 #ifdef I40E_FCOE
2678 inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2679 struct i40e_tx_buffer *first, u32 tx_flags,
2680 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2681 #else
2682 static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2683 struct i40e_tx_buffer *first, u32 tx_flags,
2684 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2685 #endif
2686 {
2687 unsigned int data_len = skb->data_len;
2688 unsigned int size = skb_headlen(skb);
2689 struct skb_frag_struct *frag;
2690 struct i40e_tx_buffer *tx_bi;
2691 struct i40e_tx_desc *tx_desc;
2692 u16 i = tx_ring->next_to_use;
2693 u32 td_tag = 0;
2694 dma_addr_t dma;
2695 u16 gso_segs;
2696 u16 desc_count = 0;
2697 bool tail_bump = true;
2698 bool do_rs = false;
2700 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2701 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2702 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2703 I40E_TX_FLAGS_VLAN_SHIFT;
2704 }
2706 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2707 gso_segs = skb_shinfo(skb)->gso_segs;
2708 else
2709 gso_segs = 1;
2711 /* multiply data chunks by size of headers */
2712 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2713 first->gso_segs = gso_segs;
2715 first->tx_flags = tx_flags;
2717 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2719 tx_desc = I40E_TX_DESC(tx_ring, i);
2720 tx_bi = first;
2722 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2723 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2725 if (dma_mapping_error(tx_ring->dev, dma))
2726 goto dma_error;
2728 /* record length, and DMA address */
2729 dma_unmap_len_set(tx_bi, len, size);
2730 dma_unmap_addr_set(tx_bi, dma, dma);
2732 /* align size to end of page */
2733 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
2734 tx_desc->buffer_addr = cpu_to_le64(dma);
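/* A buffer larger than I40E_MAX_DATA_PER_TXD is split across several
* descriptors; each chunk is sized so it ends on a read-request-size
* boundary, which is why max_data was aligned against the DMA address.
*/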
2736 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2737 tx_desc->cmd_type_offset_bsz =
2738 build_ctob(td_cmd, td_offset,
2739 max_data, td_tag);
2741 tx_desc++;
2742 i++;
2743 desc_count++;
2745 if (i == tx_ring->count) {
2746 tx_desc = I40E_TX_DESC(tx_ring, 0);
2747 i = 0;
2748 }
2750 dma += max_data;
2751 size -= max_data;
2753 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2754 tx_desc->buffer_addr = cpu_to_le64(dma);
2755 }
2757 if (likely(!data_len))
2758 break;
2760 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2761 size, td_tag);
2763 tx_desc++;
2764 i++;
2765 desc_count++;
2767 if (i == tx_ring->count) {
2768 tx_desc = I40E_TX_DESC(tx_ring, 0);
2769 i = 0;
2770 }
2772 size = skb_frag_size(frag);
2773 data_len -= size;
2775 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2776 DMA_TO_DEVICE);
2778 tx_bi = &tx_ring->tx_bi[i];
2779 }
2781 /* set next_to_watch value indicating a packet is present */
2782 first->next_to_watch = tx_desc;
2784 i++;
2785 if (i == tx_ring->count)
2786 i = 0;
2788 tx_ring->next_to_use = i;
2790 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2791 tx_ring->queue_index),
2792 first->bytecount);
2793 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
2795 /* Algorithm to optimize tail and RS bit setting:
2796 * if xmit_more is supported
2797 * if xmit_more is true
2798 * do not update tail and do not mark RS bit.
2799 * if xmit_more is false and last xmit_more was false
2800 * if every packet spanned less than 4 desc
2801 * then set RS bit on 4th packet and update tail
2802 * on every packet
2803 * else
2804 * update tail and set RS bit on every packet.
2805 * if xmit_more is false and last_xmit_more was true
2806 * update tail and set RS bit.
2808 * Optimization: wmb to be issued only in case of tail update.
2809 * Also optimize the Descriptor WB path for RS bit with the same
2810 * algorithm.
2812 * Note: If there are less than 4 packets
2813 * pending and interrupts were disabled the service task will
2814 * trigger a force WB.
2815 */
2816 if (skb->xmit_more &&
2817 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2818 tx_ring->queue_index))) {
2819 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2820 tail_bump = false;
2821 } else if (!skb->xmit_more &&
2822 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2823 tx_ring->queue_index)) &&
2824 (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
2825 (tx_ring->packet_stride < WB_STRIDE) &&
2826 (desc_count < WB_STRIDE)) {
2827 tx_ring->packet_stride++;
2828 } else {
2829 tx_ring->packet_stride = 0;
2830 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2831 do_rs = true;
2832 }
2833 if (do_rs)
2834 tx_ring->packet_stride = 0;
2836 tx_desc->cmd_type_offset_bsz =
2837 build_ctob(td_cmd, td_offset, size, td_tag) |
2838 cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
2839 I40E_TX_DESC_CMD_EOP) <<
2840 I40E_TXD_QW1_CMD_SHIFT);
2842 /* notify HW of packet */
2843 if (!tail_bump) {
2844 prefetchw(tx_desc + 1);
2845 } else {
2847 /* Force memory writes to complete before letting h/w
2848 * know there are new descriptors to fetch. (Only
2849 * applicable for weak-ordered memory model archs,
2850 * such as IA-64).
2851 */
2852 wmb();
2853 writel(i, tx_ring->tail);
2854 }
2856 return;
2858 dma_error:
2859 dev_info(tx_ring->dev, "TX DMA map failed\n");
2861 /* clear dma mappings for failed tx_bi map */
2862 for (;;) {
2863 tx_bi = &tx_ring->tx_bi[i];
2864 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
2865 if (tx_bi == first)
2866 break;
2867 if (i == 0)
2868 i = tx_ring->count;
2869 i--;
2870 }
2872 tx_ring->next_to_use = i;
2873 }
2875 /**
2876 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2877 * @skb: send buffer
2878 * @tx_ring: ring to send buffer on
2880 * Returns NETDEV_TX_OK if sent, else an error code
2881 **/
2882 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2883 struct i40e_ring *tx_ring)
2884 {
2885 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2886 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2887 struct i40e_tx_buffer *first;
2888 u32 td_offset = 0;
2889 u32 tx_flags = 0;
2890 __be16 protocol;
2891 u32 td_cmd = 0;
2892 u8 hdr_len = 0;
2893 int tso, count;
2894 int tsyn;
2896 /* prefetch the data, we'll need it later */
2897 prefetch(skb->data);
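/* Work out how many descriptors this skb needs; if its fragment layout
* would exceed the hardware limit of 8 buffers per packet, linearize it
* and recompute the count from the (now contiguous) length.
*/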
2899 count = i40e_xmit_descriptor_count(skb);
2900 if (i40e_chk_linearize(skb, count)) {
2901 if (__skb_linearize(skb))
2902 goto out_drop;
2903 count = i40e_txd_use_count(skb->len);
2904 tx_ring->tx_stats.tx_linearize++;
2905 }
2907 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2908 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2909 * + 4 desc gap to avoid the cache line where head is,
2910 * + 1 desc for context descriptor,
2911 * otherwise try next time
2912 */
2913 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2914 tx_ring->tx_stats.tx_busy++;
2915 return NETDEV_TX_BUSY;
2916 }
2918 /* prepare the xmit flags */
2919 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2920 goto out_drop;
2922 /* obtain protocol of skb */
2923 protocol = vlan_get_protocol(skb);
2925 /* record the location of the first descriptor for this packet */
2926 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2928 /* setup IPv4/IPv6 offloads */
2929 if (protocol == htons(ETH_P_IP))
2930 tx_flags |= I40E_TX_FLAGS_IPV4;
2931 else if (protocol == htons(ETH_P_IPV6))
2932 tx_flags |= I40E_TX_FLAGS_IPV6;
2934 tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);
2936 if (tso < 0)
2937 goto out_drop;
2938 else if (tso)
2939 tx_flags |= I40E_TX_FLAGS_TSO;
2941 /* Always offload the checksum, since it's in the data descriptor */
2942 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2943 tx_ring, &cd_tunneling);
2945 if (tso < 0)
2946 goto out_drop;
2947 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2949 if (tsyn)
2950 tx_flags |= I40E_TX_FLAGS_TSYN;
2952 skb_tx_timestamp(skb);
2954 /* always enable CRC insertion offload */
2955 td_cmd |= I40E_TX_DESC_CMD_ICRC;
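/* Emit the context descriptor (TSO, timestamp and tunnelling parameters)
* ahead of the data descriptors it applies to; it is skipped entirely when
* none of those offloads are in use.
*/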
2957 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2958 cd_tunneling, cd_l2tag2);
2960 /* Add Flow Director ATR if it's enabled.
2962 * NOTE: this must always be directly before the data descriptor.
2964 i40e_atr(tx_ring, skb, tx_flags);
2966 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2967 td_cmd, td_offset);
2969 return NETDEV_TX_OK;
2971 out_drop:
2972 dev_kfree_skb_any(skb);
2973 return NETDEV_TX_OK;
2974 }
2976 /**
2977 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2978 * @skb: send buffer
2979 * @netdev: network interface device structure
2981 * Returns NETDEV_TX_OK if sent, else an error code
2982 **/
2983 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2984 {
2985 struct i40e_netdev_priv *np = netdev_priv(netdev);
2986 struct i40e_vsi *vsi = np->vsi;
2987 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
2989 /* hardware can't handle really short frames, hardware padding works
2990 * beyond this point
2991 */
2992 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
2993 return NETDEV_TX_OK;
2995 return i40e_xmit_frame_ring(skb, tx_ring);
2996 }