1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2014 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
27 #include <linux/prefetch.h>
28 #include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
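/* Editorial note, not original driver text: build_ctob() assembles quadword 1
 * of a Tx data descriptor from its four fields. The transmit path uses it
 * as, for example,
 *
 *	tx_desc->cmd_type_offset_bsz =
 *		build_ctob(I40E_TXD_CMD, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 *
 * (as in the Flow Director dummy descriptor below): DTYPE_DATA plus the
 * EOP/RS commands, no header offsets, the buffer length, and no VLAN tag.
 */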
42 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
43 #define I40E_FD_CLEAN_DELAY 10
45 * i40e_program_fdir_filter - Program a Flow Director filter
46 * @fdir_data: Packet data that will be filter parameters
47 * @raw_packet: the pre-allocated packet buffer for FDir
49 * @add: True for add/update, False for remove
51 int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
52 struct i40e_pf *pf, bool add)
54 struct i40e_filter_program_desc *fdir_desc;
55 struct i40e_tx_buffer *tx_buf, *first;
56 struct i40e_tx_desc *tx_desc;
57 struct i40e_ring *tx_ring;
58 unsigned int fpt, dcc;
	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

88 dma = dma_map_single(dev, raw_packet,
89 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

93 /* grab the next descriptor */
94 i = tx_ring->next_to_use;
95 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
96 first = &tx_ring->tx_bi[i];
97 memset(first, 0, sizeof(struct i40e_tx_buffer));
99 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
101 fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
102 I40E_TXD_FLTR_QW0_QINDEX_MASK;
104 fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
105 I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
107 fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
108 I40E_TXD_FLTR_QW0_PCTYPE_MASK;
110 /* Use LAN VSI Id if not programmed by user */
111 if (fdir_data->dest_vsi == 0)
112 fpt |= (pf->vsi[pf->lan_vsi]->id) <<
113 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
116 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
117 I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

128 dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
129 I40E_TXD_FLTR_QW1_DEST_MASK;
131 dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
132 I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
134 if (fdir_data->cnt_index != 0) {
135 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
136 dcc |= ((u32)fdir_data->cnt_index <<
137 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
138 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
141 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
142 fdir_desc->rsvd = cpu_to_le32(0);
143 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
144 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
146 /* Now program a dummy descriptor */
147 i = tx_ring->next_to_use;
148 tx_desc = I40E_TX_DESC(tx_ring, i);
149 tx_buf = &tx_ring->tx_bi[i];
151 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
153 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
155 /* record length, and DMA address */
156 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
157 dma_unmap_addr_set(tx_buf, dma, dma);
159 tx_desc->buffer_addr = cpu_to_le64(dma);
160 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
162 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
163 tx_buf->raw_buf = (void *)raw_packet;
165 tx_desc->cmd_type_offset_bsz =
166 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
183 #define IP_HEADER_OFFSET 14
184 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
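/* Editorial note: the dummy packet lengths used by the Flow Director helpers
 * are plain header sums over the template packets declared below, assuming
 * untagged Ethernet: 42 = 14 (Ethernet) + 20 (IPv4) + 8 (UDP); the TCP
 * variant further down is 14 + 20 + 20 = 54 and the IPv4-only variant is
 * 14 + 20 = 34, which is also why IP_HEADER_OFFSET is 14.
 */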
186 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
187 * @vsi: pointer to the targeted VSI
188 * @fd_data: the flow director data required for the FDir descriptor
189 * @add: true adds a filter, false removes it
191 * Returns 0 if the filters were successfully added or removed
193 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
194 struct i40e_fdir_filter *fd_data,
197 struct i40e_pf *pf = vsi->back;
203 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
204 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
205 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
207 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
210 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
212 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
213 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
214 + sizeof(struct iphdr));
216 ip->daddr = fd_data->dst_ip[0];
217 udp->dest = fd_data->dst_port;
218 ip->saddr = fd_data->src_ip[0];
219 udp->source = fd_data->src_port;
221 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
222 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
225 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
226 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	return err ? -EOPNOTSUPP : 0;
}

241 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
243 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
244 * @vsi: pointer to the targeted VSI
245 * @fd_data: the flow director data required for the FDir descriptor
246 * @add: true adds a filter, false removes it
248 * Returns 0 if the filters were successfully added or removed
250 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
251 struct i40e_fdir_filter *fd_data,
254 struct i40e_pf *pf = vsi->back;
261 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
262 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
263 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
264 0x0, 0x72, 0, 0, 0, 0};
266 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
269 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
271 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
272 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
273 + sizeof(struct iphdr));
275 ip->daddr = fd_data->dst_ip[0];
276 tcp->dest = fd_data->dst_port;
277 ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
283 if (I40E_DEBUG_FD & pf->hw.debug_mask)
284 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
289 (pf->fd_tcp_rule - 1) : 0;
290 if (pf->fd_tcp_rule == 0) {
291 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
292 if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
298 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
302 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
303 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	return err ? -EOPNOTSUPP : 0;
}

319 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
320 * a specific flow spec
321 * @vsi: pointer to the targeted VSI
322 * @fd_data: the flow director data required for the FDir descriptor
323 * @add: true adds a filter, false removes it
325 * Always returns -EOPNOTSUPP
327 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}

334 #define I40E_IP_DUMMY_PACKET_LEN 34
336 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
337 * a specific flow spec
338 * @vsi: pointer to the targeted VSI
339 * @fd_data: the flow director data required for the FDir descriptor
340 * @add: true adds a filter, false removes it
342 * Returns 0 if the filters were successfully added or removed
344 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
345 struct i40e_fdir_filter *fd_data,
348 struct i40e_pf *pf = vsi->back;
354 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
355 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
358 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
359 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
360 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
363 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
364 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
366 ip->saddr = fd_data->src_ip[0];
367 ip->daddr = fd_data->dst_ip[0];
371 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	return err ? -EOPNOTSUPP : 0;
}

394 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
395 * @vsi: pointer to the targeted VSI
396 * @cmd: command to get or set RX flow classification rules
397 * @add: true adds a filter, false removes it
400 int i40e_add_del_fdir(struct i40e_vsi *vsi,
401 struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}

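/* Editorial usage note (an assumption about the call path, not original
 * driver text): i40e_add_del_fdir() is normally reached from ethtool's
 * ntuple/Flow Director interface, e.g. something like
 *
 *	ethtool -N <ifname> flow-type tcp4 dst-ip 10.0.0.1 dst-port 80 action 2
 *
 * which arrives here as an i40e_fdir_filter with flow_type TCP_V4_FLOW and is
 * dispatched to the matching dummy-packet builder above.
 */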
446 * i40e_fd_handle_status - check the Programming Status for FD
447 * @rx_ring: the Rx ring for this descriptor
448 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
449 * @prog_id: the id originally used for programming
451 * This is used to verify if the FD programming or invalidation
452 * requested by SW to the HW is successful or not and take actions accordingly.
454 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
455 union i40e_rx_desc *rx_desc, u8 prog_id)
457 struct i40e_pf *pf = rx_ring->vsi->back;
458 struct pci_dev *pdev = pf->pdev;
459 u32 fcnt_prog, fcnt_avail;
463 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
464 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
465 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
467 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
468 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
469 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
470 (I40E_DEBUG_FD & pf->hw.debug_mask))
471 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
474 /* Check if the programming error is for ATR.
475 * If so, auto disable ATR and set a state for
476 * flush in progress. Next time we come here if flush is in
477 * progress do nothing, once flush is complete the state will
480 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
484 /* store the current atr filter count */
485 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
487 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
488 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
489 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
490 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
493 /* filter programming failed most likely due to table full */
494 fcnt_prog = i40e_get_global_fd_count(pf);
495 fcnt_avail = pf->fdir_pf_filter_count;
496 /* If ATR is running fcnt_prog can quickly change,
497 * if we are very close to full, it makes sense to disable
498 * FD ATR/SB and then re-enable it when there is room.
500 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
501 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
502 !(pf->auto_disable_flags &
503 I40E_FLAG_FD_SB_ENABLED)) {
504 if (I40E_DEBUG_FD & pf->hw.debug_mask)
505 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
506 pf->auto_disable_flags |=
507 I40E_FLAG_FD_SB_ENABLED;
511 "FD filter programming failed due to incorrect filter parameters\n");
513 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
514 if (I40E_DEBUG_FD & pf->hw.debug_mask)
515 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
516 rx_desc->wb.qword0.hi_dword.fd_id);
521 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
522 * @ring: the ring that owns the buffer
523 * @tx_buffer: the buffer to free
525 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
526 struct i40e_tx_buffer *tx_buffer)
528 if (tx_buffer->skb) {
529 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
530 kfree(tx_buffer->raw_buf);
532 dev_kfree_skb_any(tx_buffer->skb);
534 if (dma_unmap_len(tx_buffer, len))
535 dma_unmap_single(ring->dev,
536 dma_unmap_addr(tx_buffer, dma),
537 dma_unmap_len(tx_buffer, len),
539 } else if (dma_unmap_len(tx_buffer, len)) {
540 dma_unmap_page(ring->dev,
541 dma_unmap_addr(tx_buffer, dma),
542 dma_unmap_len(tx_buffer, len),
545 tx_buffer->next_to_watch = NULL;
546 tx_buffer->skb = NULL;
547 dma_unmap_len_set(tx_buffer, len, 0);
548 /* tx_buffer must be completely set up in the transmit path */
552 * i40e_clean_tx_ring - Free any empty Tx buffers
553 * @tx_ring: ring to be cleaned
555 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
557 unsigned long bi_size;
560 /* ring already cleared, nothing to do */
564 /* Free all the Tx ring sk_buffs */
565 for (i = 0; i < tx_ring->count; i++)
566 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
568 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
569 memset(tx_ring->tx_bi, 0, bi_size);
571 /* Zero out the descriptor ring */
572 memset(tx_ring->desc, 0, tx_ring->size);
574 tx_ring->next_to_use = 0;
575 tx_ring->next_to_clean = 0;
577 if (!tx_ring->netdev)
580 /* cleanup Tx queue statistics */
581 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
582 tx_ring->queue_index));
586 * i40e_free_tx_resources - Free Tx resources per queue
587 * @tx_ring: Tx descriptor ring for a specific queue
589 * Free all transmit software resources
591 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
593 i40e_clean_tx_ring(tx_ring);
594 kfree(tx_ring->tx_bi);
595 tx_ring->tx_bi = NULL;
598 dma_free_coherent(tx_ring->dev, tx_ring->size,
599 tx_ring->desc, tx_ring->dma);
600 tx_ring->desc = NULL;
605 * i40e_get_tx_pending - how many tx descriptors not processed
606 * @tx_ring: the ring of descriptors
608 * Since there is no access to the ring head register
609 * in XL710, we need to use our local copies
u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 head, tail;

615 head = i40e_get_head(ring);
616 tail = readl(ring->tail);
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
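/* Editorial worked example: on a 512-descriptor ring with head write-back
 * reporting head = 500 and the tail register at 10, the pending count is
 * 10 + 512 - 500 = 22, i.e. 22 descriptors are still queued to hardware.
 * When head has not wrapped past tail, the simpler tail - head form applies.
 */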
625 #define WB_STRIDE 0x3
628 * i40e_clean_tx_irq - Reclaim resources after transmit completes
629 * @tx_ring: tx ring to clean
630 * @budget: how many cleans we're allowed
632 * Returns true if there's any budget left (e.g. the clean is finished)
634 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
636 u16 i = tx_ring->next_to_clean;
637 struct i40e_tx_buffer *tx_buf;
638 struct i40e_tx_desc *tx_head;
639 struct i40e_tx_desc *tx_desc;
640 unsigned int total_packets = 0;
641 unsigned int total_bytes = 0;
643 tx_buf = &tx_ring->tx_bi[i];
644 tx_desc = I40E_TX_DESC(tx_ring, i);
647 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
650 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
652 /* if next_to_watch is not set then there is no work pending */
656 /* prevent any other reads prior to eop_desc */
657 read_barrier_depends();
659 /* we have caught up to head, no work left to do */
660 if (tx_head == tx_desc)
663 /* clear next_to_watch to prevent false hangs */
664 tx_buf->next_to_watch = NULL;
666 /* update the statistics for this packet */
667 total_bytes += tx_buf->bytecount;
668 total_packets += tx_buf->gso_segs;
671 dev_consume_skb_any(tx_buf->skb);
673 /* unmap skb header data */
674 dma_unmap_single(tx_ring->dev,
675 dma_unmap_addr(tx_buf, dma),
676 dma_unmap_len(tx_buf, len),
679 /* clear tx_buffer data */
681 dma_unmap_len_set(tx_buf, len, 0);
683 /* unmap remaining buffers */
684 while (tx_desc != eop_desc) {
691 tx_buf = tx_ring->tx_bi;
692 tx_desc = I40E_TX_DESC(tx_ring, 0);
695 /* unmap any remaining paged data */
696 if (dma_unmap_len(tx_buf, len)) {
697 dma_unmap_page(tx_ring->dev,
698 dma_unmap_addr(tx_buf, dma),
699 dma_unmap_len(tx_buf, len),
701 dma_unmap_len_set(tx_buf, len, 0);
705 /* move us one more past the eop_desc for start of next pkt */
711 tx_buf = tx_ring->tx_bi;
712 tx_desc = I40E_TX_DESC(tx_ring, 0);
717 /* update budget accounting */
		budget--;
	} while (likely(budget));
722 tx_ring->next_to_clean = i;
723 u64_stats_update_begin(&tx_ring->syncp);
724 tx_ring->stats.bytes += total_bytes;
725 tx_ring->stats.packets += total_packets;
726 u64_stats_update_end(&tx_ring->syncp);
727 tx_ring->q_vector->tx.total_bytes += total_bytes;
728 tx_ring->q_vector->tx.total_packets += total_packets;
730 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
733 /* check to see if there are < 4 descriptors
734 * waiting to be written back, then kick the hardware to force
735 * them to be written back in case we stay in NAPI.
736 * In this mode on X722 we do not enable Interrupt.
738 j = i40e_get_tx_pending(tx_ring);
741 ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
742 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
743 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
744 tx_ring->arm_wb = true;
747 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
748 tx_ring->queue_index),
749 total_packets, total_bytes);
751 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
752 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
753 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
754 /* Make sure that anybody stopping the queue after this
755 * sees the new next_to_clean.
758 if (__netif_subqueue_stopped(tx_ring->netdev,
759 tx_ring->queue_index) &&
760 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
761 netif_wake_subqueue(tx_ring->netdev,
762 tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
771 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
772 * @vsi: the VSI we care about
773 * @q_vector: the vector on which to force writeback
776 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
778 u16 flags = q_vector->tx.ring[0].flags;
780 if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
783 if (q_vector->arm_wb_state)
786 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
789 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
790 vsi->base_vector - 1),
792 q_vector->arm_wb_state = true;
793 } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
794 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
795 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
796 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
797 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
798 /* allow 00 to be written to the index */
801 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
802 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
805 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
806 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
807 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
808 /* allow 00 to be written to the index */
		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}
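/* Editorial note on the two paths above (a reading of the register masks,
 * not original driver commentary): with WB_ON_ITR the hardware is asked to
 * write back finished descriptors on the next ITR expiry without raising an
 * interrupt, while the other branches trigger a software interrupt
 * (SWINT_TRIG) so that the normal clean-up path runs.
 */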
815 * i40e_set_new_dynamic_itr - Find new ITR level
816 * @rc: structure containing ring performance data
818 * Returns true if ITR changed, false if not
820 * Stores a new ITR value based on packets and byte counts during
821 * the last interrupt. The advantage of per interrupt computation
822 * is faster updates and more accurate ITR for the current traffic
823 * pattern. Constants in this function were computed based on
824 * theoretical maximum wire speed and thresholds were set based on
825 * testing data as well as attempting to minimize response time
826 * while increasing bulk throughput.
828 static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
830 enum i40e_latency_range new_latency_range = rc->latency_range;
831 u32 new_itr = rc->itr;
835 if (rc->total_packets == 0 || !rc->itr)
838 /* simple throttlerate management
839 * 0-10MB/s lowest (100000 ints/s)
840 * 10-20MB/s low (20000 ints/s)
841 * 20-1249MB/s bulk (8000 ints/s)
843 * The math works out because the divisor is in 10^(-6) which
844 * turns the bytes/us input value into MB/s values, but
845 * make sure to use usecs, as the register values written
846 * are in 2 usec increments in the ITR registers.
	 */
	usecs = (rc->itr << 1);
849 bytes_per_int = rc->total_bytes / usecs;
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	rc->latency_range = new_latency_range;
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_100K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}
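/* Editorial note on units (derived from the comment above, not new driver
 * text): ITR values are programmed in 2 usec steps, so a value N corresponds
 * to an interval of 2 * N usecs, i.e. roughly 500000 / N interrupts per
 * second; that is the arithmetic behind the (rc->itr << 1) conversion to
 * usecs used above.
 */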
898 * i40e_clean_programming_status - clean the programming status descriptor
899 * @rx_ring: the rx ring that has this descriptor
900 * @rx_desc: the rx descriptor written back by HW
902 * Flow director should handle FD_FILTER_STATUS to check its filter programming
903 * status being successful or not and take actions accordingly. FCoE should
904 * handle its context/filter programming/invalidation status and take actions.
907 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
908 union i40e_rx_desc *rx_desc)
913 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
914 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
915 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
917 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
918 i40e_fd_handle_status(rx_ring, rx_desc, id);
920 else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
921 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
922 i40e_fcoe_handle_status(rx_ring, rx_desc, id);
927 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
928 * @tx_ring: the tx ring to set up
930 * Return 0 on success, negative on error
932 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
934 struct device *dev = tx_ring->dev;
940 /* warn if we are about to overwrite the pointer */
941 WARN_ON(tx_ring->tx_bi);
942 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
943 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
947 /* round up to nearest 4K */
948 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
949 /* add u32 for head writeback, align after this takes care of
950 * guaranteeing this is at least one cache line in size
952 tx_ring->size += sizeof(u32);
953 tx_ring->size = ALIGN(tx_ring->size, 4096);
954 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
955 &tx_ring->dma, GFP_KERNEL);
956 if (!tx_ring->desc) {
957 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
962 tx_ring->next_to_use = 0;
963 tx_ring->next_to_clean = 0;
967 kfree(tx_ring->tx_bi);
968 tx_ring->tx_bi = NULL;
973 * i40e_clean_rx_ring - Free Rx buffers
974 * @rx_ring: ring to be cleaned
976 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
978 struct device *dev = rx_ring->dev;
979 struct i40e_rx_buffer *rx_bi;
980 unsigned long bi_size;
983 /* ring already cleared, nothing to do */
987 if (ring_is_ps_enabled(rx_ring)) {
988 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
990 rx_bi = &rx_ring->rx_bi[0];
991 if (rx_bi->hdr_buf) {
992 dma_free_coherent(dev,
996 for (i = 0; i < rx_ring->count; i++) {
997 rx_bi = &rx_ring->rx_bi[i];
999 rx_bi->hdr_buf = NULL;
1003 /* Free all the Rx ring sk_buffs */
1004 for (i = 0; i < rx_ring->count; i++) {
1005 rx_bi = &rx_ring->rx_bi[i];
1007 dma_unmap_single(dev,
1009 rx_ring->rx_buf_len,
1014 dev_kfree_skb(rx_bi->skb);
1018 if (rx_bi->page_dma) {
1023 rx_bi->page_dma = 0;
1025 __free_page(rx_bi->page);
1027 rx_bi->page_offset = 0;
1031 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1032 memset(rx_ring->rx_bi, 0, bi_size);
1034 /* Zero out the descriptor ring */
1035 memset(rx_ring->desc, 0, rx_ring->size);
1037 rx_ring->next_to_clean = 0;
1038 rx_ring->next_to_use = 0;
1042 * i40e_free_rx_resources - Free Rx resources
1043 * @rx_ring: ring to clean the resources from
1045 * Free all receive software resources
1047 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1049 i40e_clean_rx_ring(rx_ring);
1050 kfree(rx_ring->rx_bi);
1051 rx_ring->rx_bi = NULL;
1053 if (rx_ring->desc) {
1054 dma_free_coherent(rx_ring->dev, rx_ring->size,
1055 rx_ring->desc, rx_ring->dma);
1056 rx_ring->desc = NULL;
1061 * i40e_alloc_rx_headers - allocate rx header buffers
1062 * @rx_ring: ring to alloc buffers
1064 * Allocate rx header buffers for the entire ring. As these are static,
1065 * this is only called when setting up a new ring.
1067 void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
1069 struct device *dev = rx_ring->dev;
1070 struct i40e_rx_buffer *rx_bi;
1076 if (rx_ring->rx_bi[0].hdr_buf)
1078 /* Make sure the buffers don't cross cache line boundaries. */
1079 buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
1080 buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
1084 for (i = 0; i < rx_ring->count; i++) {
1085 rx_bi = &rx_ring->rx_bi[i];
1086 rx_bi->dma = dma + (i * buf_size);
1087 rx_bi->hdr_buf = buffer + (i * buf_size);
1092 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1093 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1095 * Returns 0 on success, negative on failure
1097 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1099 struct device *dev = rx_ring->dev;
1102 /* warn if we are about to overwrite the pointer */
1103 WARN_ON(rx_ring->rx_bi);
1104 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1105 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1106 if (!rx_ring->rx_bi)
1109 u64_stats_init(&rx_ring->syncp);
1111 /* Round up to nearest 4K */
1112 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1113 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
1114 : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1115 rx_ring->size = ALIGN(rx_ring->size, 4096);
1116 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1117 &rx_ring->dma, GFP_KERNEL);
1119 if (!rx_ring->desc) {
1120 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1125 rx_ring->next_to_clean = 0;
1126 rx_ring->next_to_use = 0;
1130 kfree(rx_ring->rx_bi);
1131 rx_ring->rx_bi = NULL;
1136 * i40e_release_rx_desc - Store the new tail and head values
1137 * @rx_ring: ring to bump
1138 * @val: new head index
1140 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1142 rx_ring->next_to_use = val;
1143 /* Force memory writes to complete before letting h/w
1144 * know there are new descriptors to fetch. (Only
1145 * applicable for weak-ordered memory model archs,
1149 writel(val, rx_ring->tail);
1153 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
1154 * @rx_ring: ring to place buffers on
1155 * @cleaned_count: number of buffers to replace
1157 void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
1159 u16 i = rx_ring->next_to_use;
1160 union i40e_rx_desc *rx_desc;
1161 struct i40e_rx_buffer *bi;
1163 /* do nothing if no valid netdev defined */
1164 if (!rx_ring->netdev || !cleaned_count)
1167 while (cleaned_count--) {
1168 rx_desc = I40E_RX_DESC(rx_ring, i);
1169 bi = &rx_ring->rx_bi[i];
1171 if (bi->skb) /* desc is in use */
1174 bi->page = alloc_page(GFP_ATOMIC);
1176 rx_ring->rx_stats.alloc_page_failed++;
1181 if (!bi->page_dma) {
1182 /* use a half page if we're re-using */
1183 bi->page_offset ^= PAGE_SIZE / 2;
1184 bi->page_dma = dma_map_page(rx_ring->dev,
1189 if (dma_mapping_error(rx_ring->dev,
1191 rx_ring->rx_stats.alloc_page_failed++;
1197 dma_sync_single_range_for_device(rx_ring->dev,
1200 rx_ring->rx_hdr_len,
1202 /* Refresh the desc even if buffer_addrs didn't change
1203 * because each write-back erases this info.
1205 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1206 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1208 if (i == rx_ring->count)
1213 if (rx_ring->next_to_use != i)
1214 i40e_release_rx_desc(rx_ring, i);
1218 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
1219 * @rx_ring: ring to place buffers on
1220 * @cleaned_count: number of buffers to replace
1222 void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
1224 u16 i = rx_ring->next_to_use;
1225 union i40e_rx_desc *rx_desc;
1226 struct i40e_rx_buffer *bi;
1227 struct sk_buff *skb;
1229 /* do nothing if no valid netdev defined */
1230 if (!rx_ring->netdev || !cleaned_count)
1233 while (cleaned_count--) {
1234 rx_desc = I40E_RX_DESC(rx_ring, i);
1235 bi = &rx_ring->rx_bi[i];
1239 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1240 rx_ring->rx_buf_len);
1242 rx_ring->rx_stats.alloc_buff_failed++;
1245 /* initialize queue mapping */
1246 skb_record_rx_queue(skb, rx_ring->queue_index);
1251 bi->dma = dma_map_single(rx_ring->dev,
1253 rx_ring->rx_buf_len,
1255 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1256 rx_ring->rx_stats.alloc_buff_failed++;
1262 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1263 rx_desc->read.hdr_addr = 0;
1265 if (i == rx_ring->count)
1270 if (rx_ring->next_to_use != i)
1271 i40e_release_rx_desc(rx_ring, i);
1275 * i40e_receive_skb - Send a completed packet up the stack
1276 * @rx_ring: rx ring in play
1277 * @skb: packet to send up
1278 * @vlan_tag: vlan tag for packet
1280 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1281 struct sk_buff *skb, u16 vlan_tag)
1283 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1285 if (vlan_tag & VLAN_VID_MASK)
1286 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1288 napi_gro_receive(&q_vector->napi, skb);
1292 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1293 * @vsi: the VSI we care about
1294 * @skb: skb currently being received and modified
1295 * @rx_status: status value of last descriptor in packet
1296 * @rx_error: error value of last descriptor in packet
1297 * @rx_ptype: ptype value of last descriptor in packet
1299 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1300 struct sk_buff *skb,
1305 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1306 bool ipv4 = false, ipv6 = false;
1307 bool ipv4_tunnel, ipv6_tunnel;
1312 ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1313 (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1314 ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1315 (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1317 skb->ip_summed = CHECKSUM_NONE;
1319 /* Rx csum enabled and ip headers found? */
1320 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1323 /* did the hardware decode the packet and checksum? */
1324 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1327 /* both known and outer_ip must be set for the below code to work */
1328 if (!(decoded.known && decoded.outer_ip))
1331 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1332 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1334 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1335 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1339 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1340 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1343 /* likely incorrect csum if alternate IP extension headers found */
1345 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1346 /* don't increment checksum err here, non-fatal err */
1349 /* there was some L4 error, count error and punt packet to the stack */
1350 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1353 /* handle packets that were not able to be checksummed due
1354 * to arrival speed, in this case the stack can compute
1357 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1360 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1361 * it in the driver, hardware does not do it for us.
1362 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1363 * so the total length of IPv4 header is IHL*4 bytes
1364 * The UDP_0 bit *may* bet set if the *inner* header is UDP
1366 if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
1368 skb->transport_header = skb->mac_header +
1369 sizeof(struct ethhdr) +
1370 (ip_hdr(skb)->ihl * 4);
1372 /* Add 4 bytes for VLAN tagged packets */
1373 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1374 skb->protocol == htons(ETH_P_8021AD))
1377 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
1378 (udp_hdr(skb)->check != 0)) {
1379 rx_udp_csum = udp_csum(skb);
1381 csum = csum_tcpudp_magic(
1382 iph->saddr, iph->daddr,
1383 (skb->len - skb_transport_offset(skb)),
1384 IPPROTO_UDP, rx_udp_csum);
1386 if (udp_hdr(skb)->check != csum)
1389 } /* else its GRE and so no outer UDP header */
1392 skb->ip_summed = CHECKSUM_UNNECESSARY;
1393 skb->csum_level = ipv4_tunnel || ipv6_tunnel;

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
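/* Editorial note, added for clarity (not original driver text):
 * CHECKSUM_UNNECESSARY with csum_level = 1 tells the stack that two
 * checksums (outer and inner) were validated, which is why the level is
 * taken from whether the packet was a GRENAT tunnel.
 */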
1402 * i40e_rx_hash - returns the hash value from the Rx descriptor
1403 * @ring: descriptor ring
1404 * @rx_desc: specific descriptor
1406 static inline u32 i40e_rx_hash(struct i40e_ring *ring,
1407 union i40e_rx_desc *rx_desc)
1409 const __le64 rss_mask =
1410 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1411 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1413 if ((ring->netdev->features & NETIF_F_RXHASH) &&
1414 (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
1415 return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1421 * i40e_ptype_to_hash - get a hash type
1422 * @ptype: the ptype value from the descriptor
1424 * Returns a hash type to be used by skb_set_hash
1426 static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
1428 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1431 return PKT_HASH_TYPE_NONE;
1433 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1434 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1435 return PKT_HASH_TYPE_L4;
1436 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1437 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1438 return PKT_HASH_TYPE_L3;
1440 return PKT_HASH_TYPE_L2;
1444 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
1445 * @rx_ring: rx ring to clean
1446 * @budget: how many cleans we're allowed
1448 * Returns true if there's any budget left (e.g. the clean is finished)
1450 static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
1452 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1453 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1454 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1455 const int current_node = numa_mem_id();
1456 struct i40e_vsi *vsi = rx_ring->vsi;
1457 u16 i = rx_ring->next_to_clean;
1458 union i40e_rx_desc *rx_desc;
1459 u32 rx_error, rx_status;
1467 struct i40e_rx_buffer *rx_bi;
1468 struct sk_buff *skb;
1470 /* return some buffers to hardware, one at a time is too slow */
1471 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1472 i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
1476 i = rx_ring->next_to_clean;
1477 rx_desc = I40E_RX_DESC(rx_ring, i);
1478 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1479 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1480 I40E_RXD_QW1_STATUS_SHIFT;
1482 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1485 /* This memory barrier is needed to keep us from reading
1486 * any other fields out of the rx_desc until we know the
1490 if (i40e_rx_is_programming_status(qword)) {
1491 i40e_clean_programming_status(rx_ring, rx_desc);
1492 I40E_RX_INCREMENT(rx_ring, i);
1495 rx_bi = &rx_ring->rx_bi[i];
1498 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1499 rx_ring->rx_hdr_len);
1501 rx_ring->rx_stats.alloc_buff_failed++;
1505 /* initialize queue mapping */
1506 skb_record_rx_queue(skb, rx_ring->queue_index);
1507 /* we are reusing so sync this buffer for CPU use */
1508 dma_sync_single_range_for_cpu(rx_ring->dev,
1511 rx_ring->rx_hdr_len,
1514 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1515 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1516 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1517 I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1518 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1519 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
1521 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1522 I40E_RXD_QW1_ERROR_SHIFT;
1523 rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1524 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1526 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1527 I40E_RXD_QW1_PTYPE_SHIFT;
1528 prefetch(rx_bi->page);
1531 if (rx_hbo || rx_sph) {
1535 len = I40E_RX_HDR_SIZE;
1537 len = rx_header_len;
1538 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
1539 } else if (skb->len == 0) {
1542 len = (rx_packet_len > skb_headlen(skb) ?
1543 skb_headlen(skb) : rx_packet_len);
1544 memcpy(__skb_put(skb, len),
1545 rx_bi->page + rx_bi->page_offset,
1547 rx_bi->page_offset += len;
1548 rx_packet_len -= len;
1551 /* Get the rest of the data if this was a header split */
1552 if (rx_packet_len) {
1553 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1558 skb->len += rx_packet_len;
1559 skb->data_len += rx_packet_len;
1560 skb->truesize += rx_packet_len;
1562 if ((page_count(rx_bi->page) == 1) &&
1563 (page_to_nid(rx_bi->page) == current_node))
1564 get_page(rx_bi->page);
1568 dma_unmap_page(rx_ring->dev,
1572 rx_bi->page_dma = 0;
1574 I40E_RX_INCREMENT(rx_ring, i);
1577 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1578 struct i40e_rx_buffer *next_buffer;
1580 next_buffer = &rx_ring->rx_bi[i];
1581 next_buffer->skb = skb;
1582 rx_ring->rx_stats.non_eop_descs++;
1586 /* ERR_MASK will only have valid bits if EOP set */
1587 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1588 dev_kfree_skb_any(skb);
1592 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1593 i40e_ptype_to_hash(rx_ptype));
1594 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1595 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1596 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1597 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1598 rx_ring->last_rx_timestamp = jiffies;
1601 /* probably a little skewed due to removing CRC */
1602 total_rx_bytes += skb->len;
1605 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1607 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1609 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1610 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1613 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1614 dev_kfree_skb_any(skb);
1618 skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
1619 i40e_receive_skb(rx_ring, skb, vlan_tag);
1621 rx_desc->wb.qword1.status_error_len = 0;
1623 } while (likely(total_rx_packets < budget));
1625 u64_stats_update_begin(&rx_ring->syncp);
1626 rx_ring->stats.packets += total_rx_packets;
1627 rx_ring->stats.bytes += total_rx_bytes;
1628 u64_stats_update_end(&rx_ring->syncp);
1629 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1630 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1632 return total_rx_packets;
1636 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1637 * @rx_ring: rx ring to clean
1638 * @budget: how many cleans we're allowed
1640 * Returns number of packets cleaned
1642 static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1644 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1645 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1646 struct i40e_vsi *vsi = rx_ring->vsi;
1647 union i40e_rx_desc *rx_desc;
1648 u32 rx_error, rx_status;
1655 struct i40e_rx_buffer *rx_bi;
1656 struct sk_buff *skb;
1658 /* return some buffers to hardware, one at a time is too slow */
1659 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1660 i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
1664 i = rx_ring->next_to_clean;
1665 rx_desc = I40E_RX_DESC(rx_ring, i);
1666 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1667 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1668 I40E_RXD_QW1_STATUS_SHIFT;
1670 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1673 /* This memory barrier is needed to keep us from reading
1674 * any other fields out of the rx_desc until we know the
1679 if (i40e_rx_is_programming_status(qword)) {
1680 i40e_clean_programming_status(rx_ring, rx_desc);
1681 I40E_RX_INCREMENT(rx_ring, i);
1684 rx_bi = &rx_ring->rx_bi[i];
1686 prefetch(skb->data);
1688 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1689 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1691 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1692 I40E_RXD_QW1_ERROR_SHIFT;
1693 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1695 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1696 I40E_RXD_QW1_PTYPE_SHIFT;
1700 /* Get the header and possibly the whole packet
1701 * If this is an skb from previous receive dma will be 0
1703 skb_put(skb, rx_packet_len);
1704 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1708 I40E_RX_INCREMENT(rx_ring, i);
1711 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1712 rx_ring->rx_stats.non_eop_descs++;
1716 /* ERR_MASK will only have valid bits if EOP set */
1717 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1718 dev_kfree_skb_any(skb);
1722 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1723 i40e_ptype_to_hash(rx_ptype));
1724 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1725 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1726 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1727 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1728 rx_ring->last_rx_timestamp = jiffies;
1731 /* probably a little skewed due to removing CRC */
1732 total_rx_bytes += skb->len;
1735 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1737 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1739 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1740 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1743 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1744 dev_kfree_skb_any(skb);
1748 i40e_receive_skb(rx_ring, skb, vlan_tag);
1750 rx_desc->wb.qword1.status_error_len = 0;
1751 } while (likely(total_rx_packets < budget));
1753 u64_stats_update_begin(&rx_ring->syncp);
1754 rx_ring->stats.packets += total_rx_packets;
1755 rx_ring->stats.bytes += total_rx_bytes;
1756 u64_stats_update_end(&rx_ring->syncp);
1757 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1758 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1760 return total_rx_packets;
static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
	u32 val;

1767 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1768 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1769 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);

	return val;
}
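/* Editorial example, added for clarity (not original driver text): the value
 * built here re-enables the interrupt (INTENA), clears the pending-bit array
 * (CLEARPBA), selects which ITR index is updated and programs the interval,
 * so e.g.
 *
 *	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1),
 *	     i40e_buildreg_itr(I40E_RX_ITR, 40));
 *
 * would, assuming the 2 usec ITR granularity noted earlier in this file,
 * request an Rx interval of about 80 usecs.
 */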
1775 /* a small macro to shorten up some long lines */
1776 #define INTREG I40E_PFINT_DYN_CTLN
1779 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1780 * @vsi: the VSI we care about
1781 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1784 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1785 struct i40e_q_vector *q_vector)
1787 struct i40e_hw *hw = &vsi->back->hw;
1788 bool rx = false, tx = false;
1792 vector = (q_vector->v_idx + vsi->base_vector);
1794 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1796 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
1797 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
1798 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
1801 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
1802 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
1803 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
1807 /* get the higher of the two ITR adjustments and
1808 * use the same value for both ITR registers
1809 * when in adaptive mode (Rx and/or Tx)
1811 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
1813 q_vector->tx.itr = q_vector->rx.itr = itr;
1814 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
1816 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
1820 /* only need to enable the interrupt once, but need
1821 * to possibly update both ITR values
1824 /* set the INTENA_MSK_MASK so that this first write
1825 * won't actually enable the interrupt, instead just
1826 * updating the ITR (it's bit 31 PF and VF)
1829 /* don't check _DOWN because interrupt isn't being enabled */
1830 wr32(hw, INTREG(vector - 1), rxval);
1833 if (!test_bit(__I40E_DOWN, &vsi->state))
1834 wr32(hw, INTREG(vector - 1), txval);
1838 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1839 * @napi: napi struct with our devices info in it
1840 * @budget: amount of work driver is allowed to do this pass, in packets
1842 * This function will clean all queues associated with a q_vector.
1844 * Returns the amount of work done
1846 int i40e_napi_poll(struct napi_struct *napi, int budget)
1848 struct i40e_q_vector *q_vector =
1849 container_of(napi, struct i40e_q_vector, napi);
1850 struct i40e_vsi *vsi = q_vector->vsi;
1851 struct i40e_ring *ring;
1852 bool clean_complete = true;
1853 bool arm_wb = false;
1854 int budget_per_ring;
1857 if (test_bit(__I40E_DOWN, &vsi->state)) {
1858 napi_complete(napi);
1862 /* Since the actual Tx work is minimal, we can give the Tx a larger
1863 * budget and be more aggressive about cleaning up the Tx descriptors.
1865 i40e_for_each_ring(ring, q_vector->tx) {
1866 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1867 arm_wb |= ring->arm_wb;
1868 ring->arm_wb = false;
1871 /* Handle case where we are called by netpoll with a budget of 0 */
1875 /* We attempt to distribute budget to each Rx queue fairly, but don't
1876 * allow the budget to go below 1 because that would exit polling early.
1878 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1880 i40e_for_each_ring(ring, q_vector->rx) {
1883 if (ring_is_ps_enabled(ring))
1884 cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
1886 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
1888 work_done += cleaned;
1889 /* if we didn't clean as many as budgeted, we must be done */
1890 clean_complete &= (budget_per_ring != cleaned);
1893 /* If work not completed, return budget and polling will return */
1894 if (!clean_complete) {
tx_only:
		if (arm_wb)
			i40e_force_wb(vsi, q_vector);
		return budget;
	}

1901 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
1902 q_vector->arm_wb_state = false;
1904 /* Work is done so exit the polling mode and re-enable the interrupt */
1905 napi_complete_done(napi, work_done);
1906 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1907 i40e_update_enable_itr(vsi, q_vector);
1908 } else { /* Legacy mode */
1909 struct i40e_hw *hw = &vsi->back->hw;
1910 /* We re-enable the queue 0 cause, but
1911 * don't worry about dynamic_enable
1912 * because we left it on for the other
1913 * possible interrupts during napi
1915 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
1916 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1918 wr32(hw, I40E_QINT_RQCTL(0), qval);
1919 qval = rd32(hw, I40E_QINT_TQCTL(0)) |
1920 I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1921 wr32(hw, I40E_QINT_TQCTL(0), qval);
		i40e_irq_dynamic_enable_icr0(vsi->back);
	}

	return 0;
}
1928 * i40e_atr - Add a Flow Director ATR filter
1929 * @tx_ring: ring to add programming descriptor to
1931 * @tx_flags: send tx flags
1932 * @protocol: wire protocol
1934 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1935 u32 tx_flags, __be16 protocol)
1937 struct i40e_filter_program_desc *fdir_desc;
1938 struct i40e_pf *pf = tx_ring->vsi->back;
1940 unsigned char *network;
1942 struct ipv6hdr *ipv6;
1946 u32 flex_ptype, dtype_cmd;
	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
		return;

1963 if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
1964 /* snag network header to get L4 type and address */
1965 hdr.network = skb_network_header(skb);
1967 /* Currently only IPv4/IPv6 with TCP is supported
1968 * access ihl as u8 to avoid unaligned access on ia64
1970 if (tx_flags & I40E_TX_FLAGS_IPV4)
1971 hlen = (hdr.network[0] & 0x0F) << 2;
1972 else if (protocol == htons(ETH_P_IPV6))
1973 hlen = sizeof(struct ipv6hdr);
1977 hdr.network = skb_inner_network_header(skb);
1978 hlen = skb_inner_network_header_len(skb);
1981 /* Currently only IPv4/IPv6 with TCP is supported
1982 * Note: tx_flags gets modified to reflect inner protocols in
1983 * tx_enable_csum function if encap is enabled.
1985 if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
1986 (hdr.ipv4->protocol != IPPROTO_TCP))
1988 else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
1989 (hdr.ipv6->nexthdr != IPPROTO_TCP))
1992 th = (struct tcphdr *)(hdr.network + hlen);
1994 /* Due to lack of space, no more new filters can be programmed */
1995 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1997 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
1998 /* HW ATR eviction will take care of removing filters on FIN
2001 if (th->fin || th->rst)
2005 tx_ring->atr_count++;
	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

2014 tx_ring->atr_count = 0;
2016 /* grab the next descriptor */
2017 i = tx_ring->next_to_use;
2018 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2023 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2024 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2025 flex_ptype |= (protocol == htons(ETH_P_IP)) ?
2026 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2027 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2028 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2029 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2031 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2033 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2035 dtype_cmd |= (th->fin || th->rst) ?
2036 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2037 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2038 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2039 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2041 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2042 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2044 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2045 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2047 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2048 if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
2050 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2051 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2052 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2055 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2056 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2057 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2059 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
2060 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2062 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2063 fdir_desc->rsvd = cpu_to_le32(0);
2064 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}
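/* Editorial summary of the function above, added for clarity (not original
 * driver text): ATR samples transmitted TCP traffic - every SYN/FIN/RST plus
 * one packet per atr_sample_rate - and programs a Flow Director filter that
 * steers the return half of the flow to the same queue index, so both
 * directions of a connection stay on one queue/CPU; FIN/RST samples request
 * removal of the filter instead.
 */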
2069 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2071 * @tx_ring: ring to send buffer on
2072 * @flags: the tx flags to be set
2074 * Checks the skb and set up correspondingly several generic transmit flags
2075 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2077 * Returns error code indicate the frame should be dropped upon error and the
2078 * otherwise returns 0 to indicate the flags has been set properly.
#ifdef I40E_FCOE
inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
#else
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
					     struct i40e_ring *tx_ring,
					     u32 *flags)
#endif
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

2093 if (protocol == htons(ETH_P_8021Q) &&
2094 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2095 /* When HW VLAN acceleration is turned off by the user the
2096 * stack sets the protocol to 8021q so that the driver
2097 * can take any steps required to support the SW only
2098 * VLAN handling. In our case the driver doesn't need
2099 * to take any further steps so just set the protocol
2100 * to the encapsulated ethertype.
2102 skb->protocol = vlan_get_protocol(skb);
2106 /* if we have a HW VLAN tag being added, default to the HW one */
2107 if (skb_vlan_tag_present(skb)) {
2108 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2109 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2110 /* else if it is a SW VLAN, check the next protocol and store the tag */
2111 } else if (protocol == htons(ETH_P_8021Q)) {
2112 struct vlan_hdr *vhdr, _vhdr;
2114 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2118 protocol = vhdr->h_vlan_encapsulated_proto;
2119 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2120 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2123 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2126 /* Insert 802.1p priority into VLAN header */
2127 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2128 (skb->priority != TC_PRIO_CONTROL)) {
2129 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2130 tx_flags |= (skb->priority & 0x7) <<
2131 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2132 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2133 struct vlan_ethhdr *vhdr;
2136 rc = skb_cow_head(skb, 0);
2139 vhdr = (struct vlan_ethhdr *)skb->data;
2140 vhdr->h_vlan_TCI = htons(tx_flags >>
2141 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}
2153 * i40e_tso - set up the tso context descriptor
2154 * @tx_ring: ptr to the ring to send
2155 * @skb: ptr to the skb we're sending
2156 * @hdr_len: ptr to the size of the packet header
2157 * @cd_tunneling: ptr to context descriptor bits
2159 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2161 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
2162 u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
2165 u32 cd_cmd, cd_tso_len, cd_mss;
2166 struct ipv6hdr *ipv6h;
2167 struct tcphdr *tcph;
2172 if (!skb_is_gso(skb))
2175 err = skb_cow_head(skb, 0);
2179 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
2180 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
2182 if (iph->version == 4) {
2183 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2184 iph->tot_len = 0;
2185 iph->check = 0;
2186 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2187 0, IPPROTO_TCP, 0);
2188 } else if (ipv6h->version == 6) {
2189 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2190 ipv6h->payload_len = 0;
2191 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
2192 0, IPPROTO_TCP, 0);
2193 }
2195 l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
2196 *hdr_len = (skb->encapsulation
2197 ? (skb_inner_transport_header(skb) - skb->data)
2198 : skb_transport_offset(skb)) + l4len;
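/* Worked example (untunnelled frame): 14-byte Ethernet + 20-byte IPv4 +
 * 20-byte TCP headers give *hdr_len = 54; everything past that offset is
 * the TSO payload described by the context fields set below.
 */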
2200 /* find the field values */
2201 cd_cmd = I40E_TX_CTX_DESC_TSO;
2202 cd_tso_len = skb->len - *hdr_len;
2203 cd_mss = skb_shinfo(skb)->gso_size;
2204 *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2205 ((u64)cd_tso_len <<
2206 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2207 ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2208 return 1;
2209 }
2211 /**
2212 * i40e_tsyn - set up the tsyn context descriptor
2213 * @tx_ring: ptr to the ring to send
2214 * @skb: ptr to the skb we're sending
2215 * @tx_flags: the collected send information
2217 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2218 **/
2219 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2220 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2221 {
2222 struct i40e_pf *pf;
2224 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2225 return 0;
2227 /* Tx timestamps cannot be sampled when doing TSO */
2228 if (tx_flags & I40E_TX_FLAGS_TSO)
2229 return 0;
2231 /* only timestamp the outbound packet if the user has requested it and
2232 * we are not already transmitting a packet to be timestamped
2233 */
2234 pf = i40e_netdev_to_pf(tx_ring->netdev);
2235 if (!(pf->flags & I40E_FLAG_PTP))
2236 return 0;
2238 if (pf->ptp_tx &&
2239 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
2240 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2241 pf->ptp_tx_skb = skb_get(skb);
2242 } else {
2243 return 0;
2244 }
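/* Only one outbound frame can be timestamped at a time: the skb reference
 * taken above stays in pf->ptp_tx_skb until the PTP code reads the hardware
 * timestamp (or gives up and releases it).
 */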
2246 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2247 I40E_TXD_CTX_QW1_CMD_SHIFT;
2249 return 1;
2250 }
2252 /**
2253 * i40e_tx_enable_csum - Enable Tx checksum offloads
2254 * @skb: send packet
2255 * @tx_flags: pointer to Tx flags currently set
2256 * @td_cmd: Tx descriptor command bits to set
2257 * @td_offset: Tx descriptor header offsets to set
2258 * @cd_tunneling: ptr to context desc bits
2259 **/
2260 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2261 u32 *td_cmd, u32 *td_offset,
2262 struct i40e_ring *tx_ring,
2263 u32 *cd_tunneling)
2264 {
2265 struct ipv6hdr *this_ipv6_hdr;
2266 unsigned int this_tcp_hdrlen;
2267 struct iphdr *this_ip_hdr;
2268 u32 network_hdr_len;
2269 u8 l4_hdr = 0;
2270 struct udphdr *oudph;
2271 struct iphdr *oiph;
2272 u32 l4_tunnel = 0;
2274 if (skb->encapsulation) {
2275 switch (ip_hdr(skb)->protocol) {
2276 case IPPROTO_UDP:
2277 oudph = udp_hdr(skb);
2278 oiph = ip_hdr(skb);
2279 l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
2280 *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
2281 break;
2282 case IPPROTO_GRE:
2283 l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
2284 break;
2285 default:
2286 return;
2287 }
2288 network_hdr_len = skb_inner_network_header_len(skb);
2289 this_ip_hdr = inner_ip_hdr(skb);
2290 this_ipv6_hdr = inner_ipv6_hdr(skb);
2291 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
2293 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2294 if (*tx_flags & I40E_TX_FLAGS_TSO) {
2295 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
2296 ip_hdr(skb)->check = 0;
2297 } else {
2298 *cd_tunneling |=
2299 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2300 }
2301 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2302 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
2303 if (*tx_flags & I40E_TX_FLAGS_TSO)
2304 ip_hdr(skb)->check = 0;
2305 }
2307 /* Now set the ctx descriptor fields */
2308 *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
2309 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
2310 l4_tunnel |
2311 ((skb_inner_network_offset(skb) -
2312 skb_transport_offset(skb)) >> 1) <<
2313 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
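/* EXT_IPLEN above is the outer IP header length in 4-byte words and NATLEN
 * the tunnel header length in 2-byte words, which is how the hardware
 * locates the inner headers of the encapsulated packet.
 */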
2314 if (this_ip_hdr->version == 6) {
2315 *tx_flags &= ~I40E_TX_FLAGS_IPV4;
2316 *tx_flags |= I40E_TX_FLAGS_IPV6;
2317 }
2318 if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
2319 (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
2320 (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
2321 oudph->check = ~csum_tcpudp_magic(oiph->saddr,
2322 oiph->daddr,
2323 (skb->len - skb_transport_offset(skb)),
2324 IPPROTO_UDP, 0);
2325 *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2326 }
2327 } else {
2328 network_hdr_len = skb_network_header_len(skb);
2329 this_ip_hdr = ip_hdr(skb);
2330 this_ipv6_hdr = ipv6_hdr(skb);
2331 this_tcp_hdrlen = tcp_hdrlen(skb);
2332 }
2334 /* Enable IP checksum offloads */
2335 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2336 l4_hdr = this_ip_hdr->protocol;
2337 /* the stack computes the IP header already, the only time we
2338 * need the hardware to recompute it is in the case of TSO.
2340 if (*tx_flags & I40E_TX_FLAGS_TSO) {
2341 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
2342 this_ip_hdr->check = 0;
2343 } else {
2344 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
2345 }
2346 /* Now set the td_offset for IP header length */
2347 *td_offset = (network_hdr_len >> 2) <<
2348 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2349 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2350 l4_hdr = this_ipv6_hdr->nexthdr;
2351 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2352 /* Now set the td_offset for IP header length */
2353 *td_offset = (network_hdr_len >> 2) <<
2354 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2355 }
2356 /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
2357 *td_offset |= (skb_network_offset(skb) >> 1) <<
2358 I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
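/* Worked example for a plain TCP/IPv4 frame: MACLEN = 14 / 2 = 7 words,
 * IPLEN = 20 / 4 = 5 dwords, and the L4 length added below is
 * 20 / 4 = 5 dwords.
 */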
2360 /* Enable L4 checksum offloads */
2361 switch (l4_hdr) {
2362 case IPPROTO_TCP:
2363 /* enable checksum offloads */
2364 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2365 *td_offset |= (this_tcp_hdrlen >> 2) <<
2366 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2367 break;
2368 case IPPROTO_SCTP:
2369 /* enable SCTP checksum offload */
2370 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2371 *td_offset |= (sizeof(struct sctphdr) >> 2) <<
2372 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2373 break;
2374 case IPPROTO_UDP:
2375 /* enable UDP checksum offload */
2376 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2377 *td_offset |= (sizeof(struct udphdr) >> 2) <<
2378 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2379 break;
2380 default:
2381 break;
2382 }
2383 }
2385 /**
2386 * i40e_create_tx_ctx - Build the Tx context descriptor
2387 * @tx_ring: ring to create the descriptor on
2388 * @cd_type_cmd_tso_mss: Quad Word 1
2389 * @cd_tunneling: Quad Word 0 - bits 0-31
2390 * @cd_l2tag2: Quad Word 0 - bits 32-63
2392 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2393 const u64 cd_type_cmd_tso_mss,
2394 const u32 cd_tunneling, const u32 cd_l2tag2)
2395 {
2396 struct i40e_tx_context_desc *context_desc;
2397 int i = tx_ring->next_to_use;
2399 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2400 !cd_tunneling && !cd_l2tag2)
2401 return;
2403 /* grab the next descriptor */
2404 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2406 i++;
2407 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2409 /* cpu_to_le32 and assign to struct fields */
2410 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2411 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2412 context_desc->rsvd = cpu_to_le16(0);
2413 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2414 }
2416 /**
2417 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2418 * @tx_ring: the ring to be checked
2419 * @size: the size buffer we want to assure is available
2421 * Returns -EBUSY if a stop is needed, else 0
2422 **/
2423 static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2424 {
2425 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2426 /* Memory barrier before checking head and tail */
2427 smp_mb();
2429 /* Check again in a case another CPU has just made room available. */
2430 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2431 return -EBUSY;
2433 /* A reprieve! - use start_queue because it doesn't call schedule */
2434 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2435 ++tx_ring->tx_stats.restart_queue;
2436 return 0;
2437 }
2439 /**
2440 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
2441 * @tx_ring: the ring to be checked
2442 * @size: the size buffer we want to assure is available
2444 * Returns 0 if stop is not needed
2445 **/
2446 #ifdef I40E_FCOE
2447 inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2448 #else
2449 static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2450 #endif
2451 {
2452 if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
2453 return 0;
2454 return __i40e_maybe_stop_tx(tx_ring, size);
2455 }
2457 /**
2458 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2459 * @skb: send buffer
2460 * @tx_flags: collected send information
2462 * Note: Our HW can't scatter-gather more than 8 fragments to build
2463 * a packet on the wire and so we need to figure out the cases where we
2464 * need to linearize the skb.
2465 **/
2466 static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
2467 {
2468 struct skb_frag_struct *frag;
2469 bool linearize = false;
2470 unsigned int size = 0;
2471 u16 num_frags;
2472 u16 gso_segs;
2474 num_frags = skb_shinfo(skb)->nr_frags;
2475 gso_segs = skb_shinfo(skb)->gso_segs;
2477 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2478 u16 j = 0;
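/* Rough idea of the walk below: accumulate frag sizes until one gso_size
 * worth of payload is covered; if any single segment would need
 * I40E_MAX_BUFFER_TXD or more buffers, the skb has to be linearized
 * before transmit.
 */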
2480 if (num_frags < (I40E_MAX_BUFFER_TXD))
2481 goto linearize_chk_done;
2482 /* try the simple math, if we have too many frags per segment */
2483 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
2484 I40E_MAX_BUFFER_TXD) {
2485 linearize = true;
2486 goto linearize_chk_done;
2487 }
2488 frag = &skb_shinfo(skb)->frags[0];
2489 /* we might still have more fragments per segment */
2490 do {
2491 size += skb_frag_size(frag);
2492 frag++; j++;
2493 if ((size >= skb_shinfo(skb)->gso_size) &&
2494 (j < I40E_MAX_BUFFER_TXD)) {
2495 size = (size % skb_shinfo(skb)->gso_size);
2496 j = (size) ? 1 : 0;
2497 }
2498 if (j == I40E_MAX_BUFFER_TXD) {
2499 linearize = true;
2500 break;
2501 }
2502 num_frags--;
2503 } while (num_frags);
2504 } else {
2505 if (num_frags >= I40E_MAX_BUFFER_TXD)
2506 linearize = true;
2507 }
2509 linearize_chk_done:
2510 return linearize;
2511 }
2513 /**
2514 * i40e_tx_map - Build the Tx descriptor
2515 * @tx_ring: ring to send buffer on
2516 * @skb: send buffer
2517 * @first: first buffer info buffer to use
2518 * @tx_flags: collected send information
2519 * @hdr_len: size of the packet header
2520 * @td_cmd: the command field in the descriptor
2521 * @td_offset: offset for checksum or crc
2522 **/
2523 #ifdef I40E_FCOE
2524 inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2525 struct i40e_tx_buffer *first, u32 tx_flags,
2526 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2527 #else
2528 static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2529 struct i40e_tx_buffer *first, u32 tx_flags,
2530 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2531 #endif
2532 {
2533 unsigned int data_len = skb->data_len;
2534 unsigned int size = skb_headlen(skb);
2535 struct skb_frag_struct *frag;
2536 struct i40e_tx_buffer *tx_bi;
2537 struct i40e_tx_desc *tx_desc;
2538 u16 i = tx_ring->next_to_use;
2539 u32 td_tag = 0;
2540 dma_addr_t dma;
2541 u16 gso_segs;
2542 u16 desc_count = 0;
2543 bool tail_bump = true;
2544 bool do_rs = false;
2546 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2547 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2548 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2549 I40E_TX_FLAGS_VLAN_SHIFT;
2552 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2553 gso_segs = skb_shinfo(skb)->gso_segs;
2554 else
2555 gso_segs = 1;
2557 /* multiply data chunks by size of headers */
2558 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2559 first->gso_segs = gso_segs;
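/* bytecount is what actually reaches the wire: the payload plus one header
 * copy per segment (a TSO skb with hdr_len 54 and 10 segments accounts for
 * 9 additional 54-byte headers). BQL consumes this value through
 * netdev_tx_sent_queue() further down.
 */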
2561 first->tx_flags = tx_flags;
2563 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2565 tx_desc = I40E_TX_DESC(tx_ring, i);
2566 tx_bi = first;
2568 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2569 if (dma_mapping_error(tx_ring->dev, dma))
2570 goto dma_error;
2572 /* record length, and DMA address */
2573 dma_unmap_len_set(tx_bi, len, size);
2574 dma_unmap_addr_set(tx_bi, dma, dma);
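/* The stashed len/dma let the completion path unmap exactly what was
 * mapped once this descriptor has been written back.
 */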
2576 tx_desc->buffer_addr = cpu_to_le64(dma);
2578 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2579 tx_desc->cmd_type_offset_bsz =
2580 build_ctob(td_cmd, td_offset,
2581 I40E_MAX_DATA_PER_TXD, td_tag);
2583 tx_desc++;
2584 i++;
2585 desc_count++;
2587 if (i == tx_ring->count) {
2588 tx_desc = I40E_TX_DESC(tx_ring, 0);
2589 i = 0;
2590 }
2592 dma += I40E_MAX_DATA_PER_TXD;
2593 size -= I40E_MAX_DATA_PER_TXD;
2595 tx_desc->buffer_addr = cpu_to_le64(dma);
2596 }
2598 if (likely(!data_len))
2599 break;
2601 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2602 size, td_tag);
2604 tx_desc++;
2605 i++;
2606 desc_count++;
2608 if (i == tx_ring->count) {
2609 tx_desc = I40E_TX_DESC(tx_ring, 0);
2610 i = 0;
2611 }
2613 size = skb_frag_size(frag);
2614 data_len -= size;
2616 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2617 DMA_TO_DEVICE);
2619 tx_bi = &tx_ring->tx_bi[i];
2620 }
2622 /* set next_to_watch value indicating a packet is present */
2623 first->next_to_watch = tx_desc;
2625 i++;
2626 if (i == tx_ring->count)
2627 i = 0;
2629 tx_ring->next_to_use = i;
2631 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2632 tx_ring->queue_index),
2633 first->bytecount);
2634 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
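/* Stop the queue now if a worst-case skb (on the order of MAX_SKB_FRAGS
 * plus a few context/gap descriptors) would no longer fit, rather than
 * failing in the hot path of the next transmit.
 */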
2636 /* Algorithm to optimize tail and RS bit setting:
2637 * if xmit_more is supported
2638 * if xmit_more is true
2639 * do not update tail and do not mark RS bit.
2640 * if xmit_more is false and last xmit_more was false
2641 * if every packet spanned less than 4 desc
2642 * then set RS bit on 4th packet and update tail
2643 * on every packet
2644 * else
2645 * update tail and set RS bit on every packet.
2646 * if xmit_more is false and last_xmit_more was true
2647 * update tail and set RS bit.
2649 * Optimization: wmb to be issued only in case of tail update.
2650 * Also optimize the Descriptor WB path for RS bit with the same
2651 * algorithm.
2653 * Note: If there are less than 4 packets
2654 * pending and interrupts were disabled the service task will
2655 * trigger a force WB.
2656 */
2657 if (skb->xmit_more &&
2658 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2659 tx_ring->queue_index))) {
2660 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2661 tail_bump = false;
2662 } else if (!skb->xmit_more &&
2663 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2664 tx_ring->queue_index)) &&
2665 (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
2666 (tx_ring->packet_stride < WB_STRIDE) &&
2667 (desc_count < WB_STRIDE)) {
2668 tx_ring->packet_stride++;
2669 } else {
2670 tx_ring->packet_stride = 0;
2671 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2672 do_rs = true;
2673 }
2674 if (do_rs)
2675 tx_ring->packet_stride = 0;
2677 tx_desc->cmd_type_offset_bsz =
2678 build_ctob(td_cmd, td_offset, size, td_tag) |
2679 cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
2680 I40E_TX_DESC_CMD_EOP) <<
2681 I40E_TXD_QW1_CMD_SHIFT);
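/* RS asks the hardware to write the descriptor back once it is done; EOP
 * alone only marks the end of the packet, so leaving RS clear on most
 * descriptors reduces descriptor write-back traffic.
 */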
2683 /* notify HW of packet */
2684 if (!tail_bump)
2685 prefetchw(tx_desc + 1);
2687 if (tail_bump) {
2688 /* Force memory writes to complete before letting h/w
2689 * know there are new descriptors to fetch. (Only
2690 * applicable for weak-ordered memory model archs,
2691 * such as IA-64).
2692 */
2693 wmb();
2694 writel(i, tx_ring->tail);
2695 }
2697 return;
2699 dma_error:
2700 dev_info(tx_ring->dev, "TX DMA map failed\n");
2702 /* clear dma mappings for failed tx_bi map */
2703 for (;;) {
2704 tx_bi = &tx_ring->tx_bi[i];
2705 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
2706 if (tx_bi == first)
2707 break;
2708 if (i == 0)
2709 i = tx_ring->count;
2710 i--;
2711 }
2713 tx_ring->next_to_use = i;
2714 }
2716 /**
2717 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
2718 * @skb: send buffer
2719 * @tx_ring: ring to send buffer on
2721 * Returns number of data descriptors needed for this skb. Returns 0 to indicate
2722 * there are not enough descriptors available in this ring since we need at least
2723 * one descriptor.
2724 **/
2725 #ifdef I40E_FCOE
2726 inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
2727 struct i40e_ring *tx_ring)
2728 #else
2729 static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
2730 struct i40e_ring *tx_ring)
2731 #endif
2732 {
2733 unsigned int f;
2734 int count = 0;
2736 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2737 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2738 * + 4 desc gap to avoid the cache line where head is,
2739 * + 1 desc for context descriptor,
2740 * otherwise try next time
2741 */
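/* Worked example: a 2KB linear head plus three 4KB frags on a 4KB-page
 * system costs 1 + 3 = 4 data descriptors here, and the check below
 * reserves another 4 + 1 on top of that.
 */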
2742 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2743 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2745 count += TXD_USE_COUNT(skb_headlen(skb));
2746 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2747 tx_ring->tx_stats.tx_busy++;
2748 return 0;
2749 }
2750 return count;
2751 }
2753 /**
2754 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2755 * @skb: send buffer
2756 * @tx_ring: ring to send buffer on
2758 * Returns NETDEV_TX_OK if sent, else an error code
2759 **/
2760 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2761 struct i40e_ring *tx_ring)
2762 {
2763 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2764 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2765 struct i40e_tx_buffer *first;
2766 u32 td_offset = 0;
2767 u32 tx_flags = 0;
2768 __be16 protocol;
2769 u32 td_cmd = 0;
2770 u8 hdr_len = 0;
2771 int tsyn;
2772 int tso;
2774 if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
2775 return NETDEV_TX_BUSY;
2777 /* prepare the xmit flags */
2778 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2779 goto out_drop;
2781 /* obtain protocol of skb */
2782 protocol = vlan_get_protocol(skb);
2784 /* record the location of the first descriptor for this packet */
2785 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2787 /* setup IPv4/IPv6 offloads */
2788 if (protocol == htons(ETH_P_IP))
2789 tx_flags |= I40E_TX_FLAGS_IPV4;
2790 else if (protocol == htons(ETH_P_IPV6))
2791 tx_flags |= I40E_TX_FLAGS_IPV6;
2793 tso = i40e_tso(tx_ring, skb, &hdr_len,
2794 &cd_type_cmd_tso_mss, &cd_tunneling);
2796 if (tso < 0)
2797 goto out_drop;
2798 else if (tso)
2799 tx_flags |= I40E_TX_FLAGS_TSO;
2801 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2803 if (tsyn)
2804 tx_flags |= I40E_TX_FLAGS_TSYN;
2806 if (i40e_chk_linearize(skb, tx_flags)) {
2807 if (skb_linearize(skb))
2808 goto out_drop;
2809 tx_ring->tx_stats.tx_linearize++;
2810 }
2811 skb_tx_timestamp(skb);
2813 /* always enable CRC insertion offload */
2814 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2816 /* Always offload the checksum, since it's in the data descriptor */
2817 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2818 tx_flags |= I40E_TX_FLAGS_CSUM;
2820 i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2821 tx_ring, &cd_tunneling);
2822 }
2824 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2825 cd_tunneling, cd_l2tag2);
2827 /* Add Flow Director ATR if it's enabled.
2829 * NOTE: this must always be directly before the data descriptor.
2830 */
2831 i40e_atr(tx_ring, skb, tx_flags, protocol);
2833 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2834 td_cmd, td_offset);
2836 return NETDEV_TX_OK;
2838 out_drop:
2839 dev_kfree_skb_any(skb);
2840 return NETDEV_TX_OK;
2841 }
2843 /**
2844 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2845 * @skb: send buffer
2846 * @netdev: network interface device structure
2848 * Returns NETDEV_TX_OK if sent, else an error code
2849 **/
2850 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2851 {
2852 struct i40e_netdev_priv *np = netdev_priv(netdev);
2853 struct i40e_vsi *vsi = np->vsi;
2854 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
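/* skb->queue_mapping was chosen by the stack's queue selection (XPS or
 * ndo_select_queue), so each netdev transmit queue maps onto exactly one
 * hardware Tx ring here.
 */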
2856 /* hardware can't handle really short frames, hardware padding works
2857 * beyond this point
2858 */
2859 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
2860 return NETDEV_TX_OK;
2862 return i40e_xmit_frame_ring(skb, tx_ring);
2863 }