/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
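
/* Illustrative usage (editor's sketch, not part of the original source):
 * a plain data descriptor for a 256-byte buffer, with EOP and RS set and
 * no offload offsets or VLAN tag, would be composed as
 *
 *	tx_desc->cmd_type_offset_bsz =
 *		build_ctob(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS,
 *			   0, 256, 0);
 *
 * i.e. all four fields are packed into the descriptor's second quad word.
 */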

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10

/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

#define IP_HEADER_OFFSET		14
#define I40E_UDPIP_DUMMY_PACKET_LEN	42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
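
	/* Editor's note on the dummy frame above: bytes 12-13 (0x08, 0) are
	 * the IPv4 ethertype, byte 14 (0x45) is IP version 4 with a 5-word
	 * header, 0x1c is the 28-byte IP total length (20-byte IP header
	 * plus 8-byte UDP header), 0x40 0x00 sets the DF flag, the next
	 * 0x40 is a TTL of 64, and 0x11 is IPPROTO_UDP.  The addresses and
	 * ports are left zero and patched in below.
	 */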

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	return err ? -EOPNOTSUPP : 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN	54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Always returns -EOPNOTSUPP
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}

#define I40E_IP_DUMMY_PACKET_LEN	34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the filter spec to program or remove
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
			}
		} else {
			dev_info(&pdev->dev,
				 "FD filter programming failed due to incorrect filter parameters\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);

		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 head, tail;

	head = i40e_get_head(ring);
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
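
/* Worked example (editor's illustration): with a ring of count 512,
 * head 500 and tail 10, the software tail has wrapped past the head,
 * so the pending count is tail + count - head = 10 + 512 - 500 = 22
 * descriptors still owned by the hardware.
 */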

#define WB_STRIDE 0x3
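
/* Editor's note: WB_STRIDE is only used below as (j / (WB_STRIDE + 1)) == 0,
 * which for unsigned j is simply j < 4; i.e. a writeback is forced only
 * when fewer than four descriptors are still waiting to be written back.
 */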

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		unsigned int j = 0;

		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		j = i40e_get_tx_pending(tx_ring);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
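	/* Editor's note: DESC_NEEDED comes from i40e_txrx.h (in this era of
	 * the driver it is MAX_SKB_FRAGS + 4), so the stopped queue is only
	 * woken once roughly two worst-case packets' worth of descriptors
	 * are free again, which avoids rapid stop/wake thrashing.
	 */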
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;

	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		u32 val;

		if (q_vector->arm_wb_state)
			return;

		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1),
		     val);
		q_vector->arm_wb_state = true;
	} else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;
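
	/* Worked example (editor's illustration, assuming the 2-usec
	 * register granularity and the ITR_COUNTDOWN_START of 100 from
	 * i40e.h): at the 20K ints/s setting the ITR register holds 25,
	 * so one interrupt covers 50 usecs and usecs = 50 * 100 = 5000.
	 * A flow moving 10 MB/s (10 bytes/usec) delivers about 50000
	 * bytes in that window, so bytes_per_int = 50000 / 5000 = 10,
	 * exactly the lowest/low latency boundary tested below.
	 */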

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (ring_is_ps_enabled(rx_ring)) {
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf) {
			dma_free_coherent(dev,
					  bufsz,
					  rx_bi->hdr_buf,
					  rx_bi->dma);
			for (i = 0; i < rx_ring->count; i++) {
				rx_bi = &rx_ring->rx_bi[i];
				rx_bi->dma = 0;
				rx_bi->hdr_buf = NULL;
			}
		}
	}
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_alloc_rx_headers - allocate rx header buffers
 * @rx_ring: ring to alloc buffers
 *
 * Allocate rx header buffers for the entire ring. As these are static,
 * this is only called when setting up a new ring.
 **/
void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	dma_addr_t dma;
	void *buffer;
	int buf_size;
	int i;

	if (rx_ring->rx_bi[0].hdr_buf)
		return;
	/* Make sure the buffers don't cross cache line boundaries. */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		return -ENOMEM;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

		if (bi->skb) /* desc is in use */
			goto no_buffers;
		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				rx_ring->rx_stats.alloc_page_failed++;
				goto no_buffers;
			}
		}

		if (!bi->page_dma) {
			/* use a half page if we're re-using */
			bi->page_offset ^= PAGE_SIZE / 2;
			bi->page_dma = dma_map_page(rx_ring->dev,
						    bi->page,
						    bi->page_offset,
						    PAGE_SIZE / 2,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      bi->page_dma)) {
				rx_ring->rx_stats.alloc_page_failed++;
				bi->page_dma = 0;
				goto no_buffers;
			}
		}

		dma_sync_single_range_for_device(rx_ring->dev,
						 bi->dma,
						 0,
						 rx_ring->rx_hdr_len,
						 DMA_FROM_DEVICE);
		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}

/**
 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		rx_desc->read.hdr_addr = 0;
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring:  rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}
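
/* Editor's note: VLAN_VID_MASK is 0x0fff, so a priority-only tag
 * (VID 0, e.g. 0x6000 for PCP 3) is intentionally not attached to the
 * skb above; only tags carrying a real VLAN ID are offloaded to the
 * stack.
 */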

/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_status: status value of last descriptor in packet
 * @rx_error: error value of last descriptor in packet
 * @rx_ptype: ptype value of last descriptor in packet
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    u32 rx_status,
				    u32 rx_error,
				    u16 rx_ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
	bool ipv4 = false, ipv6 = false;
	bool ipv4_tunnel, ipv6_tunnel;
	__wsum rx_udp_csum;
	struct iphdr *iph;
	__sum16 csum;

	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
		ipv4 = true;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
		ipv6 = true;

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
	 * it in the driver, hardware does not do it for us.
	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
	 * so the total length of IPv4 header is IHL*4 bytes
	 * The UDP_0 bit *may* be set if the *inner* header is UDP
	 */
	if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
	    (ipv4_tunnel)) {
		skb->transport_header = skb->mac_header +
					sizeof(struct ethhdr) +
					(ip_hdr(skb)->ihl * 4);

		/* Add 4 bytes for VLAN tagged packets */
		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
					  skb->protocol == htons(ETH_P_8021AD))
					  ? VLAN_HLEN : 0;

		if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
		    (udp_hdr(skb)->check != 0)) {
			rx_udp_csum = udp_csum(skb);
			iph = ip_hdr(skb);
			csum = csum_tcpudp_magic(
					iph->saddr, iph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, rx_udp_csum);

			if (udp_hdr(skb)->check != csum)
				goto checksum_fail;

		} /* else its GRE and so no outer UDP header */
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ipv4_tunnel || ipv6_tunnel;

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * i40e_rx_hash - returns the hash value from the Rx descriptor
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 **/
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
			       union i40e_rx_desc *rx_desc)
{
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if ((ring->netdev->features & NETIF_F_RXHASH) &&
	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
	else
		return 0;
}

/**
 * i40e_ptype_to_hash - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * @rx_ring:  rx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns the number of packets cleaned
 **/
static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	const int current_node = numa_mem_id();
	struct i40e_vsi *vsi = rx_ring->vsi;
	u16 i = rx_ring->next_to_clean;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u8 rx_ptype;
	u64 qword;

	if (budget <= 0)
		return 0;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();
		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		if (likely(!skb)) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_hdr_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				break;
			}

			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			/* we are reusing so sync this buffer for CPU use */
			dma_sync_single_range_for_cpu(rx_ring->dev,
						      rx_bi->dma,
						      0,
						      rx_ring->rx_hdr_len,
						      DMA_FROM_DEVICE);
		}
		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		prefetch(rx_bi->page);
		rx_bi->skb = NULL;
		cleaned_count++;
		if (rx_hbo || rx_sph) {
			int len;

			if (rx_hbo)
				len = I40E_RX_HDR_SIZE;
			else
				len = rx_header_len;
			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
		} else if (skb->len == 0) {
			int len;

			len = (rx_packet_len > skb_headlen(skb) ?
				skb_headlen(skb) : rx_packet_len);
			memcpy(__skb_put(skb, len),
			       rx_bi->page + rx_bi->page_offset,
			       len);
			rx_bi->page_offset += len;
			rx_packet_len -= len;
		}

		/* Get the rest of the data if this was a header split */
		if (rx_packet_len) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_bi->page,
					   rx_bi->page_offset,
					   rx_packet_len);

			skb->len += rx_packet_len;
			skb->data_len += rx_packet_len;
			skb->truesize += rx_packet_len;

			if ((page_count(rx_bi->page) == 1) &&
			    (page_to_nid(rx_bi->page) == current_node))
				get_page(rx_bi->page);
			else
				rx_bi->page = NULL;

			dma_unmap_page(rx_ring->dev,
				       rx_bi->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_bi->page_dma = 0;
		}
		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];
			next_buffer->skb = skb;
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;

	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
 * @rx_ring:  rx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	struct i40e_vsi *vsi = rx_ring->vsi;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u16 rx_packet_len;
	u8 rx_ptype;
	u64 qword;
	u16 i;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		prefetch(skb->data);

		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		rx_bi->skb = NULL;
		cleaned_count++;

		/* Get the header and possibly the whole packet
		 * If this is an skb from previous receive dma will be 0
		 */
		skb_put(skb, rx_packet_len);
		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
				 DMA_FROM_DEVICE);
		rx_bi->dma = 0;

		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
	u32 val;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);

	return val;
}

/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
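
/* Illustrative example (editor's sketch; the bit positions are an
 * assumption based on the register definitions in i40e_register.h,
 * where INTENA is bit 0, CLEARPBA bit 1, ITR_INDX starts at bit 3 and
 * INTERVAL at bit 5, and I40E_RX_ITR selects ITR index 0):
 *
 *	i40e_buildreg_itr(I40E_RX_ITR, 25)
 *
 * would compose 0x1 | 0x2 | (0 << 3) | (25 << 5) = 0x323, i.e. enable
 * the interrupt, clear the PBA and load a 50-usec Rx interval.
 */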

/**
 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 *
 **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;
	bool rx = false, tx = false;
	u32 rxval, txval;
	int vector;

	vector = (q_vector->v_idx + vsi->base_vector);

	/* avoid dynamic calculation if in countdown mode OR if
	 * all dynamic is disabled
	 */
	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);

	if (q_vector->itr_countdown > 0 ||
	    (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
	     !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
		goto enable_int;
	}

	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
	}

	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
	}

	if (rx || tx) {
		/* get the higher of the two ITR adjustments and
		 * use the same value for both ITR registers
		 * when in adaptive mode (Rx and/or Tx)
		 */
		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);

		q_vector->tx.itr = q_vector->rx.itr = itr;
		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
		tx = true;
		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
		rx = true;
	}

	/* only need to enable the interrupt once, but need
	 * to possibly update both ITR values
	 */
	if (rx) {
		/* set the INTENA_MSK_MASK so that this first write
		 * won't actually enable the interrupt, instead just
		 * updating the ITR (it's bit 31 PF and VF)
		 */
		rxval |= BIT(31);
		/* don't check _DOWN because interrupt isn't being enabled */
		wr32(hw, INTREG(vector - 1), rxval);
	}

enable_int:
	if (!test_bit(__I40E_DOWN, &vsi->state))
		wr32(hw, INTREG(vector - 1), txval);

	if (q_vector->itr_countdown)
		q_vector->itr_countdown--;
	else
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
}

/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int work_done = 0;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
		arm_wb |= ring->arm_wb;
		ring->arm_wb = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
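
	/* Editor's illustration: with a NAPI budget of 64 and four ring
	 * pairs on this vector, each Rx ring is cleaned with a budget of
	 * max(64 / 4, 1) = 16 packets per poll.
	 */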

	i40e_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		if (ring_is_ps_enabled(ring))
			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
		else
			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);

		work_done += cleaned;
		/* if we didn't clean as many as budgeted, we must be done */
		clean_complete &= (budget_per_ring != cleaned);
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
tx_only:
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			i40e_force_wb(vsi, q_vector);
		}
		return budget;
	}

	if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete_done(napi, work_done);
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		i40e_update_enable_itr(vsi, q_vector);
	} else { /* Legacy mode */
		struct i40e_hw *hw = &vsi->back->hw;
		/* We re-enable the queue 0 cause, but
		 * don't worry about dynamic_enable
		 * because we left it on for the other
		 * possible interrupts during napi
		 */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		wr32(hw, I40E_QINT_RQCTL(0), qval);
		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);
		i40e_irq_dynamic_enable_icr0(vsi->back);
	}
	return 0;
}

/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring:  ring to add programming descriptor to
 * @skb:      send buffer
 * @tx_flags: send tx flags
 * @protocol: wire protocol
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, __be16 protocol)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
		return;

	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
		/* snag network header to get L4 type and address */
		hdr.network = skb_network_header(skb);

		/* Currently only IPv4/IPv6 with TCP is supported
		 * access ihl as u8 to avoid unaligned access on ia64
		 */
		if (tx_flags & I40E_TX_FLAGS_IPV4)
			hlen = (hdr.network[0] & 0x0F) << 2;
		else if (protocol == htons(ETH_P_IPV6))
			hlen = sizeof(struct ipv6hdr);
		else
			return;
	} else {
		hdr.network = skb_inner_network_header(skb);
		hlen = skb_inner_network_header_len(skb);
	}

	/* Currently only IPv4/IPv6 with TCP is supported
	 * Note: tx_flags gets modified to reflect inner protocols in
	 * tx_enable_csum function if encap is enabled.
	 */
	if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
	    (hdr.ipv4->protocol != IPPROTO_TCP))
		return;
	else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
		 (hdr.ipv6->nexthdr != IPPROTO_TCP))
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;
	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
		/* HW ATR eviction will take care of removing filters on FIN
		 * and RST packets.
		 */
		if (th->fin || th->rst)
			return;
	}

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;
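
	/* Editor's note: with the driver's default sample rate of 20
	 * (I40E_DEFAULT_ATR_SAMPLE_RATE in i40e.h), roughly one packet in
	 * every twenty per queue reaches the programming logic below, plus
	 * every SYN/FIN/RST, which always forces a filter add or remove.
	 */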

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
		dtype_cmd |=
			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	else
		dtype_cmd |=
			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}
2103 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2105 * @tx_ring: ring to send buffer on
2106 * @flags: the tx flags to be set
2108 * Checks the skb and set up correspondingly several generic transmit flags
2109 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2111 * Returns error code indicate the frame should be dropped upon error and the
2112 * otherwise returns 0 to indicate the flags has been set properly.
2115 inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2116 struct i40e_ring *tx_ring,
2119 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2120 struct i40e_ring *tx_ring,
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		goto out;

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
			    I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}
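
/* A minimal sketch of the resulting flag layout, assuming the shift and
 * mask definitions in i40e_txrx.h (the VLAN TCI lives in the upper 16
 * bits of tx_flags).  For a hardware-accelerated tag with VID 5 and
 * 802.1p priority 1, i.e. TCI 0x2005:
 *
 *	tx_flags = (0x2005 << I40E_TX_FLAGS_VLAN_SHIFT) |
 *		   I40E_TX_FLAGS_HW_VLAN;
 *
 * i40e_tx_map() later recovers the TCI with
 * (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> I40E_TX_FLAGS_VLAN_SHIFT
 * and places it in L2TAG1 of the data descriptor.
 */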
/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (ipv6h->version == 6) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
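
/* Worked example (illustrative values only): a 5894-byte skb carrying
 * 4 x 1460-byte TCP segments behind 54 bytes of Ethernet/IPv4/TCP
 * headers gives hdr_len = 54, cd_tso_len = 5894 - 54 = 5840 and
 * cd_mss = 1460, so Quad Word 1 accumulates
 *
 *	((u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT) |
 *	(5840ULL << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
 *	(1460ULL << I40E_TXD_CTX_QW1_MSS_SHIFT)
 *
 * and the hardware replays the pseudo-header-checksummed headers onto
 * each segment it cuts.
 */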
/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}
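
/* Design note: test_and_set_bit_lock() bounds the PF to one Tx timestamp
 * in flight at a time.  The reference taken with skb_get() is dropped and
 * the bit cleared once the PTP code consumes the latched timestamp (or
 * times out), so a second request arriving in the meantime simply goes
 * out untimestamped instead of blocking the hot path.
 */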
/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb:          send buffer
 * @tx_flags:     pointer to Tx flags currently set
 * @td_cmd:       Tx descriptor command bits to set
 * @td_offset:    Tx descriptor header offsets to set
 * @tx_ring:      Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
				u32 *td_cmd, u32 *td_offset,
				struct i40e_ring *tx_ring,
				u32 *cd_tunneling)
{
	struct ipv6hdr *this_ipv6_hdr;
	unsigned int this_tcp_hdrlen;
	struct iphdr *this_ip_hdr;
	u32 network_hdr_len;
	u8 l4_hdr = 0;
	struct udphdr *oudph;
	struct iphdr *oiph;
	u32 l4_tunnel = 0;

	if (skb->encapsulation) {
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			oudph = udp_hdr(skb);
			oiph = ip_hdr(skb);
			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_GRE:
			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
			break;
		default:
			return;
		}
		network_hdr_len = skb_inner_network_header_len(skb);
		this_ip_hdr = inner_ip_hdr(skb);
		this_ipv6_hdr = inner_ipv6_hdr(skb);
		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			if (*tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				ip_hdr(skb)->check = 0;
		}

		/* Now set the ctx descriptor fields */
		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
				 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
				 l4_tunnel |
				 ((skb_inner_network_offset(skb) -
				   skb_transport_offset(skb)) >> 1) <<
				 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
		if (this_ip_hdr->version == 6) {
			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
			*tx_flags |= I40E_TX_FLAGS_IPV6;
		}
		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
					oiph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, 0);
			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
		}
	} else {
		network_hdr_len = skb_network_header_len(skb);
		this_ip_hdr = ip_hdr(skb);
		this_ipv6_hdr = ipv6_hdr(skb);
		this_tcp_hdrlen = tcp_hdrlen(skb);
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_hdr = this_ip_hdr->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (*tx_flags & I40E_TX_FLAGS_TSO) {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			     I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		l4_hdr = this_ipv6_hdr->nexthdr;
		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			     I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	}
	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
	*td_offset |= (skb_network_offset(skb) >> 1) <<
		      I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
}
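
/* Worked example (illustrative): a plain TCP/IPv4 frame with no IP or
 * TCP options has a 14-byte MAC header (7 words), a 20-byte IP header
 * (5 dwords) and a 20-byte TCP header (5 dwords), so the code above
 * builds
 *
 *	td_offset = (7 << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) |
 *		    (5 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
 *		    (5 << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);
 *
 * MACLEN counts 2-byte words while IPLEN and L4LEN count 4-byte dwords,
 * which is why the lengths are shifted right by 1 and 2 respectively.
 */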
/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:             ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling:        Quad Word 0 - bits 0-31
 * @cd_l2tag2:           Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
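
/* The early return above works because i40e_xmit_frame_ring() seeds
 * cd_type_cmd_tso_mss with the bare I40E_TX_DESC_DTYPE_CONTEXT value: if
 * no offload (TSO, timestamping, tunneling, L2TAG2) has OR'd anything
 * into the three context words, a context descriptor would be a no-op,
 * so we skip it and save the ring slot it would have consumed.
 */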
/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size of the buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
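
/* This is the usual stop-then-recheck pattern for a producer/consumer
 * ring: stop the subqueue first, then re-read the free descriptor count.
 * The smp_mb() orders the stop against the re-read, so a concurrent
 * Tx cleanup either sees the stopped queue and wakes it, or we see the
 * descriptors it just freed; without the barrier the queue could stall
 * in the stopped state indefinitely.
 */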
/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size of the buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
#ifdef I40E_FCOE
inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @tx_flags: collected send information
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{
	struct skb_frag_struct *frag;
	bool linearize = false;
	unsigned int size = 0;
	u16 num_frags;
	u16 gso_segs;

	num_frags = skb_shinfo(skb)->nr_frags;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
		u16 j = 0;

		if (num_frags < (I40E_MAX_BUFFER_TXD))
			goto linearize_chk_done;
		/* try the simple math, if we have too many frags per segment */
		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
		    I40E_MAX_BUFFER_TXD) {
			linearize = true;
			goto linearize_chk_done;
		}
		frag = &skb_shinfo(skb)->frags[0];
		/* we might still have more fragments per segment */
		do {
			size += skb_frag_size(frag);
			frag++; j++;
			if ((size >= skb_shinfo(skb)->gso_size) &&
			    (j < I40E_MAX_BUFFER_TXD)) {
				size = (size % skb_shinfo(skb)->gso_size);
				j = (size) ? 1 : 0;
			}
			if (j == I40E_MAX_BUFFER_TXD) {
				linearize = true;
				break;
			}
			num_frags--;
		} while (num_frags);
	} else {
		if (num_frags >= I40E_MAX_BUFFER_TXD)
			linearize = true;
	}

linearize_chk_done:
	return linearize;
}
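
/* Worked example (illustrative): a TSO skb with num_frags = 10 and
 * gso_segs = 4 passes the quick check, since DIV_ROUND_UP(10 + 4, 4) = 4
 * is within the I40E_MAX_BUFFER_TXD limit of 8, but the frag walk above
 * can still trip if a single MSS worth of payload happens to span
 * I40E_MAX_BUFFER_TXD or more frags.  In the non-TSO case the whole
 * frame is one segment, so nr_frags >= I40E_MAX_BUFFER_TXD alone forces
 * the linearize.
 */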
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:   ring to send buffer on
 * @skb:       send buffer
 * @first:     first buffer info buffer to use
 * @tx_flags:  collected send information
 * @hdr_len:   size of the packet header
 * @td_cmd:    the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
#ifdef I40E_FCOE
inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			       struct i40e_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;
	u16 desc_count = 0;
	bool tail_bump = true;
	bool do_rs = false;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;

	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;
	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}
	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
			     first->bytecount);
	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
	/* Algorithm to optimize tail and RS bit setting:
	 * if xmit_more is supported
	 *	if xmit_more is true
	 *		do not update tail and do not mark RS bit.
	 *	if xmit_more is false and last xmit_more was false
	 *		if every packet spanned less than 4 desc
	 *			then set RS bit on 4th packet and update tail
	 *			on every packet
	 *		else
	 *			update tail and set RS bit on every packet.
	 *	if xmit_more is false and last_xmit_more was true
	 *		update tail and set RS bit.
	 *
	 * Optimization: wmb to be issued only in case of tail update.
	 * Also optimize the Descriptor WB path for RS bit with the same
	 * algorithm.
	 *
	 * Note: If there are fewer than 4 packets
	 * pending and interrupts were disabled the service task will
	 * trigger a force WB.
	 */
	if (skb->xmit_more &&
	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						    tx_ring->queue_index))) {
		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		tail_bump = false;
	} else if (!skb->xmit_more &&
		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						       tx_ring->queue_index)) &&
		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
		   (tx_ring->packet_stride < WB_STRIDE) &&
		   (desc_count < WB_STRIDE)) {
		tx_ring->packet_stride++;
	} else {
		tx_ring->packet_stride = 0;
		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		do_rs = true;
	}
	if (do_rs)
		tx_ring->packet_stride = 0;

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
						  I40E_TX_DESC_CMD_EOP) <<
						  I40E_TXD_QW1_CMD_SHIFT);
	/* notify HW of packet */
	if (!tail_bump)
		prefetchw(tx_desc + 1);

	if (tail_bump) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
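
/* Two details worth calling out in the mapping loop above: a buffer
 * larger than I40E_MAX_DATA_PER_TXD is carved into several descriptors
 * that all point into the same DMA mapping (only dma and size advance;
 * the unmap length stays on the tx_bi that owns the mapping), and the RS
 * (report status) bit is batched roughly once per WB_STRIDE descriptors
 * while xmit_more keeps the tail write deferred, trading completion
 * latency for fewer descriptor write-backs and MMIO tail writes.
 */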
/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
#ifdef I40E_FCOE
inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
#else
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
					     struct i40e_ring *tx_ring)
#endif
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}
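
/* Worked example (illustrative, assuming TXD_USE_COUNT() from
 * i40e_txrx.h rounds a buffer length up to whole descriptors of
 * I40E_MAX_DATA_PER_TXD bytes each): a TSO skb with a 254-byte linear
 * head and 15 page frags of 4 KB each needs count = 1 + 15 = 16 data
 * descriptors, so the ring must have 16 + 4 + 1 free entries before
 * i40e_maybe_stop_tx() lets the transmit proceed.
 */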
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	if (i40e_chk_linearize(skb, tx_flags)) {
		if (skb_linearize(skb))
			goto out_drop;
		tx_ring->tx_stats.tx_linearize++;
	}
	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;

		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
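
/* Descriptor emission order for one frame, as implemented above: an
 * optional context descriptor (TSO/TSYN/tunneling state), an optional
 * ATR filter programming descriptor, then the data descriptor(s) built
 * by i40e_tx_map().  The ATR descriptor must immediately precede the
 * data descriptors, which is why i40e_atr() runs last before the
 * mapping.
 */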
/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}
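
/* Returning NETDEV_TX_OK on a failed pad is correct: skb_put_padto()
 * frees the skb on allocation failure, so there is nothing left for the
 * stack to retry.  Padding up front keeps frames shorter than
 * I40E_MIN_TX_LEN from ever reaching the descriptor path.
 */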