+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+ u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+ struct sk_buff *skb;
+ u32 consumed_bytes;
+ u32 head_offset;
+ u32 frag_offset;
+ u32 wqe_offset;
+ u32 page_idx;
+ u16 byte_cnt;
+ u16 cqe_bcnt;
+ u16 headlen;
+ int i;
+
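+ /* Each CQE reports how many strides of the multi-packet WQE it
+ * consumed; the WQE is recycled only once all of its strides are
+ * accounted for (see mpwrq_cqe_out below).
+ */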
+ wi->consumed_strides += cstrides;
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ goto mpwrq_cqe_out;
+ }
+
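+ /* Filler CQEs carry no packet data; they only consume the strides
+ * left unused at the end of the WQE.
+ */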
+ if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+ rq->stats.mpwqe_filler++;
+ goto mpwrq_cqe_out;
+ }
+
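+ /* The skb linear part holds at most the small-packet threshold of
+ * headers; any remaining payload is attached as page fragments.
+ */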
+ skb = netdev_alloc_skb(rq->netdev,
+ ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
+ sizeof(long)));
+ if (unlikely(!skb))
+ goto mpwrq_cqe_out;
+
+ prefetch(skb->data);
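+ /* Sync only the strides consumed by this packet for CPU access */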
+ wqe_offset = stride_ix * MLX5_MPWRQ_STRIDE_SIZE;
+ consumed_bytes = cstrides * MLX5_MPWRQ_STRIDE_SIZE;
+ dma_sync_single_for_cpu(rq->pdev, wi->dma_info.addr + wqe_offset,
+ consumed_bytes, DMA_FROM_DEVICE);
+
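+ /* Locate the packet within the pages backing the WQE buffer */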
+ head_offset = wqe_offset & (PAGE_SIZE - 1);
+ page_idx = wqe_offset >> PAGE_SHIFT;
+ cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+ headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+ frag_offset = head_offset + headlen;
+ /* If the copied headers run past the first page, the first fragment
+ * starts on the following page of the WQE buffer.
+ */
+ if (unlikely(frag_offset >= PAGE_SIZE)) {
+ page_idx++;
+ frag_offset -= PAGE_SIZE;
+ }
+
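+ /* Attach the payload beyond the copied headers as page fragments,
+ * splitting at page boundaries; truesize is rounded up to whole
+ * strides since strides are the granularity of buffer consumption.
+ */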
+ byte_cnt = cqe_bcnt - headlen;
+ while (byte_cnt) {
+ u32 pg_consumed_bytes =
+ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+ unsigned int truesize =
+ ALIGN(pg_consumed_bytes, MLX5_MPWRQ_STRIDE_SIZE);
+
+ wi->skbs_frags[page_idx]++;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ &wi->dma_info.page[page_idx], frag_offset,
+ pg_consumed_bytes, truesize);
+ byte_cnt -= pg_consumed_bytes;
+ frag_offset = 0;
+ page_idx++;
+ }
+
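+ /* Copy the packet headers into the skb linear part; the length is
+ * rounded up to sizeof(long) to optimize the memcpy.
+ */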
+ skb_copy_to_linear_data(skb,
+ page_address(wi->dma_info.page) + wqe_offset,
+ ALIGN(headlen, sizeof(long)));
+ /* skb linear part was allocated with headlen and aligned to long */
+ skb->tail += headlen;
+ skb->len += headlen;
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+mpwrq_cqe_out:
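+ /* Release the WQE only after all of its strides have been consumed */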
+ if (likely(wi->consumed_strides < MLX5_MPWRQ_NUM_STRIDES))
+ return;
+
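+ /* Unmap the WQE buffer, drop the page references reserved for
+ * strides that no skb fragment took, and release each page.
+ */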
+ dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
+ DMA_FROM_DEVICE);
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i],
+ &wi->dma_info.page[i]._count);
+ put_page(&wi->dma_info.page[i]);
+ }
+ mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+}
+