unsigned int i = 0;
uint32_t rq_ci = rxq->rq_ci;
uint16_t consumed_strd = rxq->consumed_strd;
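+ /* Per-stride head-room size: zero when LRO may consume the whole stride. */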
+ uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM;
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
while (i < pkts_n) {
len -= RTE_ETHER_CRC_LEN;
offset = strd_idx * strd_sz + strd_shift;
addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
- /* Initialize the offload flag. */
- pkt->ol_flags = 0;
/*
* Memcpy packets to the target mbuf if:
* - The size of packet is smaller than mprq_max_memcpy_len.
* - Out of buffer in the Mempool for Multi-Packet RQ.
*/
continue;
}
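/* Memcpy path: copy the packet payload into the mbuf's own data room. */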
rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
+ DATA_LEN(pkt) = len;
} else {
rte_iova_t buf_iova;
struct rte_mbuf_ext_shared_info *shinfo;
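/* The attached stride holds a reference on the MPRQ buffer. */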
rte_atomic16_add_return(&buf->refcnt, 1);
assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
strd_n + 1);
- buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
+ buf_addr = RTE_PTR_SUB(addr, headroom_sz);
/*
* MLX5 device doesn't use iova but it is necessary in a
* case where the Rx packet is transmitted via a
* different PMD.
*/
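/* Attach the stride to the mbuf as an external buffer. */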
rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
buf_len, shinfo);
- rte_pktmbuf_reset_headroom(pkt);
+ /* Set mbuf head-room. */
+ pkt->data_off = headroom_sz;
assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
/*
* Prevent potential overflow due to MTU change through
* kernel interface.
*/
++rxq->stats.idropped;
continue;
}
+ DATA_LEN(pkt) = len;
+ /*
+ * An LRO packet may consume all the stride memory, in
+ * which case packet head-room space is not guaranteed, so
+ * an empty mbuf must be added for the head-room.
+ */
+ if (!rxq->strd_headroom_en) {
+ struct rte_mbuf *headroom_mbuf =
+ rte_pktmbuf_alloc(rxq->mp);
+
+ if (unlikely(headroom_mbuf == NULL)) {
+ rte_pktmbuf_free_seg(pkt);
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
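+ /* Chain the empty head-room mbuf ahead of the data mbuf. */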
+ PORT(pkt) = rxq->port_id;
+ NEXT(headroom_mbuf) = pkt;
+ pkt = headroom_mbuf;
+ NB_SEGS(pkt) = 2;
+ }
}
rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
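/* Coalesced LRO packet: expose the segment size via tso_segsz. */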
if (lro_num_seg > 1) {
pkt->tso_segsz = strd_sz;
}
PKT_LEN(pkt) = len;
- DATA_LEN(pkt) = len;
PORT(pkt) = rxq->port_id;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment bytes counter. */