volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
static __rte_always_inline void
-mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx);
+mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
+		 const unsigned int strd_n);

static __rte_always_inline void
-mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
+mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
+		 const unsigned int strd_n)
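What the new parameter presumably buys: the stride count is a per-queue constant, so the caller can derive it once per burst instead of mprq_buf_replace() re-reading it from rxq on every replacement, and the const qualifier keeps it foldable once the function is inlined. A sketch of an assumed call site, with strd_num_n as the log2 stride count in mlx5_rxq_data and rq_ci/wq_mask standing in for the ring-index locals (hypothetical names):

	/* Once per burst: the stride count is fixed by queue config. */
	const unsigned int strd_n = 1 << rxq->strd_num_n;

	/* Per recycled buffer: */
	mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);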
	wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
	/* If there's only one MR, no need to replace LKey in WQE. */
	if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
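The hunk is cut off at the condition; in the surrounding function the branch body presumably refreshes the LKey through the per-queue MR cache, along these lines (mlx5_rx_addr2mr() is the lookup helper in mlx5_rxtx.h; treat the exact statement as an assumption):

	if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
		/* Several MRs registered: the replacement buffer may sit
		 * in a different region, so look its LKey up again. */
		wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);

With a single cached MR, the LKey written at queue setup covers every buffer from the Rx mempool, which is why the common case leaves the WQE untouched.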
		if (consumed_strd == strd_n) {
			/* Replace WQE only if the buffer is still in use. */
			if (rte_atomic16_read(&buf->refcnt) > 1) {
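For orientation: once all strd_n strides of the current buffer have been handed out, the buffer is recycled, and a fresh buffer is needed only while external mbufs still hold strides. A sketch of the assumed branch body (rq_ci and wq_mask are hypothetical ring-index locals):

	if (consumed_strd == strd_n) {
		/* All strides handed out; decide how to repost the WQE. */
		if (rte_atomic16_read(&buf->refcnt) > 1) {
			/* In-flight mbufs still reference this buffer:
			 * swap in a fresh one from the MPRQ mempool. */
			mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
		}
		/* refcnt == 1: the PMD is the sole owner and the same
		 * buffer can be reposted as-is. */
	}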
-		shinfo = rte_pktmbuf_ext_shinfo_init_helper(addr,
-				&buf_len, mlx5_mprq_buf_free_cb, buf);
+		shinfo = &buf->shinfos[strd_idx];
+		rte_mbuf_ext_refcnt_set(shinfo, 1);
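What this change buys, hedged: rte_pktmbuf_ext_shinfo_init_helper() derives a shared-info block from the buffer tail and rewrites free_cb/fcb_opaque on every attach, whereas indexing a preallocated per-stride array leaves only the refcount to reset. An illustrative layout, assuming one slot per stride is allocated together with the MPRQ buffer and its callback fields are filled once at mempool-object init:

	struct mlx5_mprq_buf {
		struct rte_mempool *mp;
		rte_atomic16_t refcnt; /* Strides currently referenced. */
		/* One shared info per stride; free_cb/fcb_opaque are set
		 * once, so per-packet code only resets the refcount. */
		struct rte_mbuf_ext_shared_info shinfos[];
	};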
		/*
		 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
		 * attaching the stride to mbuf and more offload flags
		 * will be added below by calling rxq_cq_to_mbuf().
		 * Other fields will be overwritten.
		 */
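A sketch of the attach step the comment refers to, using the standard rte_pktmbuf_attach_extbuf() API from rte_mbuf.h (pkt, addr, buf_iova and buf_len are burst-loop locals assumed here):

	/* Attaching the stride as an external buffer sets
	 * EXT_ATTACHED_MBUF in pkt->ol_flags; rxq_cq_to_mbuf() is then
	 * expected to add RSS hash, packet type and further offload
	 * flags, and the remaining mbuf fields get overwritten. */
	rte_pktmbuf_attach_extbuf(pkt, addr, buf_iova, buf_len, shinfo);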