/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RX_H_
#define RTE_PMD_MLX5_RX_H_

#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include <mlx5_common_mr.h>

#include "mlx5.h"
#include "mlx5_autoconf.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10

#define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv
#define RXQ_DEV(rxq_ctrl) ETH_DEV(RXQ_PORT(rxq_ctrl))
#define RXQ_PORT_ID(rxq_ctrl) PORT_ID(RXQ_PORT(rxq_ctrl))

/* First entry must be NULL for comparison. */
#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
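
/*
 * Worked example (illustrative): a cache B-tree holding three registered
 * memory regions reports len == 4, because entry 0 is the NULL sentinel
 * used for comparison, so mlx5_mr_btree_len(bt) evaluates to 3.
 */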

struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
				sizeof(struct mlx5_mprq_buf) + \
				(strd_n) * \
				sizeof(struct rte_mbuf_ext_shared_info) + \
				RTE_PKTMBUF_HEADROOM))
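
/*
 * Buffer layout implied by the macro above (illustrative sketch, not an
 * additional definition): the control structure is followed by one
 * rte_mbuf_ext_shared_info slot per stride, then headroom, then the
 * strides themselves, so the macro returns the address of stride 0.
 * The per-stride shared-info slots let strides be attached to mbufs as
 * external buffers with independent reference counts.
 *
 *	| mlx5_mprq_buf | strd_n shinfo slots | headroom | stride 0 | ...
 *	                                                 ^
 *	                         mlx5_mprq_buf_addr(ptr, strd_n)
 */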

#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9

enum mlx5_rxq_err_state {
	MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
	MLX5_RXQ_ERR_STATE_NEED_RESET,
	MLX5_RXQ_ERR_STATE_NEED_READY,
};

enum mlx5_rqx_code {
	MLX5_RXQ_CODE_EXIT = 0,
	MLX5_RXQ_CODE_NOMBUF,
	MLX5_RXQ_CODE_DROPPED,
};

struct mlx5_eth_rxseg {
	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
	uint16_t length; /**< Segment data length, configures split point. */
	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
	uint32_t reserved; /**< Reserved field. */
};
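
/*
 * Illustrative split description (hdr_mp, pay_mp, and the 128-byte split
 * point are assumptions for the example, not driver defaults): placing the
 * first 128 bytes of every packet in a header pool and the remainder in a
 * payload pool.
 *
 *	rxseg[0] = (struct mlx5_eth_rxseg){ .mp = hdr_mp, .length = 128 };
 *	rxseg[1] = (struct mlx5_eth_rxseg){ .mp = pay_mp, .length = 0 };
 *
 * A zero length in the last entry lets the final segment take whatever
 * remains of the packet.
 */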

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int rt_timestamp:1; /* Realtime timestamp format. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int strd_num_n:5; /* Log 2 of the number of strides. */
	unsigned int strd_sz_n:4; /* Log 2 of stride size. */
	unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
	unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
	unsigned int lro:1; /* Enable LRO. */
	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
	unsigned int mcqe_format:3; /* CQE compression format. */
	unsigned int shared:1; /* Shared RXQ. */
	unsigned int delay_drop:1; /* Enable delay drop. */
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint16_t port_id;
	uint32_t elts_ci;
	uint32_t rq_ci;
	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
	uint32_t rq_pi;
	uint32_t cq_ci;
	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
	uint32_t byte_mask;
	union {
		struct rxq_zip zip; /* Compressed context. */
		uint16_t decompressed;
		/* Number of ready mbufs decompressed from the CQ. */
	};
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
	volatile void *wqes;
	volatile struct mlx5_cqe(*cqes)[];
	struct rte_mbuf *(*elts)[];
	struct mlx5_mprq_buf *(*mprq_bufs)[];
	struct rte_mempool *mp;
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	uint16_t idx; /* Queue index. */
	struct mlx5_rxq_stats stats;
	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	struct mlx5_uar_data uar_data; /* CQ doorbell. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
	uint32_t tunnel; /* Tunnel information. */
	int timestamp_offset; /* Dynamic mbuf field for timestamp. */
	uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
	uint64_t flow_meta_mask;
	int32_t flow_meta_offset;
	uint32_t flow_meta_port_mask;
	uint32_t rxseg_n; /* Number of split segment descriptions. */
	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
	/* Buffer split segment descriptions - sizes, offsets, pools. */
} __rte_cache_aligned;
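
/*
 * Worked example of the log2 convention used above: with elts_n == 8 the
 * queue owns 1 << 8 == 256 mbufs, and with cqe_n == 10 the CQ holds
 * 1 << 10 == 1024 entries.
 */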

enum mlx5_rxq_type {
	MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
	MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
	MLX5_RXQ_TYPE_UNDEFINED,
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	struct mlx5_rxq_data rxq; /* Data path structure. */
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	enum mlx5_rxq_type type; /* Rxq type. */
	unsigned int socket; /* CPU socket ID for allocations. */
	LIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. */
	uint32_t share_group; /* Group ID of shared RXQ. */
	uint16_t share_qid; /* Shared RxQ ID in group. */
	unsigned int started:1; /* Whether (shared) RXQ has been started. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
	uint32_t wqn; /* WQ number. */
	uint16_t dump_file_n; /* Number of dump files. */
};

/* RX queue private data. */
struct mlx5_rxq_priv {
	uint16_t idx; /* Queue index. */
	uint32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
	LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	struct mlx5_devx_rq devx_rq;
	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
	uint32_t hairpin_status; /* Hairpin binding status. */
};

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int mlx5_rx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,
				   struct mlx5_rxq_priv *rxq,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   const struct rte_eth_rxseg_split *rx_seg,
				   uint16_t n_seg);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
	(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_priv *mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx);
uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_obj *ind_tbl,
			       bool standalone);
int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
			     struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl,
			      uint16_t *queues, const uint32_t queues_n,
			      bool standalone);
int mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
struct mlx5_list_entry *mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx);
int mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
		       void *cb_ctx);
void mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *mlx5_hrxq_clone_cb(void *tool_ctx,
					   struct mlx5_list_entry *entry,
					   void *cb_ctx __rte_unused);
void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,
			     struct mlx5_list_entry *entry);
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
		       struct mlx5_flow_rss_desc *rss_desc);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
	(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
		     const uint8_t *rss_key, uint32_t rss_key_len,
		     uint64_t hash_fields,
		     const uint16_t *queues, uint32_t queues_n);

/* mlx5_rx.c */

uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(void *rx_queue);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			   struct rte_eth_burst_mode *mode);
int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);

/* Vectorized version of mlx5_rx.c */
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
				uint16_t pkts_n);

static int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);

/**
 * Query LKey for an address on Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct rte_mempool *mp;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Slower search in the mempool database on miss. */
	mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
	return mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
}

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param mb
 *   Buffer to search the address of.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_mb2mr(struct mlx5_rxq_data *rxq, struct rte_mbuf *mb)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Slower search in the mempool database on miss. */
	return mlx5_mr_mempool2mr_bh(mr_ctrl, mb->pool, addr);
}
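
/*
 * Usage sketch (illustrative only, not part of the driver API): refilling
 * one WQE data segment with a replenishment mbuf, resolving its LKey with
 * mlx5_rx_mb2mr() above. The helper name is hypothetical and the caller is
 * assumed to have set DATA_LEN() to the usable buffer length beforehand.
 */
static __rte_always_inline void
mlx5_rx_wseg_refill_sketch(struct mlx5_rxq_data *rxq,
			   volatile struct mlx5_wqe_data_seg *wseg,
			   struct rte_mbuf *mb)
{
	/* Point the segment at the mbuf data area. */
	wseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(mb, uintptr_t));
	wseg->byte_count = rte_cpu_to_be_32(DATA_LEN(mb));
	/* Cache-first LKey lookup with mempool-database fallback. */
	wseg->lkey = mlx5_rx_mb2mr(rxq, mb);
}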

/**
 * Convert timestamp from HW format to linear counter
 * from Packet Pacing Clock Queue CQE timestamp format.
 *
 * @param sh
 *   Pointer to the device shared context. Might be needed
 *   to convert according current device configuration.
 * @param ts
 *   Timestamp from CQE to convert.
 *
 * @return
 *   UTC in nanoseconds
 */
static __rte_always_inline uint64_t
mlx5_txpp_convert_rx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t ts)
{
	RTE_SET_USED(sh);
	return (ts & UINT32_MAX) + (ts >> 32) * NS_PER_S;
}
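
/*
 * Worked example: the CQE carries seconds in the upper 32 bits and
 * nanoseconds in the lower 32 bits, so ts = (2ULL << 32) | 500000000
 * converts to 2 * NS_PER_S + 500000000 == 2500000000 ns.
 */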

/**
 * Set timestamp in mbuf dynamic field.
 *
 * @param mbuf
 *   Structure to write into.
 * @param offset
 *   Dynamic field offset in mbuf structure.
 * @param timestamp
 *   Value to write.
 */
static __rte_always_inline void
mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
		rte_mbuf_timestamp_t timestamp)
{
	*RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = timestamp;
}
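
/*
 * Usage sketch (illustrative only, helper name is hypothetical): stamping
 * an mbuf with a CQE timestamp. The conversion condition mirrors the
 * driver's CQE-to-mbuf path, where realtime-format timestamps are converted
 * to nanoseconds first; the dynamic field and flag are assumed to be
 * registered already and cached in the queue.
 */
static __rte_always_inline void
mlx5_timestamp_stamp_sketch(struct mlx5_rxq_data *rxq,
			    struct mlx5_dev_ctx_shared *sh,
			    struct rte_mbuf *mbuf, uint64_t cqe_ts)
{
	uint64_t ts = rxq->rt_timestamp ?
		      mlx5_txpp_convert_rx_ts(sh, cqe_ts) : cqe_ts;

	mlx5_timestamp_set(mbuf, rxq->timestamp_offset, ts);
	mbuf->ol_flags |= rxq->timestamp_rx_flag;
}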

/**
 * Replace MPRQ buffer.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param rq_idx
 *   RQ index to replace.
 */
static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	struct mlx5_mprq_buf *rep = rxq->mprq_repl;
	volatile struct mlx5_wqe_data_seg *wqe =
		&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
	void *addr;

	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
		MLX5_ASSERT(rep != NULL);
		/* Replace MPRQ buf. */
		(*rxq->mprq_bufs)[rq_idx] = rep;
		/* Replace WQE. */
		addr = mlx5_mprq_buf_addr(rep, strd_n);
		wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
		/* Stash a mbuf for next replacement. */
		if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
			rxq->mprq_repl = rep;
		else
			rxq->mprq_repl = NULL;
		/* Release the old buffer. */
		mlx5_mprq_buf_free(buf);
	} else if (unlikely(rxq->mprq_repl == NULL)) {
		struct mlx5_mprq_buf *rep;

		/*
		 * Currently, the MPRQ mempool is out of buffer
		 * and doing memcpy regardless of the size of Rx
		 * packet. Retry allocation to get back to
		 * normal.
		 */
		if (!rte_mempool_get(rxq->mprq_mp, (void **)&rep))
			rxq->mprq_repl = rep;
	}
}

/**
 * Attach or copy MPRQ buffer content to a packet.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param pkt
 *   Pointer to a packet to fill.
 * @param len
 *   Packet length.
 * @param buf
 *   Pointer to a MPRQ buffer to take the data from.
 * @param strd_idx
 *   Stride index to start from.
 * @param strd_cnt
 *   Number of strides to consume.
 */
static __rte_always_inline enum mlx5_rqx_code
mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
		struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	const uint16_t strd_sz = 1 << rxq->strd_sz_n;
	const uint16_t strd_shift =
		MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
	const int32_t hdrm_overlap =
		len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
	const uint32_t offset = strd_idx * strd_sz + strd_shift;
	void *addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);

	/*
	 * Memcpy packets to the target mbuf if:
	 * - The size of packet is smaller than mprq_max_memcpy_len.
	 * - Out of buffer in the Mempool for Multi-Packet RQ.
	 * - The packet's stride overlaps a headroom and scatter is off.
	 */
	if (len <= rxq->mprq_max_memcpy_len ||
	    rxq->mprq_repl == NULL ||
	    (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
		if (likely(len <=
			   (uint32_t)(pkt->buf_len - RTE_PKTMBUF_HEADROOM))) {
			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, len);
			DATA_LEN(pkt) = len;
		} else if (rxq->strd_scatter_en) {
			struct rte_mbuf *prev = pkt;
			uint32_t seg_len = RTE_MIN(len, (uint32_t)
				(pkt->buf_len - RTE_PKTMBUF_HEADROOM));
			uint32_t rem_len = len - seg_len;

			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, seg_len);
			DATA_LEN(pkt) = seg_len;
			while (rem_len) {
				struct rte_mbuf *next =
					rte_pktmbuf_alloc(rxq->mp);

				if (unlikely(next == NULL))
					return MLX5_RXQ_CODE_NOMBUF;
				NEXT(prev) = next;
				SET_DATA_OFF(next, 0);
				addr = RTE_PTR_ADD(addr, seg_len);
				seg_len = RTE_MIN(rem_len, (uint32_t)
					(next->buf_len - RTE_PKTMBUF_HEADROOM));
				rte_memcpy
					(rte_pktmbuf_mtod(next, void *),
					 addr, seg_len);
				DATA_LEN(next) = seg_len;
				rem_len -= seg_len;
				prev = next;
				NB_SEGS(pkt)++;
			}
		} else {
			return MLX5_RXQ_CODE_DROPPED;
		}
	} else {
		rte_iova_t buf_iova;
		struct rte_mbuf_ext_shared_info *shinfo;
		uint16_t buf_len = strd_cnt * strd_sz;
		void *buf_addr;

		/* Increment the refcnt of the whole chunk. */
		__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
		MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
			    __ATOMIC_RELAXED) <= strd_n + 1);
		buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
		/*
		 * MLX5 device doesn't use iova but it is necessary in a
		 * case where the Rx packet is transmitted via a
		 * different PMD.
		 */
		buf_iova = rte_mempool_virt2iova(buf) +
			   RTE_PTR_DIFF(buf_addr, buf);
		shinfo = &buf->shinfos[strd_idx];
		rte_mbuf_ext_refcnt_set(shinfo, 1);
		/*
		 * RTE_MBUF_F_EXTERNAL will be set to pkt->ol_flags when
		 * attaching the stride to mbuf and more offload flags
		 * will be added below by calling rxq_cq_to_mbuf().
		 * Other fields will be overwritten.
		 */
		rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
					  buf_len, shinfo);
		/* Set mbuf head-room. */
		SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
		MLX5_ASSERT(pkt->ol_flags == RTE_MBUF_F_EXTERNAL);
		MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
			len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
		DATA_LEN(pkt) = len;
		/*
		 * Copy the last fragment of a packet (up to headroom
		 * size bytes) in case there is a stride overlap with
		 * a next packet's headroom. Allocate a separate mbuf
		 * to store this fragment and link it. Scatter is on.
		 */
		if (hdrm_overlap > 0) {
			MLX5_ASSERT(rxq->strd_scatter_en);
			struct rte_mbuf *seg =
				rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(seg == NULL))
				return MLX5_RXQ_CODE_NOMBUF;
			SET_DATA_OFF(seg, 0);
			rte_memcpy(rte_pktmbuf_mtod(seg, void *),
				   RTE_PTR_ADD(addr, len - hdrm_overlap),
				   hdrm_overlap);
			DATA_LEN(seg) = hdrm_overlap;
			DATA_LEN(pkt) = len - hdrm_overlap;
			NEXT(pkt) = seg;
			NB_SEGS(pkt) = 2;
		}
	}
	return MLX5_RXQ_CODE_EXIT;
}
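
/*
 * Usage sketch (illustrative only, helper name is hypothetical): how a
 * receive loop may act on the mprq_buf_to_pkt() return code. The real burst
 * functions in mlx5_rx.c do their own accounting; the handling below is an
 * assumption kept minimal for the example.
 */
static __rte_always_inline int
mlx5_mprq_dispatch_sketch(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
			  uint32_t len, struct mlx5_mprq_buf *buf,
			  uint16_t strd_idx, uint16_t strd_cnt)
{
	enum mlx5_rqx_code rxq_code;

	rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf, strd_idx, strd_cnt);
	if (likely(rxq_code == MLX5_RXQ_CODE_EXIT))
		return 0;
	/* Mbuf exhaustion, or an oversized packet with scatter disabled. */
	rte_pktmbuf_free(pkt);
	if (rxq_code == MLX5_RXQ_CODE_NOMBUF)
		++rxq->stats.rx_nombuf;
	else
		++rxq->stats.idropped;
	return -1;
}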

/**
 * Check whether Multi-Packet RQ can be enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
static __rte_always_inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.mprq.enabled &&
	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
		return 1;
	return -ENOTSUP;
}

/**
 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
	return rxq->strd_num_n > 0;
}

/**
 * Check whether Multi-Packet RQ is enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;
	uint16_t n = 0;
	uint16_t n_ibv = 0;

	if (mlx5_check_mprq_support(dev) < 0)
		return 0;
	/* All the configured queues should be enabled. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);

		if (rxq_ctrl == NULL ||
		    rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		n_ibv++;
		if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
			++n;
	}
	/* Multi-Packet RQ can't be partially configured. */
	MLX5_ASSERT(n == 0 || n == n_ibv);
	return n == n_ibv && n != 0;
}

#endif /* RTE_PMD_MLX5_RX_H_ */