/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RX_H_
#define RTE_PMD_MLX5_RX_H_

#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include <mlx5_common_mr.h>

#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "rte_pmd_mlx5.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10

#define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv
#define RXQ_DEV(rxq_ctrl) ETH_DEV(RXQ_PORT(rxq_ctrl))
#define RXQ_PORT_ID(rxq_ctrl) PORT_ID(RXQ_PORT(rxq_ctrl))

/* First entry must be NULL for comparison. */
#define mlx5_mr_btree_len(bt) ((bt)->len - 1)

struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
				sizeof(struct mlx5_mprq_buf) + \
				(strd_n) * \
				sizeof(struct rte_mbuf_ext_shared_info) + \
				RTE_PKTMBUF_HEADROOM))

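/*
 * Illustrative sketch (not from the driver): given the MPRQ buffer layout
 * [struct mlx5_mprq_buf][strd_n shinfos][headroom][strides...], the address
 * of stride `i` of size `strd_sz` would be computed as:
 *
 *	void *stride = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n),
 *				   (uintptr_t)i * strd_sz);
 *
 * plus an optional 2-byte shift when strd_shift_en is set.
 */
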
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9

enum mlx5_rxq_err_state {
	MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
	MLX5_RXQ_ERR_STATE_NEED_RESET,
	MLX5_RXQ_ERR_STATE_NEED_READY,
};

enum mlx5_rqx_code {
	MLX5_RXQ_CODE_EXIT = 0,
	MLX5_RXQ_CODE_NOMBUF,
	MLX5_RXQ_CODE_DROPPED,
};

struct mlx5_eth_rxseg {
	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
	uint16_t length; /**< Segment data length, configures split point. */
	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
	uint32_t reserved; /**< Reserved field. */
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int rt_timestamp:1; /* Realtime timestamp format. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int log_strd_num:5; /* Log 2 of the number of strides. */
	unsigned int log_strd_sz:4; /* Log 2 of stride size. */
	unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
	unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
	unsigned int lro:1; /* Enable LRO. */
	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
	unsigned int mcqe_format:3; /* CQE compression format. */
	unsigned int shared:1; /* Shared RXQ. */
	unsigned int delay_drop:1; /* Enable delay drop. */
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint16_t port_id;
	uint32_t elts_ci;
	uint32_t rq_ci;
	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
	uint32_t rq_pi;
	uint32_t cq_ci;
	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
	uint32_t byte_mask;
	union {
		struct rxq_zip zip; /* Compressed context. */
		uint16_t decompressed;
		/* Number of ready mbufs decompressed from the CQ. */
	};
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
	volatile void *wqes;
	volatile struct mlx5_cqe(*cqes)[];
	struct rte_mbuf *(*elts)[];
	struct mlx5_mprq_buf *(*mprq_bufs)[];
	struct rte_mempool *mp;
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	uint16_t idx; /* Queue index. */
	struct mlx5_rxq_stats stats;
	struct mlx5_rxq_stats stats_reset; /* Stats on the last reset. */
	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	struct mlx5_uar_data uar_data; /* CQ doorbell. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
	uint32_t tunnel; /* Tunnel information. */
	int timestamp_offset; /* Dynamic mbuf field for timestamp. */
	uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
	uint64_t flow_meta_mask;
	int32_t flow_meta_offset;
	uint32_t flow_meta_port_mask;
	uint32_t rxseg_n; /* Number of split segment descriptions. */
	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
	/* Buffer split segment descriptions - sizes, offsets, pools. */
} __rte_cache_aligned;

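/*
 * Illustrative note (not from the driver): the *_n/log_* bitfields above
 * hold log2 values to keep the descriptor compact; the datapath expands
 * them on use, e.g.:
 *
 *	uint32_t elts_num = RTE_BIT32(rxq->elts_n);
 *	uint32_t strd_num = RTE_BIT32(rxq->log_strd_num);
 */
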
/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	struct mlx5_rxq_data rxq; /* Data path structure. */
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	bool is_hairpin; /* Whether RxQ type is Hairpin. */
	unsigned int socket; /* CPU socket ID for allocations. */
	LIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. */
	uint32_t share_group; /* Group ID of shared RXQ. */
	uint16_t share_qid; /* Shared RxQ ID in group. */
	unsigned int started:1; /* Whether (shared) RXQ has been started. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
	uint32_t wqn; /* WQ number. */
	uint32_t rxseg_n; /* Number of split segment descriptions. */
	struct rte_eth_rxseg_split rxseg[MLX5_MAX_RXQ_NSEG];
	/* Saved original buffer split segment configuration. */
	uint16_t dump_file_n; /* Number of dump files. */
};

/* RX queue private data. */
struct mlx5_rxq_priv {
	uint16_t idx; /* Queue index. */
	uint32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
	LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	struct mlx5_devx_rq devx_rq;
	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
	uint32_t hairpin_status; /* Hairpin binding status. */
};

/* External RX queue descriptor. */
struct mlx5_external_rxq {
	uint32_t hw_id; /* Queue index in the Hardware. */
	uint32_t refcnt; /* Reference counter. */
};

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int mlx5_rx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   const struct rte_eth_rxseg_split *rx_seg,
				   uint16_t n_seg, bool is_extmem);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
	(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_priv *mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx);
uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_external_rxq *mlx5_ext_rxq_ref(struct rte_eth_dev *dev,
					   uint16_t idx);
uint32_t mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_external_rxq *mlx5_ext_rxq_get(struct rte_eth_dev *dev,
					   uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int mlx5_ext_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_new(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n,
						  bool standalone,
						  bool ref_qs);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_obj *ind_tbl,
			       bool deref_rxqs);
int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
			     struct mlx5_ind_table_obj *ind_tbl,
			     bool ref_qs);
int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl,
			      uint16_t *queues, const uint32_t queues_n,
			      bool standalone,
			      bool ref_new_qs, bool deref_old_qs);
int mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
struct mlx5_list_entry *mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx);
int mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
		       void *cb_ctx);
void mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *mlx5_hrxq_clone_cb(void *tool_ctx,
					   struct mlx5_list_entry *entry,
					   void *cb_ctx __rte_unused);
void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,
			     struct mlx5_list_entry *entry);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
				struct mlx5_flow_rss_desc *rss_desc);
int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
bool mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx);
const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
	(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
		     const uint8_t *rss_key, uint32_t rss_key_len,
		     uint64_t hash_fields,
		     const uint16_t *queues, uint32_t queues_n);

/* mlx5_rx.c */

uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(void *rx_queue);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			   struct rte_eth_burst_mode *mode);
int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);

/* Vectorized version of mlx5_rx.c */
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
				uint16_t pkts_n);

static int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);

/**
 * Query LKey for an address on Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct rte_mempool *mp;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
	return mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
}

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param mb
 *   Buffer to search the address of.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_mb2mr(struct mlx5_rxq_data *rxq, struct rte_mbuf *mb)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Slower search in the mempool database on miss. */
	return mlx5_mr_mempool2mr_bh(mr_ctrl, mb->pool, addr);
}

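/*
 * Illustrative sketch (not from the driver): when refilling a WQE data
 * segment with a fresh mbuf, the datapath resolves the LKey through the
 * per-queue MR cache along these lines:
 *
 *	volatile struct mlx5_wqe_data_seg *dseg = ...;
 *	dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(mbuf, uintptr_t));
 *	dseg->lkey = mlx5_rx_mb2mr(rxq, mbuf);
 */
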
/**
 * Convert timestamp from HW format to linear counter
 * from Packet Pacing Clock Queue CQE timestamp format.
 *
 * @param sh
 *   Pointer to the device shared context. Might be needed
 *   to convert according to the current device configuration.
 * @param ts
 *   Timestamp from CQE to convert.
 *
 * @return
 *   UTC in nanoseconds
 */
static __rte_always_inline uint64_t
mlx5_txpp_convert_rx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t ts)
{
	RTE_SET_USED(sh);
	return (ts & UINT32_MAX) + (ts >> 32) * NS_PER_S;
}

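/*
 * Worked example (illustrative): the conversion above treats the upper
 * 32 bits as seconds and the lower 32 bits as a nanosecond remainder,
 * so for ts = (2ULL << 32) | 500000000 the result is
 * 2 * NS_PER_S + 500000000 = 2500000000 ns.
 */
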
/**
 * Set timestamp in mbuf dynamic field.
 *
 * @param mbuf
 *   Structure to write into.
 * @param offset
 *   Dynamic field offset in mbuf structure.
 * @param timestamp
 *   Value to write.
 */
static __rte_always_inline void
mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
		   rte_mbuf_timestamp_t timestamp)
{
	*RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = timestamp;
}

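/*
 * Illustrative sketch (not from the driver): the offset is that of the
 * timestamp dynamic field from rte_mbuf_dyn.h, resolved once at setup:
 *
 *	int off = rte_mbuf_dynfield_lookup(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
 *					   NULL);
 *
 *	if (off >= 0)
 *		mlx5_timestamp_set(mbuf, off, ts);
 */
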
/**
 * Replace MPRQ buffer.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param rq_idx
 *   RQ index to replace.
 */
static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
{
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	struct mlx5_mprq_buf *rep = rxq->mprq_repl;
	volatile struct mlx5_wqe_data_seg *wqe =
		&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
	void *addr;

	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
		MLX5_ASSERT(rep != NULL);
		/* Replace MPRQ buf. */
		(*rxq->mprq_bufs)[rq_idx] = rep;
		/* Replace WQE. */
		addr = mlx5_mprq_buf_addr(rep, strd_n);
		wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
		/* Stash an mbuf for the next replacement. */
		if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
			rxq->mprq_repl = rep;
		else
			rxq->mprq_repl = NULL;
		/* Release the old buffer. */
		mlx5_mprq_buf_free(buf);
	} else if (unlikely(rxq->mprq_repl == NULL)) {
		struct mlx5_mprq_buf *rep;

		/*
		 * Currently, the MPRQ mempool is out of buffers
		 * and doing memcpy regardless of the size of the Rx
		 * packet. Retry allocation to get back to
		 * normal.
		 */
		if (!rte_mempool_get(rxq->mprq_mp, (void **)&rep))
			rxq->mprq_repl = rep;
	}
}

/**
 * Attach or copy MPRQ buffer content to a packet.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param pkt
 *   Pointer to a packet to fill.
 * @param len
 *   Packet length.
 * @param buf
 *   Pointer to a MPRQ buffer to take the data from.
 * @param strd_idx
 *   Stride index to start from.
 * @param strd_cnt
 *   Number of strides to consume.
 */
static __rte_always_inline enum mlx5_rqx_code
mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
		struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
{
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	const uint16_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
	const uint16_t strd_shift =
		MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
	const int32_t hdrm_overlap =
		len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
	const uint32_t offset = strd_idx * strd_sz + strd_shift;
	void *addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);

	/*
	 * Memcpy packets to the target mbuf if:
	 * - The size of packet is smaller than mprq_max_memcpy_len.
	 * - Out of buffer in the Mempool for Multi-Packet RQ.
	 * - The packet's stride overlaps a headroom and scatter is off.
	 */
	if (len <= rxq->mprq_max_memcpy_len ||
	    rxq->mprq_repl == NULL ||
	    (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
		if (likely(len <=
			   (uint32_t)(pkt->buf_len - RTE_PKTMBUF_HEADROOM))) {
			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, len);
			DATA_LEN(pkt) = len;
		} else if (rxq->strd_scatter_en) {
			struct rte_mbuf *prev = pkt;
			uint32_t seg_len = RTE_MIN(len, (uint32_t)
				(pkt->buf_len - RTE_PKTMBUF_HEADROOM));
			uint32_t rem_len = len - seg_len;

			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, seg_len);
			DATA_LEN(pkt) = seg_len;
			while (rem_len) {
				struct rte_mbuf *next =
					rte_pktmbuf_alloc(rxq->mp);

				if (unlikely(next == NULL))
					return MLX5_RXQ_CODE_NOMBUF;
				NEXT(prev) = next;
				SET_DATA_OFF(next, 0);
				addr = RTE_PTR_ADD(addr, seg_len);
				seg_len = RTE_MIN(rem_len, (uint32_t)
					(next->buf_len - RTE_PKTMBUF_HEADROOM));
				rte_memcpy
					(rte_pktmbuf_mtod(next, void *),
					 addr, seg_len);
				DATA_LEN(next) = seg_len;
				rem_len -= seg_len;
				prev = next;
				NB_SEGS(pkt)++;
			}
		} else {
			return MLX5_RXQ_CODE_DROPPED;
		}
	} else {
		rte_iova_t buf_iova;
		struct rte_mbuf_ext_shared_info *shinfo;
		uint16_t buf_len = strd_cnt * strd_sz;
		void *buf_addr;

		/* Increment the refcnt of the whole chunk. */
		__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
		MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
			    __ATOMIC_RELAXED) <= strd_n + 1);
		buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
		/*
		 * MLX5 device doesn't use iova but it is necessary in a
		 * case where the Rx packet is transmitted via a
		 * different PMD.
		 */
		buf_iova = rte_mempool_virt2iova(buf) +
			   RTE_PTR_DIFF(buf_addr, buf);
		shinfo = &buf->shinfos[strd_idx];
		rte_mbuf_ext_refcnt_set(shinfo, 1);
		/*
		 * RTE_MBUF_F_EXTERNAL will be set to pkt->ol_flags when
		 * attaching the stride to mbuf and more offload flags
		 * will be added below by calling rxq_cq_to_mbuf().
		 * Other fields will be overwritten.
		 */
		rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
					  buf_len, shinfo);
		/* Set mbuf head-room. */
		SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
		MLX5_ASSERT(pkt->ol_flags & RTE_MBUF_F_EXTERNAL);
		MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
			len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
		DATA_LEN(pkt) = len;
		/*
		 * Copy the last fragment of a packet (up to headroom
		 * size bytes) in case there is a stride overlap with
		 * a next packet's headroom. Allocate a separate mbuf
		 * to store this fragment and link it. Scatter is on.
		 */
		if (hdrm_overlap > 0) {
			MLX5_ASSERT(rxq->strd_scatter_en);
			struct rte_mbuf *seg =
				rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(seg == NULL))
				return MLX5_RXQ_CODE_NOMBUF;
			SET_DATA_OFF(seg, 0);
			rte_memcpy(rte_pktmbuf_mtod(seg, void *),
				   RTE_PTR_ADD(addr, len - hdrm_overlap),
				   hdrm_overlap);
			DATA_LEN(seg) = hdrm_overlap;
			DATA_LEN(pkt) = len - hdrm_overlap;
			NEXT(pkt) = seg;
			NB_SEGS(pkt) = 2;
		}
	}
	return MLX5_RXQ_CODE_EXIT;
}

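/*
 * Illustrative sketch (not from the driver): a caller in the MPRQ burst
 * loop would dispatch on the returned code roughly as follows:
 *
 *	enum mlx5_rqx_code rxq_code =
 *		mprq_buf_to_pkt(rxq, pkt, len, buf, strd_idx, strd_cnt);
 *
 *	if (rxq_code != MLX5_RXQ_CODE_EXIT) {
 *		if (rxq_code == MLX5_RXQ_CODE_DROPPED)
 *			++rxq->stats.idropped;
 *		else
 *			++rxq->stats.rx_nombuf;
 *		(free the mbuf and stop or skip, respectively)
 *	}
 */
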
/**
 * Check whether Multi-Packet RQ can be enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
static __rte_always_inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.mprq.enabled &&
	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
		return 1;
	return -ENOTSUP;
}

/**
 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
	return rxq->log_strd_num > 0;
}

/**
 * Check whether Multi-Packet RQ is enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;
	uint16_t n = 0;
	uint16_t n_ibv = 0;

	if (mlx5_check_mprq_support(dev) < 0)
		return 0;
	/* All the configured queues should be enabled. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);

		if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
			continue;
		n_ibv++;
		if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
			++n;
	}
	/* Multi-Packet RQ can't be partially configured. */
	MLX5_ASSERT(n == 0 || n == n_ibv);
	return n == n_ibv;
}

/**
 * Check whether given RxQ is external.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queue_idx
 *   Rx queue index.
 *
 * @return
 *   True if is external RxQ, otherwise false.
 */
static __rte_always_inline bool
mlx5_is_external_rxq(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_external_rxq *rxq;

	if (!priv->ext_rxqs || queue_idx < MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
		return false;
	rxq = &priv->ext_rxqs[queue_idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
	return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
}

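/*
 * Illustrative sketch (assumption, not verified against this tree):
 * external RxQs occupy the index range starting at
 * MLX5_EXTERNAL_RX_QUEUE_ID_MIN and are bound to hardware queue objects
 * through the public API from rte_pmd_mlx5.h, e.g.:
 *
 *	int ret = rte_pmd_mlx5_external_rx_queue_id_map(port_id,
 *			MLX5_EXTERNAL_RX_QUEUE_ID_MIN, hw_queue_id);
 */
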
#endif /* RTE_PMD_MLX5_RX_H_ */