/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RX_H_
#define RTE_PMD_MLX5_RX_H_

#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include <mlx5_common_mr.h>

#include "mlx5.h"
#include "mlx5_autoconf.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10

/* First entry must be NULL for comparison. */
#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
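
/*
 * As the comment above implies, index 0 of the B-tree is reserved for the
 * NULL entry used in comparisons, so the usable entry count is the raw
 * length minus one.
 */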

struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when the RX ring is full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
				sizeof(struct mlx5_mprq_buf) + \
				(strd_n) * \
				sizeof(struct rte_mbuf_ext_shared_info) + \
				RTE_PKTMBUF_HEADROOM))
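
/*
 * Illustrative layout implied by the macro above (a sketch derived from the
 * expression, not a normative definition): the strides of an MPRQ buffer
 * start after the control structure, the per-stride shared-info array
 * (buf->shinfos[]) and one mbuf headroom:
 *
 *   [struct mlx5_mprq_buf][shinfos[0..strd_n-1]][headroom][stride 0][stride 1]...
 */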

#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9

enum mlx5_rxq_err_state {
	MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
	MLX5_RXQ_ERR_STATE_NEED_RESET,
	MLX5_RXQ_ERR_STATE_NEED_READY,
};

enum mlx5_rqx_code {
	MLX5_RXQ_CODE_EXIT = 0,
	MLX5_RXQ_CODE_NOMBUF,
	MLX5_RXQ_CODE_DROPPED,
};

struct mlx5_eth_rxseg {
	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
	uint16_t length; /**< Segment data length, configures split point. */
	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
	uint32_t reserved; /**< Reserved field. */
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int rt_timestamp:1; /* Realtime timestamp format. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int strd_num_n:5; /* Log 2 of the number of strides. */
	unsigned int strd_sz_n:4; /* Log 2 of stride size. */
	unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
	unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
	unsigned int lro:1; /* Enable LRO. */
	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
	unsigned int mcqe_format:3; /* CQE compression format. */
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint16_t port_id;
	uint32_t elts_ci;
	uint32_t rq_ci;
	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
	uint32_t rq_pi;
	uint32_t cq_ci;
	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
	uint32_t byte_mask;
	union {
		struct rxq_zip zip; /* Compressed context. */
		uint16_t decompressed;
		/* Number of ready mbufs decompressed from the CQ. */
	};
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
	volatile void *wqes;
	volatile struct mlx5_cqe(*cqes)[];
	struct rte_mbuf *(*elts)[];
	struct mlx5_mprq_buf *(*mprq_bufs)[];
	struct rte_mempool *mp;
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	uint16_t idx; /* Queue index. */
	struct mlx5_rxq_stats stats;
	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* Verbs CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *uar_lock_cq;
	/* CQ (UAR) access lock required for 32-bit implementations. */
#endif
	uint32_t tunnel; /* Tunnel information. */
	int timestamp_offset; /* Dynamic mbuf field for timestamp. */
	uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
	uint64_t flow_meta_mask;
	int32_t flow_meta_offset;
	uint32_t flow_meta_port_mask;
	uint32_t rxseg_n; /* Number of split segment descriptions. */
	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
	/* Buffer split segment descriptions - sizes, offsets, pools. */
} __rte_cache_aligned;

enum mlx5_rxq_type {
	MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
	MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
	MLX5_RXQ_TYPE_UNDEFINED,
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	struct mlx5_rxq_data rxq; /* Data path structure. */
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	uint32_t refcnt; /* Reference counter. */
	LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	enum mlx5_rxq_type type; /* Rxq type. */
	unsigned int socket; /* CPU socket ID for allocations. */
	uint32_t share_group; /* Group ID of shared RXQ. */
	uint16_t share_qid; /* Shared RxQ ID in group. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
	uint32_t wqn; /* WQ number. */
	uint16_t dump_file_n; /* Number of dump files. */
	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
	uint32_t hairpin_status; /* Hairpin binding status. */
};

/* RX queue private data. */
struct mlx5_rxq_priv {
	uint16_t idx; /* Queue index. */
	struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
	LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
};

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int mlx5_rx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,
				   struct mlx5_rxq_priv *rxq,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   const struct rte_eth_rxseg_split *rx_seg,
				   uint16_t n_seg);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
	(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_obj *ind_tbl,
			       bool standalone);
int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
			     struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl,
			      uint16_t *queues, const uint32_t queues_n,
			      bool standalone);
int mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
struct mlx5_list_entry *mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx);
int mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
		       void *cb_ctx);
void mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *mlx5_hrxq_clone_cb(void *tool_ctx,
					   struct mlx5_list_entry *entry,
					   void *cb_ctx __rte_unused);
void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,
			     struct mlx5_list_entry *entry);
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
		       struct mlx5_flow_rss_desc *rss_desc);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
	(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
		     const uint8_t *rss_key, uint32_t rss_key_len,
		     uint64_t hash_fields,
		     const uint16_t *queues, uint32_t queues_n);

/* mlx5_rx.c */

uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(void *rx_queue);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			   struct rte_eth_burst_mode *mode);
int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);

/* Vectorized version of mlx5_rx.c */
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
				uint16_t pkts_n);

static int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct rte_mempool *mp;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/*
	 * Slower search in the mempool database on miss.
	 * During queue creation rxq->sh is not yet set, so we use rxq_ctrl.
	 */
	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
	return mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->cdev->mr_scache,
				     mr_ctrl, mp, addr);
}

#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
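
/*
 * Usage sketch (illustrative only, not lifted from the datapath): the LKey
 * resolved here is what gets programmed into a WQE data segment together
 * with the buffer address when posting an mbuf to the RQ, e.g. for a
 * hypothetical data segment pointer dseg:
 *
 *   dseg->lkey = mlx5_rx_mb2mr(rxq, mbuf);
 *   dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(mbuf, uintptr_t));
 */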

/**
 * Convert timestamp from HW format to linear counter
 * from Packet Pacing Clock Queue CQE timestamp format.
 *
 * @param sh
 *   Pointer to the device shared context. Might be needed
 *   to convert according to the current device configuration.
 * @param ts
 *   Timestamp from CQE to convert.
 *
 * @return
 *   UTC in nanoseconds.
 */
static __rte_always_inline uint64_t
mlx5_txpp_convert_rx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t ts)
{
	RTE_SET_USED(sh);
	return (ts & UINT32_MAX) + (ts >> 32) * NS_PER_S;
}
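
/*
 * As implied by the conversion above, the Clock Queue CQE timestamp packs
 * seconds in the upper 32 bits and nanoseconds in the lower 32 bits, e.g.
 * ts = ((uint64_t)5 << 32) | 700 converts to 5 * NS_PER_S + 700 nanoseconds.
 */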

/**
 * Set timestamp in mbuf dynamic field.
 *
 * @param mbuf
 *   Structure to write into.
 * @param offset
 *   Dynamic field offset in mbuf structure.
 * @param timestamp
 *   Value to write.
 */
static __rte_always_inline void
mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
		   rte_mbuf_timestamp_t timestamp)
{
	*RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = timestamp;
}
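
/*
 * Usage sketch (illustrative only): the offset is expected to be the one
 * registered for the generic mbuf timestamp dynamic field, which the driver
 * caches in rxq->timestamp_offset, e.g.:
 *
 *   int off = rte_mbuf_dynfield_lookup(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
 *   if (off >= 0)
 *           mlx5_timestamp_set(mbuf, off, ts);
 */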

/**
 * Replace MPRQ buffer.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param rq_idx
 *   RQ index to replace.
 */
static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	struct mlx5_mprq_buf *rep = rxq->mprq_repl;
	volatile struct mlx5_wqe_data_seg *wqe =
		&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
	void *addr;

	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
		MLX5_ASSERT(rep != NULL);
		/* Replace MPRQ buf. */
		(*rxq->mprq_bufs)[rq_idx] = rep;
		/* Replace WQE. */
		addr = mlx5_mprq_buf_addr(rep, strd_n);
		wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
		/* Stash a mbuf for next replacement. */
		if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
			rxq->mprq_repl = rep;
		else
			rxq->mprq_repl = NULL;
		/* Release the old buffer. */
		mlx5_mprq_buf_free(buf);
	} else if (unlikely(rxq->mprq_repl == NULL)) {
		struct mlx5_mprq_buf *rep;

		/*
		 * Currently, the MPRQ mempool is out of buffers
		 * and memcpy is used regardless of the size of the Rx
		 * packet. Retry allocation to get back to normal.
		 */
		if (!rte_mempool_get(rxq->mprq_mp, (void **)&rep))
			rxq->mprq_repl = rep;
	}
}
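
/*
 * Note on the refcount check above: buf->refcnt > 1 means some strides of
 * the buffer are still attached to application mbufs, so the RQ slot is
 * repointed to the stashed replacement buffer instead of reposting the same
 * buffer in place.
 */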

/**
 * Attach or copy MPRQ buffer content to a packet.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param pkt
 *   Pointer to a packet to fill.
 * @param len
 *   Packet length.
 * @param buf
 *   Pointer to a MPRQ buffer to take the data from.
 * @param strd_idx
 *   Stride index to start from.
 * @param strd_cnt
 *   Number of strides to consume.
 */
static __rte_always_inline enum mlx5_rqx_code
mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
		struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	const uint16_t strd_sz = 1 << rxq->strd_sz_n;
	const uint16_t strd_shift =
		MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
	const int32_t hdrm_overlap =
		len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
	const uint32_t offset = strd_idx * strd_sz + strd_shift;
	void *addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);

	/*
	 * Memcpy packets to the target mbuf if:
	 * - The size of the packet is smaller than mprq_max_memcpy_len.
	 * - Out of buffers in the Mempool for Multi-Packet RQ.
	 * - The packet's stride overlaps a headroom and scatter is off.
	 */
	if (len <= rxq->mprq_max_memcpy_len ||
	    rxq->mprq_repl == NULL ||
	    (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
		if (likely(len <=
			   (uint32_t)(pkt->buf_len - RTE_PKTMBUF_HEADROOM))) {
			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, len);
			DATA_LEN(pkt) = len;
		} else if (rxq->strd_scatter_en) {
			struct rte_mbuf *prev = pkt;
			uint32_t seg_len = RTE_MIN(len, (uint32_t)
				(pkt->buf_len - RTE_PKTMBUF_HEADROOM));
			uint32_t rem_len = len - seg_len;

			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, seg_len);
			DATA_LEN(pkt) = seg_len;
			while (rem_len) {
				struct rte_mbuf *next =
					rte_pktmbuf_alloc(rxq->mp);

				if (unlikely(next == NULL))
					return MLX5_RXQ_CODE_NOMBUF;
				NEXT(prev) = next;
				SET_DATA_OFF(next, 0);
				addr = RTE_PTR_ADD(addr, seg_len);
				seg_len = RTE_MIN(rem_len, (uint32_t)
					(next->buf_len - RTE_PKTMBUF_HEADROOM));
				rte_memcpy
					(rte_pktmbuf_mtod(next, void *),
					 addr, seg_len);
				DATA_LEN(next) = seg_len;
				rem_len -= seg_len;
				prev = next;
				NB_SEGS(pkt)++;
			}
		} else {
			return MLX5_RXQ_CODE_DROPPED;
		}
	} else {
		rte_iova_t buf_iova;
		struct rte_mbuf_ext_shared_info *shinfo;
		uint16_t buf_len = strd_cnt * strd_sz;
		void *buf_addr;

		/* Increment the refcnt of the whole chunk. */
		__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
		MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
			    __ATOMIC_RELAXED) <= strd_n + 1);
		buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
		/*
		 * MLX5 device doesn't use iova but it is necessary in a
		 * case where the Rx packet is transmitted via a
		 * different PMD.
		 */
		buf_iova = rte_mempool_virt2iova(buf) +
			   RTE_PTR_DIFF(buf_addr, buf);
		shinfo = &buf->shinfos[strd_idx];
		rte_mbuf_ext_refcnt_set(shinfo, 1);
		/*
		 * RTE_MBUF_F_EXTERNAL will be set to pkt->ol_flags when
		 * attaching the stride to mbuf and more offload flags
		 * will be added below by calling rxq_cq_to_mbuf().
		 * Other fields will be overwritten.
		 */
		rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
					  buf_len, shinfo);
		/* Set mbuf head-room. */
		SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
		MLX5_ASSERT(pkt->ol_flags == RTE_MBUF_F_EXTERNAL);
		MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
			len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
		DATA_LEN(pkt) = len;
		/*
		 * Copy the last fragment of a packet (up to headroom
		 * size bytes) in case there is a stride overlap with
		 * a next packet's headroom. Allocate a separate mbuf
		 * to store this fragment and link it. Scatter is on.
		 */
		if (hdrm_overlap > 0) {
			MLX5_ASSERT(rxq->strd_scatter_en);
			struct rte_mbuf *seg =
				rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(seg == NULL))
				return MLX5_RXQ_CODE_NOMBUF;
			SET_DATA_OFF(seg, 0);
			rte_memcpy(rte_pktmbuf_mtod(seg, void *),
				   RTE_PTR_ADD(addr, len - hdrm_overlap),
				   hdrm_overlap);
			DATA_LEN(seg) = hdrm_overlap;
			DATA_LEN(pkt) = len - hdrm_overlap;
			NEXT(pkt) = seg;
			NB_SEGS(pkt) = 2;
		}
	}
	return MLX5_RXQ_CODE_EXIT;
}
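
/*
 * Worked example for hdrm_overlap above (assuming the default 128-byte
 * RTE_PKTMBUF_HEADROOM): with 2048-byte strides and strd_cnt = 2, a
 * 4000-byte packet gives 4000 + 128 - 4096 = 32, i.e. the last 32 bytes
 * would collide with the next packet's headroom and are therefore copied
 * into a separately allocated mbuf when scatter is enabled.
 */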

/**
 * Check whether Multi-Packet RQ can be enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
static __rte_always_inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.mprq.enabled &&
	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
		return 1;
	return -ENOTSUP;
}

/**
 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
	return rxq->strd_num_n > 0;
}

/**
 * Check whether Multi-Packet RQ is enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;
	uint16_t n = 0;
	uint16_t n_ibv = 0;

	if (mlx5_check_mprq_support(dev) < 0)
		return 0;
	/* All the configured queues should be enabled. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
			(rxq, struct mlx5_rxq_ctrl, rxq);

		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		n_ibv++;
		if (mlx5_rxq_mprq_enabled(rxq))
			++n;
	}
	/* Multi-Packet RQ can't be partially configured. */
	MLX5_ASSERT(n == 0 || n == n_ibv);
	return n == n_ibv;
}

#endif /* RTE_PMD_MLX5_RX_H_ */