/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RX_H_
#define RTE_PMD_MLX5_RX_H_

#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_spinlock.h>

#include <mlx5_common_mr.h>

#include "mlx5.h"
#include "mlx5_autoconf.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10

#define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv
#define RXQ_DEV(rxq_ctrl) ETH_DEV(RXQ_PORT(rxq_ctrl))
#define RXQ_PORT_ID(rxq_ctrl) PORT_ID(RXQ_PORT(rxq_ctrl))

/* First entry must be NULL for comparison. */
#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
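/*
 * Slot 0 of the MR B-tree holds the NULL sentinel mentioned above, so the
 * number of usable entries reported here is len - 1.
 */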
struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
				sizeof(struct mlx5_mprq_buf) + \
				(strd_n) * \
				sizeof(struct rte_mbuf_ext_shared_info) + \
				RTE_PKTMBUF_HEADROOM))
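/*
 * Layout implied by the macro above (illustrative): an MPRQ buffer starts
 * with the struct mlx5_mprq_buf header, followed by strd_n instances of
 * struct rte_mbuf_ext_shared_info (one per stride) and a packet headroom
 * area; the returned pointer addresses the stride data right after them.
 */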
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9

enum mlx5_rxq_err_state {
	MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
	MLX5_RXQ_ERR_STATE_NEED_RESET,
	MLX5_RXQ_ERR_STATE_NEED_READY,
};

enum mlx5_rqx_code {
	MLX5_RXQ_CODE_EXIT = 0,
	MLX5_RXQ_CODE_NOMBUF,
	MLX5_RXQ_CODE_DROPPED,
};

struct mlx5_eth_rxseg {
	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
	uint16_t length; /**< Segment data length, configures split point. */
	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
	uint32_t reserved; /**< Reserved field. */
};
/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int rt_timestamp:1; /* Realtime timestamp format. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int strd_num_n:5; /* Log 2 of the number of strides. */
	unsigned int strd_sz_n:4; /* Log 2 of stride size. */
	unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
	unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
	unsigned int lro:1; /* Enable LRO. */
	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
	unsigned int mcqe_format:3; /* CQE compression format. */
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
	struct rxq_zip zip; /* Compressed context. */
	uint16_t decompressed;
	/* Number of ready mbufs decompressed from the CQ. */
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
	volatile void *wqes;
	volatile struct mlx5_cqe(*cqes)[];
	struct rte_mbuf *(*elts)[];
	struct mlx5_mprq_buf *(*mprq_bufs)[];
	struct rte_mempool *mp;
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	uint16_t idx; /* Queue index. */
	struct mlx5_rxq_stats stats;
	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* Verbs CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
	rte_spinlock_t *uar_lock_cq;
	/* CQ (UAR) access lock required for 32-bit implementations. */
	uint32_t tunnel; /* Tunnel information. */
	int timestamp_offset; /* Dynamic mbuf field for timestamp. */
	uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
	uint64_t flow_meta_mask;
	int32_t flow_meta_offset;
	uint32_t flow_meta_port_mask;
	uint32_t rxseg_n; /* Number of split segment descriptions. */
	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
	/* Buffer split segment descriptions - sizes, offsets, pools. */
} __rte_cache_aligned;
enum mlx5_rxq_type {
	MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
	MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
	MLX5_RXQ_TYPE_UNDEFINED,
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	struct mlx5_rxq_data rxq; /* Data path structure. */
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	enum mlx5_rxq_type type; /* Rxq type. */
	unsigned int socket; /* CPU socket ID for allocations. */
	uint32_t share_group; /* Group ID of shared RXQ. */
	uint16_t share_qid; /* Shared RxQ ID in group. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
	uint32_t wqn; /* WQ number. */
	uint16_t dump_file_n; /* Number of dump files. */
};

/* RX queue private data. */
struct mlx5_rxq_priv {
	uint16_t idx; /* Queue index. */
	uint32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
	LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
	uint32_t hairpin_status; /* Hairpin binding status. */
};
extern uint8_t rss_hash_default_key[];

unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int mlx5_rx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,
				   struct mlx5_rxq_priv *rxq,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   const struct rte_eth_rxseg_split *rx_seg,
				   uint16_t n_seg);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
	(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_priv *mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx);
uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_obj *ind_tbl,
			       bool standalone);
int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
			     struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl,
			      uint16_t *queues, const uint32_t queues_n,
			      bool standalone);
int mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl);
struct mlx5_list_entry *mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx);
int mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
		       void *cb_ctx);
void mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *mlx5_hrxq_clone_cb(void *tool_ctx,
					   struct mlx5_list_entry *entry,
					   void *cb_ctx __rte_unused);
void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,
			     struct mlx5_list_entry *entry);
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
		       struct mlx5_flow_rss_desc *rss_desc);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
	(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
		     const uint8_t *rss_key, uint32_t rss_key_len,
		     uint64_t hash_fields,
		     const uint16_t *queues, uint32_t queues_n);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(void *rx_queue);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			   struct rte_eth_burst_mode *mode);
int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);

/* Vectorized version of mlx5_rx.c */
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
				uint16_t pkts_n);

static int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches
 * as the Rx mempool database entries are valid for the lifetime of the queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 *   This function always succeeds on valid input.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct rte_mempool *mp;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/*
	 * Slower search in the mempool database on miss.
	 * During queue creation rxq->sh is not yet set, so we use rxq_ctrl.
	 */
	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
	return mlx5_mr_mempool2mr_bh(&rxq_ctrl->sh->cdev->mr_scache,
				     mr_ctrl, mp, addr);
}

#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
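/*
 * Illustrative usage only (not lifted verbatim from the data path): when an
 * mbuf is posted to the RQ, its WQE data segment is programmed with the
 * buffer address and the LKey resolved through the lookup above, e.g.:
 *
 *	dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(mb, uintptr_t));
 *	dseg->lkey = mlx5_rx_mb2mr(rxq, mb);
 */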
/**
 * Convert timestamp from HW format to linear counter
 * from Packet Pacing Clock Queue CQE timestamp format.
 *
 * @param sh
 *   Pointer to the device shared context. Might be needed
 *   to convert according to the current device configuration.
 * @param ts
 *   Timestamp from CQE to convert.
 *
 * @return
 *   Converted timestamp in nanoseconds.
 */
static __rte_always_inline uint64_t
mlx5_txpp_convert_rx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t ts)
{
	RTE_SET_USED(sh);
	return (ts & UINT32_MAX) + (ts >> 32) * NS_PER_S;
}
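/*
 * Reading of the conversion above (illustrative): the Clock Queue CQE keeps
 * whole seconds in the upper 32 bits and the nanosecond remainder in the
 * lower 32 bits, so ts = (5ULL << 32) | 100 converts to 5 * NS_PER_S + 100.
 */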
/**
 * Set timestamp in mbuf dynamic field.
 *
 * @param mbuf
 *   Structure to write into.
 * @param offset
 *   Dynamic field offset in mbuf structure.
 * @param timestamp
 *   Value to write.
 */
static __rte_always_inline void
mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
		   rte_mbuf_timestamp_t timestamp)
{
	*RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = timestamp;
}
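/*
 * Usage sketch (ts_off, ts_flag and cqe_ts are illustrative names, not part
 * of this header): the offset passed here is the dynamic timestamp field
 * registered via the mbuf dynamic-field API, e.g.:
 *
 *	int ts_off;
 *	uint64_t ts_flag;
 *
 *	if (rte_mbuf_dyn_rx_timestamp_register(&ts_off, &ts_flag) == 0)
 *		mlx5_timestamp_set(mbuf, ts_off, cqe_ts);
 */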
/**
 * Replace MPRQ buffer.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param rq_idx
 *   RQ index to replace.
 */
static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	struct mlx5_mprq_buf *rep = rxq->mprq_repl;
	volatile struct mlx5_wqe_data_seg *wqe =
		&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
	void *addr;

	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
		MLX5_ASSERT(rep != NULL);
		/* Replace MPRQ buf. */
		(*rxq->mprq_bufs)[rq_idx] = rep;
		/* Replace WQE. */
		addr = mlx5_mprq_buf_addr(rep, strd_n);
		wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
		/* Stash a mbuf for next replacement. */
		if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
			rxq->mprq_repl = rep;
		else
			rxq->mprq_repl = NULL;
		/* Release the old buffer. */
		mlx5_mprq_buf_free(buf);
	} else if (unlikely(rxq->mprq_repl == NULL)) {
		struct mlx5_mprq_buf *rep;

		/*
		 * Currently, the MPRQ mempool is out of buffer
		 * and doing memcpy regardless of the size of Rx
		 * packet. Retry allocation to get back to
		 * normal.
		 */
		if (!rte_mempool_get(rxq->mprq_mp, (void **)&rep))
			rxq->mprq_repl = rep;
	}
}
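/*
 * Note on the strategy above: the WQE gets a fresh buffer only while the old
 * one is still referenced by previously attached mbufs (refcnt > 1);
 * otherwise the buffer is reused in place and the allocation retry merely
 * refills the spare kept in mprq_repl.
 */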
/**
 * Attach or copy MPRQ buffer content to a packet.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param pkt
 *   Pointer to a packet to fill.
 * @param len
 *   Packet length.
 * @param buf
 *   Pointer to a MPRQ buffer to take the data from.
 * @param strd_idx
 *   Stride index to start from.
 * @param strd_cnt
 *   Number of strides to consume.
 */
static __rte_always_inline enum mlx5_rqx_code
mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
		struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	const uint16_t strd_sz = 1 << rxq->strd_sz_n;
	const uint16_t strd_shift =
		MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
	const int32_t hdrm_overlap =
		len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
	const uint32_t offset = strd_idx * strd_sz + strd_shift;
	void *addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);

	/*
	 * Memcpy packets to the target mbuf if:
	 * - The size of packet is smaller than mprq_max_memcpy_len.
	 * - Out of buffer in the Mempool for Multi-Packet RQ.
	 * - The packet's stride overlaps a headroom and scatter is off.
	 */
	if (len <= rxq->mprq_max_memcpy_len ||
	    rxq->mprq_repl == NULL ||
	    (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
		if (likely(len <=
			   (uint32_t)(pkt->buf_len - RTE_PKTMBUF_HEADROOM))) {
			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, len);
			DATA_LEN(pkt) = len;
		} else if (rxq->strd_scatter_en) {
			struct rte_mbuf *prev = pkt;
			uint32_t seg_len = RTE_MIN(len, (uint32_t)
				(pkt->buf_len - RTE_PKTMBUF_HEADROOM));
			uint32_t rem_len = len - seg_len;
			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, seg_len);
			DATA_LEN(pkt) = seg_len;
			while (rem_len) {
				struct rte_mbuf *next =
					rte_pktmbuf_alloc(rxq->mp);

				if (unlikely(next == NULL))
					return MLX5_RXQ_CODE_NOMBUF;
				NEXT(prev) = next;
				SET_DATA_OFF(next, 0);
				addr = RTE_PTR_ADD(addr, seg_len);
				seg_len = RTE_MIN(rem_len, (uint32_t)
					(next->buf_len - RTE_PKTMBUF_HEADROOM));
				rte_memcpy
					(rte_pktmbuf_mtod(next, void *),
					 addr, seg_len);
				DATA_LEN(next) = seg_len;
				rem_len -= seg_len;
				prev = next;
				NB_SEGS(pkt)++;
			}
		} else {
			return MLX5_RXQ_CODE_DROPPED;
		}
	} else {
		rte_iova_t buf_iova;
		struct rte_mbuf_ext_shared_info *shinfo;
		uint16_t buf_len = strd_cnt * strd_sz;
		void *buf_addr;

		/* Increment the refcnt of the whole chunk. */
		__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
		MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
			    __ATOMIC_RELAXED) <= strd_n + 1);
		buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
		/*
		 * MLX5 device doesn't use iova but it is necessary in a
		 * case where the Rx packet is transmitted via a
		 * different PMD.
		 */
		buf_iova = rte_mempool_virt2iova(buf) +
			   RTE_PTR_DIFF(buf_addr, buf);
		shinfo = &buf->shinfos[strd_idx];
		rte_mbuf_ext_refcnt_set(shinfo, 1);
		/*
		 * RTE_MBUF_F_EXTERNAL will be set to pkt->ol_flags when
		 * attaching the stride to mbuf and more offload flags
		 * will be added below by calling rxq_cq_to_mbuf().
		 * Other fields will be overwritten.
		 */
		rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
					  buf_len, shinfo);
		/* Set mbuf head-room. */
		SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
		MLX5_ASSERT(pkt->ol_flags == RTE_MBUF_F_EXTERNAL);
		MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
			len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
		DATA_LEN(pkt) = len;
		/*
		 * Copy the last fragment of a packet (up to headroom
		 * size bytes) in case there is a stride overlap with
		 * a next packet's headroom. Allocate a separate mbuf
		 * to store this fragment and link it. Scatter is on.
		 */
		if (hdrm_overlap > 0) {
			MLX5_ASSERT(rxq->strd_scatter_en);
			struct rte_mbuf *seg =
				rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(seg == NULL))
				return MLX5_RXQ_CODE_NOMBUF;
			SET_DATA_OFF(seg, 0);
			rte_memcpy(rte_pktmbuf_mtod(seg, void *),
				   RTE_PTR_ADD(addr, len - hdrm_overlap),
				   hdrm_overlap);
			DATA_LEN(seg) = hdrm_overlap;
			DATA_LEN(pkt) = len - hdrm_overlap;
			NEXT(pkt) = seg;
			NB_SEGS(pkt) = 2;
		}
	}
	return MLX5_RXQ_CODE_EXIT;
}
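/*
 * Summary of the paths above: small or overlap-constrained packets are
 * memcpy'd into the mbuf (chained into extra segments when scatter is
 * enabled), while larger packets attach the strides as an external buffer
 * and, on headroom overlap, copy only the trailing fragment into a separate
 * linked mbuf.
 */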
/**
 * Check whether Multi-Packet RQ can be enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
static __rte_always_inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.mprq.enabled &&
	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
		return 1;
	return -ENOTSUP;
}
/**
 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
	return rxq->strd_num_n > 0;
}
/**
 * Check whether Multi-Packet RQ is enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;
	uint16_t n = 0;
	uint16_t n_ibv = 0;

	if (mlx5_check_mprq_support(dev) < 0)
		return 0;
	/* All the configured queues should be enabled. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
			(rxq, struct mlx5_rxq_ctrl, rxq);

		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		n_ibv++;
		if (mlx5_rxq_mprq_enabled(rxq))
			++n;
	}
	/* Multi-Packet RQ can't be partially configured. */
	MLX5_ASSERT(n == 0 || n == n_ibv);
	return n == n_ibv;
}

#endif /* RTE_PMD_MLX5_RX_H_ */