/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_spinlock.h>
#include <rte_io.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10

/* Mbuf dynamic flag offset for inline. */
extern uint64_t rte_net_mlx5_dynf_inline_mask;

struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t oerrors; /**< Total number of failed transmitted packets. */
};

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* Multi-Packet RQ buffer header. */
struct mlx5_mprq_buf {
	struct rte_mempool *mp;
	uint16_t refcnt; /* Atomically accessed refcnt. */
	uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
	struct rte_mbuf_ext_shared_info shinfos[];
	/*
	 * Shared information per stride.
	 * More memory will be allocated for the first stride head-room and for
	 * the strides data.
	 */
} __rte_cache_aligned;

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
				sizeof(struct mlx5_mprq_buf) + \
				(strd_n) * \
				sizeof(struct rte_mbuf_ext_shared_info) + \
				RTE_PKTMBUF_HEADROOM))

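/*
 * Illustration only (not part of the API): the address of stride "i" inside
 * an MPRQ buffer can be derived from the first-stride address, assuming the
 * queue uses strd_n strides of (1 << rxq->strd_sz_n) bytes each:
 *
 *   void *strd = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n),
 *                            i * (1 << rxq->strd_sz_n));
 */
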
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9

enum mlx5_rxq_err_state {
	MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
	MLX5_RXQ_ERR_STATE_NEED_RESET,
	MLX5_RXQ_ERR_STATE_NEED_READY,
};

enum mlx5_rqx_code {
	MLX5_RXQ_CODE_EXIT = 0,
	MLX5_RXQ_CODE_NOMBUF,
	MLX5_RXQ_CODE_DROPPED,
};

struct mlx5_eth_rxseg {
	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
	uint16_t length; /**< Segment data length, configures split point. */
	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
	uint32_t reserved; /**< Reserved field. */
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int rt_timestamp:1; /* Realtime timestamp format. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int strd_num_n:5; /* Log 2 of the number of strides. */
	unsigned int strd_sz_n:4; /* Log 2 of stride size. */
	unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
	unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
	unsigned int lro:1; /* Enable LRO. */
	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
	unsigned int mcqe_format:3; /* CQE compression format. */
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint16_t port_id;
	uint32_t elts_ci;
	uint32_t rq_ci;
	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
	uint32_t rq_pi;
	uint32_t cq_ci;
	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
	uint32_t byte_mask;
	union {
		struct rxq_zip zip; /* Compressed context. */
		uint16_t decompressed;
		/* Number of ready mbufs decompressed from the CQ. */
	};
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
	volatile void *wqes;
	volatile struct mlx5_cqe(*cqes)[];
	struct rte_mbuf *(*elts)[];
	struct mlx5_mprq_buf *(*mprq_bufs)[];
	struct rte_mempool *mp;
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	uint16_t idx; /* Queue index. */
	struct mlx5_rxq_stats stats;
	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* Verbs CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *uar_lock_cq;
	/* CQ (UAR) access lock required for 32bit implementations. */
#endif
	uint32_t tunnel; /* Tunnel information. */
	int timestamp_offset; /* Dynamic mbuf field for timestamp. */
	uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
	uint64_t flow_meta_mask;
	int32_t flow_meta_offset;
	uint32_t rxseg_n; /* Number of split segment descriptions. */
	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
	/* Buffer split segment descriptions - sizes, offsets, pools. */
} __rte_cache_aligned;

enum mlx5_rxq_type {
	MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
	MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
	MLX5_RXQ_TYPE_UNDEFINED,
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	struct mlx5_rxq_data rxq; /* Data path structure. */
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	uint32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	enum mlx5_rxq_type type; /* Rxq type. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
	uint32_t wqn; /* WQ number. */
	uint16_t dump_file_n; /* Number of dump files. */
	struct mlx5_devx_dbr_page *rq_dbrec_page;
	uint64_t rq_dbr_offset;
	/* Storing RQ door-bell information, needed when freeing door-bell. */
	struct mlx5_devx_dbr_page *cq_dbrec_page;
	uint64_t cq_dbr_offset;
	/* Storing CQ door-bell information, needed when freeing door-bell. */
	void *wq_umem; /* WQ buffer registration info. */
	void *cq_umem; /* CQ buffer registration info. */
	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
	uint32_t hairpin_status; /* Hairpin binding status. */
};

/* TX queue send local data. */
__extension__
struct mlx5_txq_local {
	struct mlx5_wqe *wqe_last; /* last sent WQE pointer. */
	struct rte_mbuf *mbuf; /* first mbuf to process. */
	uint16_t pkts_copy; /* packets copied to elts. */
	uint16_t pkts_sent; /* packets sent. */
	uint16_t pkts_loop; /* packets sent on loop entry. */
	uint16_t elts_free; /* available elts remain. */
	uint16_t wqe_free; /* available wqe remain. */
	uint16_t mbuf_off; /* data offset in current mbuf. */
	uint16_t mbuf_nseg; /* number of remaining mbuf segments. */
};

/* TX queue descriptor. */
__extension__
struct mlx5_txq_data {
	uint16_t elts_head; /* Current counter in (*elts)[]. */
	uint16_t elts_tail; /* Counter of first element awaiting completion. */
	uint16_t elts_comp; /* elts index since last completion request. */
	uint16_t elts_s; /* Number of mbuf elements. */
	uint16_t elts_m; /* Mask for mbuf elements indices. */
	/* Fields related to elts mbuf storage. */
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_pi; /* Producer index for work queue. */
	uint16_t wqe_s; /* Number of WQ elements. */
	uint16_t wqe_m; /* Mask for WQ element indices. */
	uint16_t wqe_comp; /* WQE index since last completion request. */
	uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
	/* WQ related fields. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
	uint16_t cq_pi; /* Producer index for completion queue. */
	uint16_t cqe_s; /* Number of CQ elements. */
	uint16_t cqe_m; /* Mask for CQ indices. */
	/* CQ related fields. */
	uint16_t elts_n:4; /* elts[] length (in log2). */
	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
	uint16_t tunnel_en:1;
	/* When set, Tx offloads for tunneled packets are supported. */
	uint16_t swp_en:1; /* Whether SW parser is enabled. */
	uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
	uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
	uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
	uint16_t inlen_send; /* Ordinary send data inline size. */
	uint16_t inlen_empw; /* eMPW max packet size to inline. */
	uint16_t inlen_mode; /* Minimal data length to inline. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	uint64_t offloads; /* Offloads for Tx Queue. */
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	struct mlx5_wqe *wqes; /* Work queue. */
	struct mlx5_wqe *wqes_end; /* Work queue array limit. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
	uint32_t *fcqs; /* Free completion queue (debug extended). */
#else
	uint16_t *fcqs; /* Free completion queue. */
#endif
	volatile struct mlx5_cqe *cqes; /* Completion queue. */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	uint16_t port_id; /* Port ID of device. */
	uint16_t idx; /* Queue index. */
	uint64_t ts_mask; /* Timestamp flag dynamic mask. */
	int32_t ts_offset; /* Timestamp field dynamic offset. */
	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *uar_lock;
	/* UAR access lock required for 32bit implementations. */
#endif
	struct rte_mbuf *elts[0];
	/* Storage for queued packets, must be the last field. */
} __rte_cache_aligned;

enum mlx5_txq_type {
	MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
	MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
};

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
	uint32_t refcnt; /* Reference counter. */
	unsigned int socket; /* CPU socket ID for allocations. */
	enum mlx5_txq_type type; /* The txq ctrl type. */
	unsigned int max_inline_data; /* Max inline data. */
	unsigned int max_tso_header; /* Max TSO header size. */
	struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
	void *bf_reg; /* BlueFlame register from Verbs. */
	uint16_t dump_file_n; /* Number of dump files. */
	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
	uint32_t hairpin_status; /* Hairpin binding status. */
	struct mlx5_txq_data txq; /* Data path structure. */
	/* Must be the last field in the structure, contains elts[]. */
};

#define MLX5_TX_BFREG(txq) \
		(MLX5_PROC_PRIV((txq)->port_id)->uar_table[(txq)->idx])

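/*
 * Usage sketch (illustrative only): the data path fetches the per-queue
 * BlueFlame register from the per-process UAR table before ringing the
 * doorbell, e.g.:
 *
 *   uint64_t *bf_reg = MLX5_TX_BFREG(txq);
 */
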
/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int mlx5_rx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(void *dpdk_rxq);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   const struct rte_eth_rxseg_split *rx_seg,
				   uint16_t n_seg);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_obj *ind_tbl,
			       bool standalone);
int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
			     struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
			      struct mlx5_ind_table_obj *ind_tbl,
			      uint16_t *queues, const uint32_t queues_n,
			      bool standalone);
struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
		struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx);
int mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
		       struct mlx5_cache_entry *entry,
		       void *cb_ctx);
void mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
			 struct mlx5_cache_entry *entry);
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
		       struct mlx5_flow_rss_desc *rss_desc);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);

enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
	(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
		     const uint8_t *rss_key, uint32_t rss_key_len,
		     uint64_t hash_fields,
		     const uint16_t *queues, uint32_t queues_n);

/* mlx5_txq.c */

int mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
int mlx5_tx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_tx_queue_release(void *dpdk_txq);
void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_txconf *conf);
struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_verify(struct rte_eth_dev *dev);
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);

/* mlx5_rxtx.c */

extern uint32_t mlx5_ptype_table[];
extern uint8_t mlx5_cksum_table[];
extern uint8_t mlx5_swp_types_table[];

void mlx5_set_ptype_table(void);
void mlx5_set_cksum_table(void);
void mlx5_set_swp_types_table(void);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void mlx5_dump_debug_information(const char *path, const char *title,
				 const void *buf, unsigned int len);
int mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
				    const struct mlx5_mp_arg_queue_state_modify *sm);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_txq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			   struct rte_eth_burst_mode *mode);
int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			   struct rte_eth_burst_mode *mode);

/* Vectorized version of mlx5_rxtx.c */

int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
				uint16_t pkts_n);

/* mlx5_mr.c */

void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
			       struct rte_mempool *mp);
int mlx5_dma_map(struct rte_pci_device *pdev, void *addr, uint64_t iova,
		 size_t len);
int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova,
		   size_t len);

/**
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures.
 *
 * @param val
 *   value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64_relaxed(uint64_t val, void *addr,
			   rte_spinlock_t *lock __rte_unused)
{
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = val;
#else /* !RTE_ARCH_64 */
	rte_spinlock_lock(lock);
	*(uint32_t *)addr = val;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = val >> 32;
	rte_spinlock_unlock(lock);
#endif
}

/**
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures while guaranteeing the order of execution with the
 * code being executed.
 *
 * @param val
 *   value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
{
	rte_io_wmb();
	__mlx5_uar_write64_relaxed(val, addr, lock);
}

/* Assist macros, used instead of directly calling the functions they wrap. */
#ifdef RTE_ARCH_64
#define mlx5_uar_write64_relaxed(val, dst, lock) \
		__mlx5_uar_write64_relaxed(val, dst, NULL)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
#else
#define mlx5_uar_write64_relaxed(val, dst, lock) \
		__mlx5_uar_write64_relaxed(val, dst, lock)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
#endif

/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
 * cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static inline struct rte_mempool *
mlx5_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_CLONED(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
 * as mempool is pre-configured and static.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half (Binary Search) on miss. */
	return mlx5_rx_addr2mr_bh(rxq, addr);
}

#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))

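/*
 * Illustrative use in the Rx replenishment path (assumed context, not a
 * definition from this file): the LKey of a freshly allocated mbuf is
 * resolved before it is written into the WQE data segment, e.g.:
 *
 *   wqe->lkey = mlx5_rx_mb2mr(rxq, mbuf);
 */
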
/**
 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	/* Check generation bit to see if there's any change on existing MRs. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half on miss. */
	return mlx5_tx_mb2mr_bh(txq, mb);
}

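/*
 * Illustrative use on the Tx path (assumed context): the LKey returned here
 * is placed into the WQE data segment describing the mbuf, e.g.:
 *
 *   dseg->lkey = mlx5_tx_mb2mr(txq, mbuf);
 */
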
/**
 * Ring TX queue doorbell and flush the update if requested.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 * @param cond
 *   Request for write memory barrier after BlueFlame update.
 */
static __rte_always_inline void
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
		       int cond)
{
	uint64_t *dst = MLX5_TX_BFREG(txq);
	volatile uint64_t *src = ((volatile uint64_t *)wqe);

	rte_io_wmb();
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
	/* Ensure ordering between DB record and BF copy. */
	rte_wmb();
	mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
	if (cond)
		rte_wmb();
}

/**
 * Ring TX queue doorbell and flush the update by write memory barrier.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}

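/*
 * Usage sketch (illustrative only): a burst routine typically posts all
 * WQEs first and rings the doorbell once for the last of them, assuming
 * "wqe_last" points to the last posted WQE:
 *
 *   mlx5_tx_dbrec(txq, wqe_last);
 */
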
/**
 * Convert timestamp from HW format to linear counter
 * from Packet Pacing Clock Queue CQE timestamp format.
 *
 * @param sh
 *   Pointer to the device shared context. Might be needed
 *   to convert according to the current device configuration.
 * @param ts
 *   Timestamp from CQE to convert.
 * @return
 *   UTC in nanoseconds.
 */
static __rte_always_inline uint64_t
mlx5_txpp_convert_rx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t ts)
{
	RTE_SET_USED(sh);
	return (ts & UINT32_MAX) + (ts >> 32) * NS_PER_S;
}

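/*
 * Worked example (illustrative only): the CQE timestamp packs seconds in the
 * upper 32 bits and nanoseconds in the lower 32 bits, so
 * ts = (5ULL << 32) | 100 converts to 5 * NS_PER_S + 100 nanoseconds.
 */
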
/**
 * Convert timestamp from mbuf format to linear counter
 * of Clock Queue completions (24 bits).
 *
 * @param sh
 *   Pointer to the device shared context to fetch Tx
 *   packet pacing timestamp and parameters.
 * @param mts
 *   Timestamp from mbuf to convert.
 * @return
 *   positive or zero value - completion ID to wait.
 *   negative value - conversion error.
 */
static __rte_always_inline int32_t
mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
{
	uint64_t ts, ci;
	uint32_t tick;

	do {
		/*
		 * Read atomically two uint64_t fields and compare lsb bits.
		 * If there is no match - the timestamp was updated in
		 * the service thread, data should be re-read.
		 */
		rte_compiler_barrier();
		ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
		ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
		rte_compiler_barrier();
		if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
			break;
	} while (true);
	/* Perform the skew correction, positive value to send earlier. */
	mts -= sh->txpp.skew;
	mts -= ts;
	if (unlikely(mts >= UINT64_MAX / 2)) {
		/* The delta is negative - the timestamp is in the past. */
		__atomic_fetch_add(&sh->txpp.err_ts_past,
				   1, __ATOMIC_RELAXED);
		return -1;
	}
	tick = sh->txpp.tick;
	MLX5_ASSERT(tick);
	/* Convert delta to completions, round up. */
	mts = (mts + tick - 1) / tick;
	if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
		/* The timestamp is too far in the future. */
		__atomic_fetch_add(&sh->txpp.err_ts_future,
				   1, __ATOMIC_RELAXED);
		return -1;
	}
	mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
	ci += mts;
	ci >>= 64 - MLX5_CQ_INDEX_WIDTH;
	return ci;
}

/**
 * Set timestamp in mbuf dynamic field.
 *
 * @param mbuf
 *   Structure to write into.
 * @param offset
 *   Dynamic field offset in mbuf structure.
 * @param timestamp
 *   Value to write.
 */
static __rte_always_inline void
mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
		rte_mbuf_timestamp_t timestamp)
{
	*RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = timestamp;
}

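/*
 * Illustrative use (assumed context): the Rx path stores the converted CQE
 * timestamp into the dynamic mbuf field registered for the queue, e.g.:
 *
 *   mlx5_timestamp_set(pkt, rxq->timestamp_offset,
 *                      mlx5_txpp_convert_rx_ts(rxq->sh, ts));
 *   pkt->ol_flags |= rxq->timestamp_rx_flag;
 */
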
/**
 * Replace MPRQ buffer.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param rq_idx
 *   RQ index to replace.
 */
static __rte_always_inline void
mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	struct mlx5_mprq_buf *rep = rxq->mprq_repl;
	volatile struct mlx5_wqe_data_seg *wqe =
		&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
	void *addr;

	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
		MLX5_ASSERT(rep != NULL);
		/* Replace MPRQ buf. */
		(*rxq->mprq_bufs)[rq_idx] = rep;
		/* Replace WQE. */
		addr = mlx5_mprq_buf_addr(rep, strd_n);
		wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
		/* Stash a mbuf for next replacement. */
		if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
			rxq->mprq_repl = rep;
		else
			rxq->mprq_repl = NULL;
		/* Release the old buffer. */
		mlx5_mprq_buf_free(buf);
	} else if (unlikely(rxq->mprq_repl == NULL)) {
		struct mlx5_mprq_buf *rep;

		/*
		 * The MPRQ mempool is currently out of buffers, so packets
		 * are copied regardless of their size. Retry the allocation
		 * to get back to normal operation.
		 */
		if (!rte_mempool_get(rxq->mprq_mp, (void **)&rep))
			rxq->mprq_repl = rep;
	}
}

/**
 * Attach or copy MPRQ buffer content to a packet.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param pkt
 *   Pointer to a packet to fill.
 * @param len
 *   Packet length.
 * @param buf
 *   Pointer to a MPRQ buffer to take the data from.
 * @param strd_idx
 *   Stride index to start from.
 * @param strd_cnt
 *   Number of strides to consume.
 */
static __rte_always_inline enum mlx5_rqx_code
mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
		struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
{
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	const uint16_t strd_sz = 1 << rxq->strd_sz_n;
	const uint16_t strd_shift =
		MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
	const int32_t hdrm_overlap =
		len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
	const uint32_t offset = strd_idx * strd_sz + strd_shift;
	void *addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);

	/*
	 * Memcpy packets to the target mbuf if:
	 * - The size of the packet is smaller than mprq_max_memcpy_len.
	 * - The Mempool for Multi-Packet RQ is out of buffers.
	 * - The packet's stride overlaps a headroom and scatter is off.
	 */
	if (len <= rxq->mprq_max_memcpy_len ||
	    rxq->mprq_repl == NULL ||
	    (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
		if (likely(len <=
			   (uint32_t)(pkt->buf_len - RTE_PKTMBUF_HEADROOM))) {
			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, len);
			DATA_LEN(pkt) = len;
		} else if (rxq->strd_scatter_en) {
			struct rte_mbuf *prev = pkt;
			uint32_t seg_len = RTE_MIN(len, (uint32_t)
				(pkt->buf_len - RTE_PKTMBUF_HEADROOM));
			uint32_t rem_len = len - seg_len;

			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
				   addr, seg_len);
			DATA_LEN(pkt) = seg_len;
			while (rem_len) {
				struct rte_mbuf *next =
					rte_pktmbuf_alloc(rxq->mp);

				if (unlikely(next == NULL))
					return MLX5_RXQ_CODE_NOMBUF;
				NEXT(prev) = next;
				SET_DATA_OFF(next, 0);
				addr = RTE_PTR_ADD(addr, seg_len);
				seg_len = RTE_MIN(rem_len, (uint32_t)
					(next->buf_len - RTE_PKTMBUF_HEADROOM));
				rte_memcpy
					(rte_pktmbuf_mtod(next, void *),
					 addr, seg_len);
				DATA_LEN(next) = seg_len;
				rem_len -= seg_len;
				prev = next;
				NB_SEGS(pkt)++;
			}
		} else {
			return MLX5_RXQ_CODE_DROPPED;
		}
	} else {
		rte_iova_t buf_iova;
		struct rte_mbuf_ext_shared_info *shinfo;
		uint16_t buf_len = strd_cnt * strd_sz;
		void *buf_addr;

		/* Increment the refcnt of the whole chunk. */
		__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
		MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
			    __ATOMIC_RELAXED) <= strd_n + 1);
		buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
		/*
		 * The MLX5 device doesn't use the iova itself, but it is
		 * necessary in case the Rx packet is transmitted via a
		 * different PMD.
		 */
		buf_iova = rte_mempool_virt2iova(buf) +
			   RTE_PTR_DIFF(buf_addr, buf);
		shinfo = &buf->shinfos[strd_idx];
		rte_mbuf_ext_refcnt_set(shinfo, 1);
		/*
		 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
		 * attaching the stride to mbuf and more offload flags
		 * will be added below by calling rxq_cq_to_mbuf().
		 * Other fields will be overwritten.
		 */
		rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
					  buf_len, shinfo);
		/* Set mbuf head-room. */
		SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
		MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
		MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
			len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
		DATA_LEN(pkt) = len;
		/*
		 * Copy the last fragment of a packet (up to headroom
		 * size bytes) in case there is a stride overlap with
		 * a next packet's headroom. Allocate a separate mbuf
		 * to store this fragment and link it. Scatter is on.
		 */
		if (hdrm_overlap > 0) {
			MLX5_ASSERT(rxq->strd_scatter_en);
			struct rte_mbuf *seg =
				rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(seg == NULL))
				return MLX5_RXQ_CODE_NOMBUF;
			SET_DATA_OFF(seg, 0);
			rte_memcpy(rte_pktmbuf_mtod(seg, void *),
				RTE_PTR_ADD(addr, len - hdrm_overlap),
				hdrm_overlap);
			DATA_LEN(seg) = hdrm_overlap;
			DATA_LEN(pkt) = len - hdrm_overlap;
			NEXT(pkt) = seg;
			NB_SEGS(pkt) = 2;
		}
	}
	return MLX5_RXQ_CODE_EXIT;
}

/**
 * Check whether Multi-Packet RQ can be enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
static __rte_always_inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.mprq.enabled &&
	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
		return 1;
	return -ENOTSUP;
}

/**
 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
	return rxq->strd_num_n > 0;
}

/**
 * Check whether Multi-Packet RQ is enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
static __rte_always_inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;
	uint16_t n = 0;
	uint16_t n_ibv = 0;

	if (mlx5_check_mprq_support(dev) < 0)
		return 0;
	/* All the configured queues should be enabled. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
			(rxq, struct mlx5_rxq_ctrl, rxq);

		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		n_ibv++;
		if (mlx5_rxq_mprq_enabled(rxq))
			++n;
	}
	/* Multi-Packet RQ can't be partially configured. */
	MLX5_ASSERT(n == 0 || n == n_ibv);
	return n == n_ibv;
}

#endif /* RTE_PMD_MLX5_RXTX_H_ */