/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
#include <rte_io.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10

/* Mbuf dynamic flag offset for inline. */
extern uint64_t rte_net_mlx5_dynf_inline_mask;

struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
        uint64_t ipackets; /**< Total of successfully received packets. */
        uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
        uint64_t idropped; /**< Total of packets dropped when RX ring full. */
        uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
        uint64_t opackets; /**< Total of successfully sent packets. */
        uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
        uint64_t oerrors; /**< Total number of failed transmitted packets. */
};

/* Compressed CQE context. */
struct rxq_zip {
        uint16_t ai; /* Array index. */
        uint16_t ca; /* Current array index. */
        uint16_t na; /* Next array index. */
        uint16_t cq_ci; /* The next CQE. */
        uint32_t cqe_cnt; /* Number of CQEs. */
};

/* Multi-Packet RQ buffer header. */
struct mlx5_mprq_buf {
        struct rte_mempool *mp;
        rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
        uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
        struct rte_mbuf_ext_shared_info shinfos[];
        /*
         * Shared information per stride.
         * More memory will be allocated for the first stride head-room and for
         * the strides data.
         */
} __rte_cache_aligned;

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
                                sizeof(struct mlx5_mprq_buf) + \
                                (strd_n) * \
                                sizeof(struct rte_mbuf_ext_shared_info) + \
                                RTE_PKTMBUF_HEADROOM))

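/*
 * Usage sketch (illustrative only, not part of the datapath): for a buffer
 * taken from the Multi-Packet RQ mempool, the macro above skips the buffer
 * header, the per-stride shared-info array and the first-packet headroom to
 * reach stride 0:
 *
 *   struct mlx5_mprq_buf *buf;    -- obtained from rxq->mprq_mp
 *   void *strd0 = mlx5_mprq_buf_addr(buf, 1 << rxq->strd_num_n);
 */
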
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9

enum mlx5_rxq_err_state {
        MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
        MLX5_RXQ_ERR_STATE_NEED_RESET,
        MLX5_RXQ_ERR_STATE_NEED_READY,
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
        unsigned int csum:1; /* Enable checksum offloading. */
        unsigned int hw_timestamp:1; /* Enable HW timestamp. */
        unsigned int vlan_strip:1; /* Enable VLAN stripping. */
        unsigned int crc_present:1; /* CRC must be subtracted. */
        unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
        unsigned int cqe_n:4; /* Log 2 of CQ elements. */
        unsigned int elts_n:4; /* Log 2 of Mbufs. */
        unsigned int rss_hash:1; /* RSS hash result is enabled. */
        unsigned int mark:1; /* Marked flow available on the queue. */
        unsigned int strd_num_n:5; /* Log 2 of the number of strides. */
        unsigned int strd_sz_n:4; /* Log 2 of stride size. */
        unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
        unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
        unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
        unsigned int lro:1; /* Enable LRO. */
        unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
        volatile uint32_t *rq_db;
        volatile uint32_t *cq_db;
        uint16_t consumed_strd; /* Number of consumed strides in WQE. */
        uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
        struct rxq_zip zip; /* Compressed context. */
        uint16_t decompressed;
        /* Number of ready mbufs decompressed from the CQ. */
        struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
        uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
        volatile struct mlx5_cqe(*cqes)[];
        struct rte_mbuf *(*elts)[];
        struct mlx5_mprq_buf *(*mprq_bufs)[];
        struct rte_mempool *mp;
        struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
        struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
        uint16_t idx; /* Queue index. */
        struct mlx5_rxq_stats stats;
        rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
        struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
        void *cq_uar; /* CQ user access region. */
        uint32_t cqn; /* CQ number. */
        uint8_t cq_arm_sn; /* CQ arm seq number. */
        rte_spinlock_t *uar_lock_cq;
        /* CQ (UAR) access lock required for 32bit implementations. */
        uint32_t tunnel; /* Tunnel information. */
        uint64_t flow_meta_mask;
        int32_t flow_meta_offset;
} __rte_cache_aligned;

enum mlx5_rxq_obj_type {
        MLX5_RXQ_OBJ_TYPE_IBV, /* mlx5_rxq_obj with ibv_wq. */
        MLX5_RXQ_OBJ_TYPE_DEVX_RQ, /* mlx5_rxq_obj with mlx5_devx_rq. */
        MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN,
        /* mlx5_rxq_obj with mlx5_devx_rq and hairpin support. */
};

enum mlx5_rxq_type {
        MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
        MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
        MLX5_RXQ_TYPE_UNDEFINED,
};

/* Verbs/DevX Rx queue elements. */
struct mlx5_rxq_obj {
        LIST_ENTRY(mlx5_rxq_obj) next; /* Pointer to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
        struct ibv_cq *cq; /* Completion Queue. */
        enum mlx5_rxq_obj_type type;
        struct ibv_wq *wq; /* Work Queue. */
        struct mlx5_devx_obj *rq; /* DevX object for Rx Queue. */
        struct ibv_comp_channel *channel;
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
        struct mlx5_rxq_data rxq; /* Data path structure. */
        LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
        struct mlx5_priv *priv; /* Back pointer to private data. */
        enum mlx5_rxq_type type; /* Rxq type. */
        unsigned int socket; /* CPU socket ID for allocations. */
        unsigned int irq:1; /* Whether IRQ is enabled. */
        unsigned int dbr_umem_id_valid:1; /* dbr_umem_id holds a valid value. */
        uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
        uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
        uint32_t wqn; /* WQ number. */
        uint16_t dump_file_n; /* Number of dump files. */
        uint32_t dbr_umem_id; /* Storing door-bell information, */
        uint64_t dbr_offset; /* needed when freeing door-bell. */
        struct mlx5dv_devx_umem *wq_umem; /* WQ buffer registration info. */
        struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
};

enum mlx5_ind_tbl_type {
        MLX5_IND_TBL_TYPE_IBV,
        MLX5_IND_TBL_TYPE_DEVX,
};

/* Indirection table. */
struct mlx5_ind_table_obj {
        LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        enum mlx5_ind_tbl_type type;
        struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
        struct mlx5_devx_obj *rqt; /* DevX RQT object. */
        uint32_t queues_n; /**< Number of queues in the list. */
        uint16_t queues[]; /**< Queue list. */
};

/* Hash Rx queue. */
struct mlx5_hrxq {
        ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
        struct ibv_qp *qp; /* Verbs queue pair. */
        struct mlx5_devx_obj *tir; /* DevX TIR object. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        void *action; /* DV QP action pointer. */
#endif
        uint64_t hash_fields; /* Verbs Hash fields. */
        uint32_t rss_key_len; /* Hash key length in bytes. */
        uint8_t rss_key[]; /* Hash key. */
};

/* TX queue send local data. */
struct mlx5_txq_local {
        struct mlx5_wqe *wqe_last; /* last sent WQE pointer. */
        struct rte_mbuf *mbuf; /* first mbuf to process. */
        uint16_t pkts_copy; /* packets copied to elts. */
        uint16_t pkts_sent; /* packets sent. */
        uint16_t pkts_loop; /* packets sent on loop entry. */
        uint16_t elts_free; /* available elts remain. */
        uint16_t wqe_free; /* available wqe remain. */
        uint16_t mbuf_off; /* data offset in current mbuf. */
        uint16_t mbuf_nseg; /* number of remaining mbuf segments. */
};

/* TX queue descriptor. */
struct mlx5_txq_data {
        uint16_t elts_head; /* Current counter in (*elts)[]. */
        uint16_t elts_tail; /* Counter of first element awaiting completion. */
        uint16_t elts_comp; /* elts index since last completion request. */
        uint16_t elts_s; /* Number of mbuf elements. */
        uint16_t elts_m; /* Mask for mbuf elements indices. */
        /* Fields related to elts mbuf storage. */
        uint16_t wqe_ci; /* Consumer index for work queue. */
        uint16_t wqe_pi; /* Producer index for work queue. */
        uint16_t wqe_s; /* Number of WQ elements. */
        uint16_t wqe_m; /* Mask for WQ element indices. */
        uint16_t wqe_comp; /* WQE index since last completion request. */
        uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
        /* WQ related fields. */
        uint16_t cq_ci; /* Consumer index for completion queue. */
        uint16_t cq_pi; /* Producer index for completion queue. */
        uint16_t cqe_s; /* Number of CQ elements. */
        uint16_t cqe_m; /* Mask for CQ indices. */
        /* CQ related fields. */
        uint16_t elts_n:4; /* elts[] length (in log2). */
        uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
        uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
        uint16_t tso_en:1; /* When set hardware TSO is enabled. */
        uint16_t tunnel_en:1;
        /* When set, Tx offloads for tunneled packets are supported. */
        uint16_t swp_en:1; /* Whether SW parser is enabled. */
        uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
        uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
        uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
        uint16_t inlen_send; /* Ordinary send data inline size. */
        uint16_t inlen_empw; /* eMPW max packet size to inline. */
        uint16_t inlen_mode; /* Minimal data length to inline. */
        uint32_t qp_num_8s; /* QP number shifted by 8. */
        uint64_t offloads; /* Offloads for Tx Queue. */
        struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
        struct mlx5_wqe *wqes; /* Work queue. */
        struct mlx5_wqe *wqes_end; /* Work queue array limit. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
        uint32_t *fcqs; /* Free completion queue (debug extended). */
#else
        uint16_t *fcqs; /* Free completion queue. */
#endif
        volatile struct mlx5_cqe *cqes; /* Completion queue. */
        volatile uint32_t *qp_db; /* Work queue doorbell. */
        volatile uint32_t *cq_db; /* Completion queue doorbell. */
        uint16_t port_id; /* Port ID of device. */
        uint16_t idx; /* Queue index. */
        uint64_t ts_mask; /* Timestamp flag dynamic mask. */
        int32_t ts_offset; /* Timestamp field dynamic offset. */
        struct mlx5_dev_ctx_shared *sh; /* Shared context. */
        struct mlx5_txq_stats stats; /* TX queue counters. */
        rte_spinlock_t *uar_lock;
        /* UAR access lock required for 32bit implementations. */
        struct rte_mbuf *elts[0];
        /* Storage for queued packets, must be the last field. */
} __rte_cache_aligned;

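/*
 * Indexing sketch (illustrative only): elts_head and elts_tail are
 * free-running counters, so a slot in the elts[] ring is always addressed
 * through the mask, e.g.:
 *
 *   txq->elts[txq->elts_head++ & txq->elts_m] = mbuf;
 *   completed = txq->elts[txq->elts_tail++ & txq->elts_m];
 */
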
enum mlx5_txq_obj_type {
        MLX5_TXQ_OBJ_TYPE_IBV, /* mlx5_txq_obj with ibv_wq. */
        MLX5_TXQ_OBJ_TYPE_DEVX_SQ, /* mlx5_txq_obj with mlx5_devx_sq. */
        MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN,
        /* mlx5_txq_obj with mlx5_devx_tq and hairpin support. */
};

enum mlx5_txq_type {
        MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
        MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
};

/* Verbs/DevX Tx queue elements. */
struct mlx5_txq_obj {
        LIST_ENTRY(mlx5_txq_obj) next; /* Pointer to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
        enum mlx5_txq_obj_type type; /* The txq object type. */
        struct ibv_cq *cq; /* Completion Queue. */
        struct ibv_qp *qp; /* Queue Pair. */
        struct mlx5_devx_obj *sq;
        /* DevX object for Tx queue. */
        struct mlx5_devx_obj *tis; /* The TIS object. */
        struct rte_eth_dev *dev;
        struct mlx5_devx_obj *cq_devx;
        struct mlx5dv_devx_umem *cq_umem;
        int64_t cq_dbrec_offset;
        struct mlx5_devx_dbr_page *cq_dbrec_page;
        struct mlx5_devx_obj *sq_devx;
        struct mlx5dv_devx_umem *sq_umem;
        int64_t sq_dbrec_offset;
        struct mlx5_devx_dbr_page *sq_dbrec_page;
};

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
        LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        unsigned int socket; /* CPU socket ID for allocations. */
        enum mlx5_txq_type type; /* The txq ctrl type. */
        unsigned int max_inline_data; /* Max inline data. */
        unsigned int max_tso_header; /* Max TSO header size. */
        struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
        struct mlx5_priv *priv; /* Back pointer to private data. */
        off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
        void *bf_reg; /* BlueFlame register from Verbs. */
        uint16_t dump_file_n; /* Number of dump files. */
        struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
        struct mlx5_txq_data txq; /* Data path structure. */
        /* Must be the last field in the structure, contains elts[]. */
};

#define MLX5_TX_BFREG(txq) \
                (MLX5_PROC_PRIV((txq)->port_id)->uar_table[(txq)->idx])

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

int mlx5_check_mprq_support(struct rte_eth_dev *dev);
int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
int mlx5_mprq_enabled(struct rte_eth_dev *dev);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        unsigned int socket, const struct rte_eth_rxconf *conf,
                        struct rte_mempool *mp);
int mlx5_rx_hairpin_queue_setup
        (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(void *dpdk_rxq);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
struct mlx5_rxq_obj *mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
                                      enum mlx5_rxq_obj_type type);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
                                   uint16_t desc, unsigned int socket,
                                   const struct rte_eth_rxconf *conf,
                                   struct rte_mempool *mp);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
        (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
                       const uint8_t *rss_key, uint32_t rss_key_len,
                       uint64_t hash_fields,
                       const uint16_t *queues, uint32_t queues_n,
                       int tunnel __rte_unused);
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
                       const uint8_t *rss_key, uint32_t rss_key_len,
                       uint64_t hash_fields,
                       const uint16_t *queues, uint32_t queues_n);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
int mlx5_hrxq_verify(struct rte_eth_dev *dev);
enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);

/* mlx5_txq.c */

int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        unsigned int socket, const struct rte_eth_txconf *conf);
int mlx5_tx_hairpin_queue_setup
        (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_tx_queue_release(void *dpdk_txq);
int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
struct mlx5_txq_obj *mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
                                      enum mlx5_txq_obj_type type);
struct mlx5_txq_obj *mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_obj_release(struct mlx5_txq_obj *txq_ibv);
int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
                                   uint16_t desc, unsigned int socket,
                                   const struct rte_eth_txconf *conf);
struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
        (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_verify(struct rte_eth_dev *dev);
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);

/* mlx5_rxtx.c */

extern uint32_t mlx5_ptype_table[];
extern uint8_t mlx5_cksum_table[];
extern uint8_t mlx5_swp_types_table[];

void mlx5_set_ptype_table(void);
void mlx5_set_cksum_table(void);
void mlx5_set_swp_types_table(void);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
                            uint16_t pkts_n);
uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
                          uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
                          uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void mlx5_dump_debug_information(const char *path, const char *title,
                                 const void *buf, unsigned int len);
int mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
                                    const struct mlx5_mp_arg_queue_state_modify *sm);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                       struct rte_eth_rxq_info *qinfo);
void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                       struct rte_eth_txq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                           struct rte_eth_burst_mode *mode);
int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                           struct rte_eth_burst_mode *mode);

/* Vectorized version of mlx5_rxtx.c */

int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
                           uint16_t pkts_n);

/* mlx5_mr.c */

void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
                               struct rte_mempool *mp);
int mlx5_dma_map(struct rte_pci_device *pdev, void *addr, uint64_t iova,
                 size_t len);
int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova,
                   size_t len);

/**
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures.
 *
 * @param val
 *   value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64_relaxed(uint64_t val, void *addr,
                           rte_spinlock_t *lock __rte_unused)
{
#ifdef RTE_ARCH_64
        *(uint64_t *)addr = val;
#else /* !RTE_ARCH_64 */
        rte_spinlock_lock(lock);
        *(uint32_t *)addr = val;
        rte_io_wmb();
        *((uint32_t *)addr + 1) = val >> 32;
        rte_spinlock_unlock(lock);
#endif
}

/**
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures while guaranteeing the order of execution with the
 * code being executed.
 *
 * @param val
 *   value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
{
        rte_io_wmb();
        __mlx5_uar_write64_relaxed(val, addr, lock);
}

/* Assist macros, used instead of directly calling the functions they wrap. */
#ifdef RTE_ARCH_64
#define mlx5_uar_write64_relaxed(val, dst, lock) \
                __mlx5_uar_write64_relaxed(val, dst, NULL)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
#else
#define mlx5_uar_write64_relaxed(val, dst, lock) \
                __mlx5_uar_write64_relaxed(val, dst, lock)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
#endif

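/*
 * Usage sketch (illustrative only): push the first 8 bytes of the last WQE to
 * the BlueFlame register; on 32-bit builds the per-queue UAR lock serializes
 * the two 32-bit stores, on 64-bit builds the lock argument is ignored:
 *
 *   mlx5_uar_write64(*(volatile uint64_t *)wqe, MLX5_TX_BFREG(txq),
 *                    txq->uar_lock);
 */
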
/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
 * cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static inline struct rte_mempool *
mlx5_mb2mp(struct rte_mbuf *buf)
{
        if (unlikely(RTE_MBUF_CLONED(buf)))
                return rte_mbuf_from_indirect(buf)->pool;
        return buf->pool;
}

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
 * as mempool is pre-configured and static.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
        struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
        uint32_t lkey;

        /* Linear search on MR cache array. */
        lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
                                   MLX5_MR_CACHE_N, addr);
        if (likely(lkey != UINT32_MAX))
                return lkey;
        /* Take slower bottom-half (Binary Search) on miss. */
        return mlx5_rx_addr2mr_bh(rxq, addr);
}

#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))

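/*
 * Usage sketch (illustrative only): while refilling the RQ, the LKey of a
 * freshly allocated mbuf can be resolved from the per-queue MR cache:
 *
 *   uint32_t lkey = mlx5_rx_mb2mr(rxq, mbuf);
 */
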
/**
 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
        struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
        uintptr_t addr = (uintptr_t)mb->buf_addr;
        uint32_t lkey;

        /* Check generation bit to see if there's any change on existing MRs. */
        if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
                mlx5_mr_flush_local_cache(mr_ctrl);
        /* Linear search on MR cache array. */
        lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
                                   MLX5_MR_CACHE_N, addr);
        if (likely(lkey != UINT32_MAX))
                return lkey;
        /* Take slower bottom-half on miss. */
        return mlx5_tx_mb2mr_bh(txq, mb);
}

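/*
 * Usage sketch (illustrative only): when building a data segment for an
 * mbuf, the datapath resolves the memory key from the per-queue MR cache:
 *
 *   uint32_t lkey = mlx5_tx_mb2mr(txq, mbuf);
 *   if (unlikely(lkey == UINT32_MAX))
 *           -- no MR covers this buffer, the packet cannot be sent --
 */
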
/**
 * Ring TX queue doorbell and flush the update if requested.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 * @param cond
 *   Request for write memory barrier after BlueFlame update.
 */
static __rte_always_inline void
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
                       int cond)
{
        uint64_t *dst = MLX5_TX_BFREG(txq);
        volatile uint64_t *src = ((volatile uint64_t *)wqe);

        rte_cio_wmb();
        *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
        /* Ensure ordering between DB record and BF copy. */
        rte_wmb();
        mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
        if (cond)
                rte_wmb();
}

/**
 * Ring TX queue doorbell and flush the update by write memory barrier.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
        mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}

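/*
 * Usage sketch (illustrative only): after the burst routine has written its
 * WQEs and advanced txq->wqe_ci, it posts them to the NIC in one shot:
 *
 *   mlx5_tx_dbrec(txq, loc.wqe_last);
 *
 * mlx5_tx_dbrec_cond_wmb(txq, wqe, 0) may be used instead when the final
 * write memory barrier can be deferred by the caller.
 */
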
/**
 * Convert timestamp from HW format to linear counter
 * from Packet Pacing Clock Queue CQE timestamp format.
 *
 * @param sh
 *   Pointer to the device shared context. Might be needed
 *   to convert according to the current device configuration.
 * @param ts
 *   Timestamp from CQE to convert.
 *
 * @return
 *   UTC in nanoseconds.
 */
static __rte_always_inline uint64_t
mlx5_txpp_convert_rx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t ts)
{
        RTE_SET_USED(sh);
        return (ts & UINT32_MAX) + (ts >> 32) * NS_PER_S;
}

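/*
 * Worked example (illustrative only): the conversion treats the upper 32 bits
 * as seconds and the lower 32 bits as nanoseconds, so
 *
 *   ts = (5ULL << 32) | 250000000
 *
 * converts to 5 * NS_PER_S + 250000000 = 5250000000 ns.
 */
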
/**
 * Convert timestamp from mbuf format to linear counter
 * of Clock Queue completions (24 bits).
 *
 * @param sh
 *   Pointer to the device shared context to fetch Tx
 *   packet pacing timestamp and parameters.
 * @param mts
 *   Timestamp from mbuf to convert.
 *
 * @return
 *   positive or zero value - completion ID to wait,
 *   negative value - conversion error.
 */
static __rte_always_inline int32_t
mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
{
        uint64_t ts, ci;
        uint32_t tick;

        do {
                /*
                 * Read atomically two uint64_t fields and compare lsb bits.
                 * If there is no match - the timestamp was updated in
                 * the service thread, data should be re-read.
                 */
                rte_compiler_barrier();
                ci = rte_atomic64_read(&sh->txpp.ts.ci_ts);
                ts = rte_atomic64_read(&sh->txpp.ts.ts);
                rte_compiler_barrier();
                if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
                        break;
        } while (true);
        /* Perform the skew correction, positive value to send earlier. */
        mts -= sh->txpp.skew;
        mts -= ts;
        if (unlikely(mts >= UINT64_MAX / 2)) {
                /* We have a negative integer, mts is in the past. */
                rte_atomic32_inc(&sh->txpp.err_ts_past);
                return -1;
        }
        tick = sh->txpp.tick;
        MLX5_ASSERT(tick);
        /* Convert the delta to completions, round up. */
        mts = (mts + tick - 1) / tick;
        if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
                /* The timestamp is too distant in the future. */
                rte_atomic32_inc(&sh->txpp.err_ts_future);
                return -1;
        }
        mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
        ci += mts;
        ci >>= 64 - MLX5_CQ_INDEX_WIDTH;
        return ci;
}

#endif /* RTE_PMD_MLX5_RXTX_H_ */