/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
#include <rte_io.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 5

struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
        uint64_t ipackets; /**< Total of successfully received packets. */
        uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
        uint64_t idropped; /**< Total of packets dropped when RX ring full. */
        uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
        uint64_t opackets; /**< Total of successfully sent packets. */
        uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
        uint64_t oerrors; /**< Total number of failed transmitted packets. */
};

struct mlx5_priv;

/* Compressed CQE context. */
struct rxq_zip {
        uint16_t ai; /* Array index. */
        uint16_t ca; /* Current array index. */
        uint16_t na; /* Next array index. */
        uint16_t cq_ci; /* The next CQE. */
        uint32_t cqe_cnt; /* Number of CQEs. */
};

/* Multi-Packet RQ buffer header. */
struct mlx5_mprq_buf {
        struct rte_mempool *mp;
        rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
        uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
} __rte_cache_aligned;

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr) ((ptr) + 1)

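/*
 * Illustrative sketch, not part of the driver API: with the macro above,
 * stride "strd_idx" of a Multi-Packet RQ buffer starts right after the
 * header at a multiple of the stride size. The helper name and the log2
 * stride-size argument (strd_sz_n, as kept in mlx5_rxq_data below) are
 * hypothetical.
 */
static __rte_always_inline void *
mlx5_example_mprq_stride_addr(struct mlx5_mprq_buf *buf,
                              unsigned int strd_idx, unsigned int strd_sz_n)
{
        /* The first stride begins immediately after struct mlx5_mprq_buf. */
        uint8_t *base = (uint8_t *)mlx5_mprq_buf_addr(buf);

        return base + ((size_t)strd_idx << strd_sz_n);
}
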
enum mlx5_rxq_err_state {
        MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
        MLX5_RXQ_ERR_STATE_NEED_RESET,
        MLX5_RXQ_ERR_STATE_NEED_READY,
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
        unsigned int csum:1; /* Enable checksum offloading. */
        unsigned int hw_timestamp:1; /* Enable HW timestamp. */
        unsigned int vlan_strip:1; /* Enable VLAN stripping. */
        unsigned int crc_present:1; /* CRC must be subtracted. */
        unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
        unsigned int cqe_n:4; /* Log 2 of CQ elements. */
        unsigned int elts_n:4; /* Log 2 of Mbufs. */
        unsigned int rss_hash:1; /* RSS hash result is enabled. */
        unsigned int mark:1; /* Marked flow available on the queue. */
        unsigned int strd_num_n:5; /* Log 2 of the number of strides. */
        unsigned int strd_sz_n:4; /* Log 2 of stride size. */
        unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
        unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
        unsigned int :4; /* Remaining bits. */
        volatile uint32_t *rq_db;
        volatile uint32_t *cq_db;
        uint16_t port_id;
        uint32_t rq_ci;
        uint16_t consumed_strd; /* Number of consumed strides in WQE. */
        uint32_t rq_pi;
        uint32_t cq_ci;
        uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
        union {
                struct rxq_zip zip; /* Compressed context. */
                uint16_t decompressed;
                /* Number of ready mbufs decompressed from the CQ. */
        };
        struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
        uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
        volatile void *wqes;
        volatile struct mlx5_cqe(*cqes)[];
        RTE_STD_C11
        union {
                struct rte_mbuf *(*elts)[];
                struct mlx5_mprq_buf *(*mprq_bufs)[];
        };
        struct rte_mempool *mp;
        struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
        struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
        uint16_t idx; /* Queue index. */
        struct mlx5_rxq_stats stats;
        uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
        struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
        void *cq_uar; /* CQ user access region. */
        uint32_t cqn; /* CQ number. */
        uint8_t cq_arm_sn; /* CQ arm seq number. */
#ifndef RTE_ARCH_64
        rte_spinlock_t *uar_lock_cq;
        /* CQ (UAR) access lock required for 32-bit implementations. */
#endif
        uint32_t tunnel; /* Tunnel information. */
} __rte_cache_aligned;

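/*
 * Note: ring sizes above are stored as log2 values to fit the bit-fields;
 * e.g. elts_n == 8 describes a ring of 1 << 8 == 256 mbufs indexed through
 * the mask (1 << 8) - 1 == 0xff.
 */
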
enum mlx5_rxq_obj_type {
        MLX5_RXQ_OBJ_TYPE_IBV, /* mlx5_rxq_obj with ibv_wq. */
        MLX5_RXQ_OBJ_TYPE_DEVX_RQ, /* mlx5_rxq_obj with mlx5_devx_rq. */
};

/* Verbs/DevX Rx queue elements. */
struct mlx5_rxq_obj {
        LIST_ENTRY(mlx5_rxq_obj) next; /* Pointer to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
        struct ibv_cq *cq; /* Completion Queue. */
        enum mlx5_rxq_obj_type type;
        RTE_STD_C11
        union {
                struct ibv_wq *wq; /* Work Queue. */
                struct mlx5_devx_obj *rq; /* DevX object for Rx Queue. */
        };
        struct ibv_comp_channel *channel;
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
        struct mlx5_rxq_data rxq; /* Data path structure. */
        LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
        struct mlx5_priv *priv; /* Back pointer to private data. */
        unsigned int socket; /* CPU socket ID for allocations. */
        unsigned int irq:1; /* Whether IRQ is enabled. */
        uint32_t flow_mark_n; /* Number of Mark/Flag flows using this queue. */
        uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnel counters. */
        uint32_t wqn; /* WQ number. */
        uint16_t dump_file_n; /* Number of dump files. */
};

enum mlx5_ind_tbl_type {
        MLX5_IND_TBL_TYPE_IBV,
        MLX5_IND_TBL_TYPE_DEVX,
};

/* Indirection table. */
struct mlx5_ind_table_obj {
        LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        enum mlx5_ind_tbl_type type;
        RTE_STD_C11
        union {
                struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
                struct mlx5_devx_obj *rqt; /* DevX RQT object. */
        };
        uint32_t queues_n; /**< Number of queues in the list. */
        uint16_t queues[]; /**< Queue list. */
};

/* Hash Rx queue. */
struct mlx5_hrxq {
        LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
        RTE_STD_C11
        union {
                struct ibv_qp *qp; /* Verbs queue pair. */
                struct mlx5_devx_obj *tir; /* DevX TIR object. */
        };
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        void *action; /* DV QP action pointer. */
#endif
        uint64_t hash_fields; /* Verbs hash fields. */
        uint32_t rss_key_len; /* Hash key length in bytes. */
        uint8_t rss_key[]; /* Hash key. */
};

/* TX queue send local data. */
__extension__
struct mlx5_txq_local {
        struct mlx5_wqe *wqe_last; /* Last sent WQE pointer. */
        struct rte_mbuf *mbuf; /* First mbuf to process. */
        uint16_t pkts_copy; /* Packets copied to elts. */
        uint16_t pkts_sent; /* Packets sent. */
        uint16_t elts_free; /* Available elts remaining. */
        uint16_t wqe_free; /* Available WQEs remaining. */
        uint16_t mbuf_off; /* Data offset in current mbuf. */
        uint16_t mbuf_nseg; /* Number of remaining mbuf segments. */
};

/* TX queue descriptor. */
__extension__
struct mlx5_txq_data {
        uint16_t elts_head; /* Current counter in (*elts)[]. */
        uint16_t elts_tail; /* Counter of first element awaiting completion. */
        uint16_t elts_comp; /* elts index since last completion request. */
        uint16_t elts_s; /* Number of mbuf elements. */
        uint16_t elts_m; /* Mask for mbuf element indices. */
        /* Fields related to elts mbuf storage. */
        uint16_t wqe_ci; /* Consumer index for work queue. */
        uint16_t wqe_pi; /* Producer index for work queue. */
        uint16_t wqe_s; /* Number of WQ elements. */
        uint16_t wqe_m; /* Mask for WQ element indices. */
        uint16_t wqe_comp; /* WQE index since last completion request. */
        uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
        /* WQ related fields. */
        uint16_t cq_ci; /* Consumer index for completion queue. */
#ifndef NDEBUG
        uint16_t cq_pi; /* Counter of issued CQE "always" requests. */
#endif
        uint16_t cqe_s; /* Number of CQ elements. */
        uint16_t cqe_m; /* Mask for CQ indices. */
        /* CQ related fields. */
        uint16_t elts_n:4; /* elts[] length (in log2). */
        uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
        uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
        uint16_t tso_en:1; /* When set, hardware TSO is enabled. */
        uint16_t tunnel_en:1;
        /* When set, Tx offloads for tunneled packets are supported. */
        uint16_t swp_en:1; /* Whether SW parser is enabled. */
        uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
        uint16_t inlen_send; /* Ordinary send data inline size. */
        uint16_t inlen_empw; /* eMPW max packet size to inline. */
        uint16_t inlen_mode; /* Minimal data length to inline. */
        uint32_t qp_num_8s; /* QP number shifted by 8. */
        uint64_t offloads; /* Offloads for Tx Queue. */
        struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
        struct mlx5_wqe *wqes; /* Work queue. */
        struct mlx5_wqe *wqes_end; /* Work queue array limit. */
        volatile struct mlx5_cqe *cqes; /* Completion queue. */
        volatile uint32_t *qp_db; /* Work queue doorbell. */
        volatile uint32_t *cq_db; /* Completion queue doorbell. */
        uint16_t port_id; /* Port ID of device. */
        uint16_t idx; /* Queue index. */
        struct mlx5_txq_stats stats; /* TX queue counters. */
#ifndef RTE_ARCH_64
        rte_spinlock_t *uar_lock;
        /* UAR access lock required for 32-bit implementations. */
#endif
        struct rte_mbuf *elts[0];
        /* Storage for queued packets, must be the last field. */
} __rte_cache_aligned;

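/*
 * Note: elts[0] is a zero-length trailing array; the storage behind it is
 * allocated by the control path together with struct mlx5_txq_ctrl below,
 * which embeds this structure as its last member.
 */
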
/* Verbs Tx queue elements. */
struct mlx5_txq_ibv {
        LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
        struct ibv_cq *cq; /* Completion Queue. */
        struct ibv_qp *qp; /* Queue Pair. */
};

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
        LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
        rte_atomic32_t refcnt; /* Reference counter. */
        unsigned int socket; /* CPU socket ID for allocations. */
        unsigned int max_inline_data; /* Max inline data. */
        unsigned int max_tso_header; /* Max TSO header size. */
        struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
        struct mlx5_priv *priv; /* Back pointer to private data. */
        off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
        void *bf_reg; /* BlueFlame register from Verbs. */
        uint16_t dump_file_n; /* Number of dump files. */
        struct mlx5_txq_data txq; /* Data path structure. */
        /* Must be the last field in the structure, contains elts[]. */
};

#define MLX5_TX_BFREG(txq) \
                (MLX5_PROC_PRIV((txq)->port_id)->uar_table[(txq)->idx])

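/*
 * Note: UAR mappings are per process, so the BlueFlame register is looked
 * up in the per-process UAR table instead of being cached in struct
 * mlx5_txq_data; primary and secondary processes thereby use the same txq
 * with their own mappings.
 */
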
/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

int mlx5_check_mprq_support(struct rte_eth_dev *dev);
int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
int mlx5_mprq_enabled(struct rte_eth_dev *dev);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        unsigned int socket, const struct rte_eth_rxconf *conf,
                        struct rte_mempool *mp);
void mlx5_rx_queue_release(void *dpdk_rxq);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
struct mlx5_rxq_obj *mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
                                   uint16_t desc, unsigned int socket,
                                   const struct rte_eth_rxconf *conf,
                                   struct rte_mempool *mp);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
                                const uint8_t *rss_key, uint32_t rss_key_len,
                                uint64_t hash_fields,
                                const uint16_t *queues, uint32_t queues_n,
                                int tunnel __rte_unused);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
                                const uint8_t *rss_key, uint32_t rss_key_len,
                                uint64_t hash_fields,
                                const uint16_t *queues, uint32_t queues_n);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
int mlx5_hrxq_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
int mlx5_lro_on(struct rte_eth_dev *dev);

/* mlx5_txq.c */

int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        unsigned int socket, const struct rte_eth_txconf *conf);
void mlx5_tx_queue_release(void *dpdk_txq);
int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
                                   uint16_t desc, unsigned int socket,
                                   const struct rte_eth_txconf *conf);
struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_verify(struct rte_eth_dev *dev);
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);

/* mlx5_rxtx.c */

extern uint32_t mlx5_ptype_table[];
extern uint8_t mlx5_cksum_table[];
extern uint8_t mlx5_swp_types_table[];

void mlx5_set_ptype_table(void);
void mlx5_set_cksum_table(void);
void mlx5_set_swp_types_table(void);
__rte_noinline uint16_t mlx5_tx_error_cqe_handle
                (struct mlx5_txq_data *restrict txq,
                 volatile struct mlx5_err_cqe *err_cqe);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq,
                                      uint8_t mbuf_prepare);
void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
                            uint16_t pkts_n);
uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
                          uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
                          uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void mlx5_dump_debug_information(const char *path, const char *title,
                                 const void *buf, unsigned int len);
int mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
                        const struct mlx5_mp_arg_queue_state_modify *sm);

/* Vectorized version of mlx5_rxtx.c */

int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
                           uint16_t pkts_n);

/* mlx5_mr.c */

void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
                               struct rte_mempool *mp);
int mlx5_dma_map(struct rte_pci_device *pdev, void *addr, uint64_t iova,
                 size_t len);
int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova,
                   size_t len);

/**
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures.
 *
 * @param val
 *   Value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64_relaxed(uint64_t val, void *addr,
                           rte_spinlock_t *lock __rte_unused)
{
#ifdef RTE_ARCH_64
        *(uint64_t *)addr = val;
#else /* !RTE_ARCH_64 */
        rte_spinlock_lock(lock);
        *(uint32_t *)addr = val;
        rte_io_wmb();
        *((uint32_t *)addr + 1) = val >> 32;
        rte_spinlock_unlock(lock);
#endif
}

/**
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures while guaranteeing the order of execution with the
 * code being executed.
 *
 * @param val
 *   Value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
{
        rte_io_wmb();
        __mlx5_uar_write64_relaxed(val, addr, lock);
}

/* Assist macros, used instead of directly calling the functions they wrap. */
#ifdef RTE_ARCH_64
#define mlx5_uar_write64_relaxed(val, dst, lock) \
                __mlx5_uar_write64_relaxed(val, dst, NULL)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
#else
#define mlx5_uar_write64_relaxed(val, dst, lock) \
                __mlx5_uar_write64_relaxed(val, dst, lock)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
#endif

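/*
 * Illustrative use, with names taken from the structures above: ringing a
 * doorbell copies the first 8 bytes of the last WQE to the BlueFlame
 * register; on 64-bit targets the lock argument is discarded by the
 * macros, e.g.:
 *
 *   mlx5_uar_write64(*(volatile uint64_t *)wqe,
 *                    MLX5_TX_BFREG(txq), txq->uar_lock);
 */
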
enum mlx5_cqe_status {
        MLX5_CQE_STATUS_SW_OWN,
        MLX5_CQE_STATUS_HW_OWN,
        MLX5_CQE_STATUS_ERR,
};

/**
 * Check whether a CQE is valid.
 *
 * @param cqe
 *   Pointer to CQE.
 * @param cqes_n
 *   Size of completion queue.
 * @param ci
 *   Consumer index.
 *
 * @return
 *   The CQE status.
 */
static __rte_always_inline enum mlx5_cqe_status
check_cqe(volatile struct mlx5_cqe *cqe, const uint16_t cqes_n,
          const uint16_t ci)
{
        const uint16_t idx = ci & cqes_n;
        const uint8_t op_own = cqe->op_own;
        const uint8_t op_owner = MLX5_CQE_OWNER(op_own);
        const uint8_t op_code = MLX5_CQE_OPCODE(op_own);

        if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
                return MLX5_CQE_STATUS_HW_OWN;
        rte_cio_rmb();
        if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
                     op_code == MLX5_CQE_REQ_ERR))
                return MLX5_CQE_STATUS_ERR;
        return MLX5_CQE_STATUS_SW_OWN;
}

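/*
 * Usage sketch, not part of the driver API: poll the CQ head until the
 * device hands the next CQE back to software. Assumes the mlx5_rxq_data
 * layout above; the helper name is hypothetical.
 */
static __rte_always_inline int
mlx5_example_rx_cqe_ready(struct mlx5_rxq_data *rxq)
{
        const uint16_t cqe_n = 1 << rxq->cqe_n; /* cqe_n is stored as log2. */
        volatile struct mlx5_cqe *cqe =
                &(*rxq->cqes)[rxq->cq_ci & (cqe_n - 1)];

        /* Only a CQE with SW_OWN status may be consumed by software. */
        return check_cqe(cqe, cqe_n, rxq->cq_ci) == MLX5_CQE_STATUS_SW_OWN;
}
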
/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static inline struct rte_mempool *
mlx5_mb2mp(struct rte_mbuf *buf)
{
        if (unlikely(RTE_MBUF_CLONED(buf)))
                return rte_mbuf_from_indirect(buf)->pool;
        return buf->pool;
}

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches
 * for Rx as the mempool is pre-configured and static.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
        struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
        uint32_t lkey;

        /* Linear search on MR cache array. */
        lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
                                    MLX5_MR_CACHE_N, addr);
        if (likely(lkey != UINT32_MAX))
                return lkey;
        /* Take the slower bottom-half (binary search) on miss. */
        return mlx5_rx_addr2mr_bh(rxq, addr);
}

#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))

/**
 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
        struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
        uintptr_t addr = (uintptr_t)mb->buf_addr;
        uint32_t lkey;

        /* Check generation bit to see if there's any change on existing MRs. */
        if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
                mlx5_mr_flush_local_cache(mr_ctrl);
        /* Linear search on MR cache array. */
        lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
                                    MLX5_MR_CACHE_N, addr);
        if (likely(lkey != UINT32_MAX))
                return lkey;
        /* Take the slower bottom-half on miss. */
        return mlx5_tx_mb2mr_bh(txq, mb);
}

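/*
 * Usage sketch, not part of the driver API: a Tx routine resolves the LKey
 * of each mbuf through the cached lookup above before writing it into a
 * WQE data segment. The helper name is hypothetical.
 */
static __rte_always_inline uint32_t
mlx5_example_tx_seg_lkey(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
        /* Per-queue MR cache first; bottom-half search on a miss. */
        return mlx5_tx_mb2mr(txq, mb);
}
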
/**
 * Ring TX queue doorbell and flush the update if requested.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 * @param cond
 *   Request for write memory barrier after BlueFlame update.
 */
static __rte_always_inline void
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
                       int cond)
{
        uint64_t *dst = MLX5_TX_BFREG(txq);
        volatile uint64_t *src = ((volatile uint64_t *)wqe);

        rte_cio_wmb();
        *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
        /* Ensure ordering between DB record and BF copy. */
        rte_wmb();
        mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
        if (cond)
                rte_wmb();
}

/**
 * Ring TX queue doorbell and flush the update by write memory barrier.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
        mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}

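/*
 * Usage sketch with hypothetical names: a Tx burst advances wqe_ci while
 * building WQEs, then rings the doorbell once for the last WQE posted:
 *
 *   mlx5_tx_dbrec(txq, loc.wqe_last);
 *
 * The conditional variant lets a caller skip the trailing write barrier
 * when another doorbell follows immediately:
 *
 *   mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0);
 */
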
#endif /* RTE_PMD_MLX5_RXTX_H_ */