/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
#include <rte_io.h>
#include <rte_bus_pci.h>

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 5

struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t oerrors; /**< Total number of failed transmitted packets. */
};

struct mlx5_priv;

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* Multi-Packet RQ buffer header. */
struct mlx5_mprq_buf {
	struct rte_mempool *mp;
	rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
	uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
} __rte_cache_aligned;

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr) ((ptr) + 1)
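
/*
 * Illustration (editor's sketch, not part of the driver API): because the
 * headroom pad is the last member of struct mlx5_mprq_buf, (ptr + 1) points
 * right past the header, i.e. at the first stride of the Multi-Packet RQ
 * buffer. A hypothetical helper computing the address of stride "i", given
 * the log2 stride size "strd_sz_n" from the Rx queue, could look like:
 *
 *	static inline void *
 *	mprq_stride_addr(struct mlx5_mprq_buf *buf, unsigned int i,
 *			 unsigned int strd_sz_n)
 *	{
 *		return (uint8_t *)mlx5_mprq_buf_addr(buf) +
 *		       ((uintptr_t)i << strd_sz_n);
 *	}
 */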

enum mlx5_rxq_err_state {
	MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
	MLX5_RXQ_ERR_STATE_NEED_RESET,
	MLX5_RXQ_ERR_STATE_NEED_READY,
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int strd_num_n:5; /* Log 2 of the number of strides. */
	unsigned int strd_sz_n:4; /* Log 2 of stride size. */
	unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
	unsigned int :4; /* Remaining bits. */
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
	struct rxq_zip zip; /* Compressed context. */
	uint16_t decompressed;
	/* Number of ready mbufs decompressed from the CQ. */
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
	volatile struct mlx5_cqe (*cqes)[];
	struct rte_mbuf *(*elts)[];
	struct mlx5_mprq_buf *(*mprq_bufs)[];
	struct rte_mempool *mp;
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
	uint16_t idx; /* Queue index. */
	struct mlx5_rxq_stats stats;
	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *uar_lock_cq;
	/* CQ (UAR) access lock required for 32bit implementations */
#endif
	uint32_t tunnel; /* Tunnel information. */
} __rte_cache_aligned;

/* Verbs Rx queue elements. */
struct mlx5_rxq_ibv {
	LIST_ENTRY(mlx5_rxq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_wq *wq; /* Work Queue. */
	struct ibv_comp_channel *channel;
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	struct mlx5_rxq_data rxq; /* Data path structure. */
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
	uint32_t wqn; /* WQ number. */
	uint16_t dump_file_n; /* Number of dump files. */
};

/* Indirection table. */
struct mlx5_ind_table_ibv {
	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
	uint32_t queues_n; /**< Number of queues in the list. */
	uint16_t queues[]; /**< Queue list. */
};

/* Hash Rx queue. */
struct mlx5_hrxq {
	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
	struct ibv_qp *qp; /* Verbs queue pair. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	void *action; /* DV QP action pointer. */
#endif
	uint64_t hash_fields; /* Verbs Hash fields. */
	uint32_t rss_key_len; /* Hash key length in bytes. */
	uint8_t rss_key[]; /* Hash key. */
};

/* TX queue descriptor. */
struct mlx5_txq_data {
	uint16_t elts_head; /* Current counter in (*elts)[]. */
	uint16_t elts_tail; /* Counter of first element awaiting completion. */
	uint16_t elts_comp; /* Counter since last completion request. */
	uint16_t mpw_comp; /* WQ index since last completion request. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
	uint16_t cq_pi; /* Producer index for completion queue. */
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_pi; /* Producer index for work queue. */
	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
	uint16_t tso_en:1; /* When set, hardware TSO is enabled. */
	uint16_t tunnel_en:1;
	/* When set, TX offloads for tunneled packets are supported. */
	uint16_t swp_en:1; /* Whether SW parser is enabled. */
	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	uint64_t offloads; /* Offloads for Tx Queue. */
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
	volatile void *wqes; /* Work queue (use volatile to write into). */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	struct rte_mbuf *(*elts)[]; /* TX elements. */
	uint16_t port_id; /* Port ID of device. */
	uint16_t idx; /* Queue index. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *uar_lock;
	/* UAR access lock required for 32bit implementations */
#endif
} __rte_cache_aligned;

/* Verbs Tx queue elements. */
struct mlx5_txq_ibv {
	LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_qp *qp; /* Queue Pair. */
};

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
	struct mlx5_txq_data txq; /* Data path structure. */
	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int max_inline_data; /* Max inline data. */
	unsigned int max_tso_header; /* Max TSO header size. */
	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
	struct mlx5_priv *priv; /* Back pointer to private data. */
	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
	void *bf_reg; /* BlueFlame register from Verbs. */
	uint32_t cqn; /* CQ number. */
	uint16_t dump_file_n; /* Number of dump files. */
};

#define MLX5_TX_BFREG(txq) \
		(MLX5_PROC_PRIV((txq)->port_id)->uar_table[(txq)->idx])

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

int mlx5_check_mprq_support(struct rte_eth_dev *dev);
int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
int mlx5_mprq_enabled(struct rte_eth_dev *dev);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
void mlx5_rx_queue_release(void *dpdk_rxq);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   struct rte_mempool *mp);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
				const uint8_t *rss_key, uint32_t rss_key_len,
				uint64_t hash_fields,
				const uint16_t *queues, uint32_t queues_n,
				int tunnel __rte_unused);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
				const uint8_t *rss_key, uint32_t rss_key_len,
				uint64_t hash_fields,
				const uint16_t *queues, uint32_t queues_n);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);

/* mlx5_txq.c */

int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
void mlx5_tx_queue_release(void *dpdk_txq);
int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_txconf *conf);
struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_verify(struct rte_eth_dev *dev);
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);

/* mlx5_rxtx.c */

extern uint32_t mlx5_ptype_table[];
extern uint8_t mlx5_cksum_table[];
extern uint8_t mlx5_swp_types_table[];

void mlx5_set_ptype_table(void);
void mlx5_set_cksum_table(void);
void mlx5_set_swp_types_table(void);
uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
		       uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
				  uint16_t pkts_n);
uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
__rte_noinline uint16_t mlx5_tx_error_cqe_handle(struct mlx5_txq_data *txq,
					volatile struct mlx5_err_cqe *err_cqe);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq,
				      uint8_t mbuf_prepare);
void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void mlx5_dump_debug_information(const char *path, const char *title,
				 const void *buf, unsigned int len);
int mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
			const struct mlx5_mp_arg_queue_state_modify *sm);

/* Vectorized version of mlx5_rxtx.c */

int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);
uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);

/* mlx5_mr.c */

void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
			       struct rte_mempool *mp);
int mlx5_dma_map(struct rte_pci_device *pdev, void *addr, uint64_t iova,
		 size_t len);
int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova,
		   size_t len);

/**
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures.
 *
 * @param val
 *   value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64_relaxed(uint64_t val, void *addr,
			   rte_spinlock_t *lock __rte_unused)
{
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = val;
#else /* !RTE_ARCH_64 */
	rte_spinlock_lock(lock);
	*(uint32_t *)addr = val;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = val >> 32;
	rte_spinlock_unlock(lock);
#endif
}

/**
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures while guaranteeing the order of execution with the
 * code being executed.
 *
 * @param val
 *   value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
{
	rte_io_wmb();
	__mlx5_uar_write64_relaxed(val, addr, lock);
}

/* Assist macros, used instead of directly calling the functions they wrap. */
#ifdef RTE_ARCH_64
#define mlx5_uar_write64_relaxed(val, dst, lock) \
		__mlx5_uar_write64_relaxed(val, dst, NULL)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
#else
#define mlx5_uar_write64_relaxed(val, dst, lock) \
		__mlx5_uar_write64_relaxed(val, dst, lock)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
#endif
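
/*
 * Usage sketch (editor's illustration; mlx5_tx_dbrec_cond_wmb() below is the
 * actual caller in this file): copying the first 8 bytes of the last WQE to
 * the BlueFlame register of a Tx queue would look like
 *
 *	mlx5_uar_write64_relaxed(*(volatile uint64_t *)wqe,
 *				 MLX5_TX_BFREG(txq), txq->uar_lock);
 *
 * On 64-bit targets the lock argument is dropped by the macro; on 32-bit
 * targets it serializes the two 32-bit halves of the store.
 */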

enum mlx5_cqe_status {
	MLX5_CQE_STATUS_SW_OWN,
	MLX5_CQE_STATUS_HW_OWN,
	MLX5_CQE_STATUS_ERR,
};

/**
 * Check whether CQE is valid.
 *
 * @param cqe
 *   Pointer to the CQE.
 * @param cqes_n
 *   Size of completion queue.
 * @param ci
 *   Consumer index.
 *
 * @return
 *   The CQE status.
 */
static __rte_always_inline enum mlx5_cqe_status
check_cqe(volatile struct mlx5_cqe *cqe, const uint16_t cqes_n,
	  const uint16_t ci)
{
	const uint16_t idx = ci & cqes_n;
	const uint8_t op_own = cqe->op_own;
	const uint8_t op_owner = MLX5_CQE_OWNER(op_own);
	const uint8_t op_code = MLX5_CQE_OPCODE(op_own);

	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
		return MLX5_CQE_STATUS_HW_OWN;
	if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
		     op_code == MLX5_CQE_REQ_ERR))
		return MLX5_CQE_STATUS_ERR;
	return MLX5_CQE_STATUS_SW_OWN;
}
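
/*
 * Usage sketch (editor's illustration, not verbatim driver code): a caller
 * polls the CQE at its current consumer index and only consumes it once
 * ownership has passed to software, e.g.:
 *
 *	volatile struct mlx5_cqe *cqe =
 *		&(*txq->cqes)[txq->cq_ci & ((1 << txq->cqe_n) - 1)];
 *
 *	if (check_cqe(cqe, 1 << txq->cqe_n, txq->cq_ci) ==
 *	    MLX5_CQE_STATUS_SW_OWN) {
 *		... process the completion ...
 *		++txq->cq_ci;
 *	}
 *
 * mlx5_tx_cqe_handle() below follows this pattern.
 */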

/**
 * Return the address of the WQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param ci
 *   WQE consumer index.
 *
 * @return
 *   WQE address.
 */
static inline uintptr_t *
tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
{
	ci &= ((1 << txq->wqe_n) - 1);
	return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
}

/**
 * Handle the next CQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 *
 * @return
 *   The last Tx buffer element to free.
 */
static __rte_always_inline uint16_t
mlx5_tx_cqe_handle(struct mlx5_txq_data *txq)
{
	const unsigned int cqe_n = 1 << txq->cqe_n;
	const unsigned int cqe_cnt = cqe_n - 1;
	uint16_t last_elts;
	union {
		volatile struct mlx5_cqe *cqe;
		volatile struct mlx5_err_cqe *err_cqe;
	} u = {
		.cqe = &(*txq->cqes)[txq->cq_ci & cqe_cnt],
	};
	int ret = check_cqe(u.cqe, cqe_n, txq->cq_ci);

	if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
		if (unlikely(ret == MLX5_CQE_STATUS_ERR))
			last_elts = mlx5_tx_error_cqe_handle(txq, u.err_cqe);
		else
			/* Do not release buffers. */
			return txq->elts_tail;
	} else {
		uint16_t new_wqe_pi = rte_be_to_cpu_16(u.cqe->wqe_counter);
		volatile struct mlx5_wqe_ctrl *ctrl =
				(volatile struct mlx5_wqe_ctrl *)
					tx_mlx5_wqe(txq, new_wqe_pi);

		/* Release completion burst buffers. */
		last_elts = ctrl->ctrl3;
		txq->wqe_pi = new_wqe_pi;
		txq->cq_ci++;
	}
	rte_compiler_barrier();
	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
	return last_elts;
}

/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static __rte_always_inline void
mlx5_tx_complete(struct mlx5_txq_data *txq)
{
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	uint16_t elts_free = txq->elts_tail;
	uint16_t elts_tail;
	struct rte_mbuf *m, *free[elts_n];
	struct rte_mempool *pool = NULL;
	unsigned int blk_n = 0;

	elts_tail = mlx5_tx_cqe_handle(txq);
	assert((elts_tail & elts_m) < (1 << txq->wqe_n));
	while (elts_free != elts_tail) {
		m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
		if (likely(m != NULL)) {
			if (likely(m->pool == pool)) {
				free[blk_n++] = m;
			} else {
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free, blk_n);
				free[0] = m;
				pool = m->pool;
				blk_n = 1;
			}
		}
	}
	if (blk_n)
		rte_mempool_put_bulk(pool, (void *)free, blk_n);
#ifndef NDEBUG
	elts_free = txq->elts_tail;
	/* Poisoning. */
	while (elts_free != elts_tail) {
		memset(&(*txq->elts)[elts_free & elts_m],
		       0x66,
		       sizeof((*txq->elts)[elts_free & elts_m]));
		++elts_free;
	}
#endif
	txq->elts_tail = elts_tail;
}

/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
 * cloned mbuf is allocated is returned instead.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static inline struct rte_mempool *
mlx5_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_CLONED(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
 * as mempool is pre-configured and static.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
				    MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half (Binary Search) on miss. */
	return mlx5_rx_addr2mr_bh(rxq, addr);
}

#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
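
/*
 * Usage sketch (editor's illustration; the "lkey" field name of the receive
 * WQE data segment is an assumption here, see mlx5_prm.h for the actual
 * layout): when replenishing the RQ, the LKey of every fresh mbuf is resolved
 * through the per-queue MR cache, e.g.:
 *
 *	wqe->lkey = mlx5_rx_mb2mr(rxq, mbuf);
 */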

/**
 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	/* Check generation bit to see if there's any change on existing MRs. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
				    MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half on miss. */
	return mlx5_tx_mb2mr_bh(txq, mb);
}
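
/*
 * Usage sketch (editor's illustration; "dseg" is a hypothetical pointer to the
 * WQE data segment being built): the Tx burst routines resolve the LKey of
 * every transmitted mbuf the same way, e.g.:
 *
 *	dseg->lkey = mlx5_tx_mb2mr(txq, buf);
 *
 * On a cache miss the bottom half may register the mbuf's mempool, so the
 * call can be slow the first time a new mempool is seen.
 */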

/**
 * Ring TX queue doorbell and flush the update if requested.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 * @param cond
 *   Request for write memory barrier after BlueFlame update.
 */
static __rte_always_inline void
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
		       int cond)
{
	uint64_t *dst = MLX5_TX_BFREG(txq);
	volatile uint64_t *src = ((volatile uint64_t *)wqe);

	rte_cio_wmb();
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
	/* Ensure ordering between DB record and BF copy. */
	rte_wmb();
	mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
	if (cond)
		rte_wmb();
}

/**
 * Ring TX queue doorbell and flush the update by write memory barrier.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}
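
/*
 * Usage sketch (editor's illustration; "last_wqe" stands for the last WQE
 * posted in a burst): after building the WQEs of a burst, the producer index
 * is published and the doorbell rung once, e.g.:
 *
 *	mlx5_tx_dbrec(txq, last_wqe);
 *
 * mlx5_tx_dbrec_cond_wmb(txq, wqe, 0) can be used instead when the caller
 * issues its own write memory barrier later.
 */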

/**
 * Convert mbuf to Verb SWP.
 *
 * @param txq
 *   Pointer to the Tx queue.
 * @param buf
 *   Pointer to the mbuf.
 * @param offsets
 *   Pointer to the SWP header offsets.
 * @param swp_types
 *   Pointer to the SWP header types.
 */
static __rte_always_inline void
txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
		uint8_t *offsets, uint8_t *swp_types)
{
	const uint64_t vlan = buf->ol_flags & PKT_TX_VLAN_PKT;
	const uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
	const uint64_t tso = buf->ol_flags & PKT_TX_TCP_SEG;
	const uint64_t csum_flags = buf->ol_flags & PKT_TX_L4_MASK;
	const uint64_t inner_ip =
		buf->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6);
	const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
				       PKT_TX_OUTER_IPV6;
	uint16_t idx;
	uint16_t off;

	if (likely(!txq->swp_en || (tunnel != PKT_TX_TUNNEL_UDP &&
				    tunnel != PKT_TX_TUNNEL_IP)))
		return;
	/*
	 * The index should have:
	 * bit[0:1] = PKT_TX_L4_MASK
	 * bit[4] = PKT_TX_IPV6
	 * bit[8] = PKT_TX_OUTER_IPV6
	 * bit[9] = PKT_TX_OUTER_UDP
	 */
	idx = (buf->ol_flags & ol_flags_mask) >> 52;
	if (tunnel == PKT_TX_TUNNEL_UDP)
		idx |= 1 << 9;
	*swp_types = mlx5_swp_types_table[idx];
	/*
	 * Set offsets for SW parser. Since ConnectX-5, SW parser just
	 * complements HW parser. SW parser starts to engage only if HW parser
	 * can't reach a header. For the older devices, HW parser will not kick
	 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
	 * should be set regardless of HW offload.
	 */
	off = buf->outer_l2_len + (vlan ? sizeof(struct rte_vlan_hdr) : 0);
	offsets[1] = off >> 1; /* Outer L3 offset. */
	off += buf->outer_l3_len;
	if (tunnel == PKT_TX_TUNNEL_UDP)
		offsets[0] = off >> 1; /* Outer L4 offset. */
	if (inner_ip) {
		off += buf->l2_len;
		offsets[3] = off >> 1; /* Inner L3 offset. */
		if (csum_flags == PKT_TX_TCP_CKSUM || tso ||
		    csum_flags == PKT_TX_UDP_CKSUM) {
			off += buf->l3_len;
			offsets[2] = off >> 1; /* Inner L4 offset. */
		}
	}
}
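
/*
 * Usage sketch (editor's illustration; the local variable names are
 * hypothetical): a Tx routine using the SW parser offload fills the offsets
 * and types like
 *
 *	uint8_t swp_offsets[4] = { 0, 0, 0, 0 };
 *	uint8_t swp_types = 0;
 *
 *	txq_mbuf_to_swp(txq, buf, swp_offsets, &swp_types);
 *
 * swp_offsets[] then holds the outer/inner L3/L4 offsets in 16-bit words and
 * swp_types the SWP type bits, both to be copied into the Ethernet segment of
 * the WQE.
 */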

/**
 * Convert the Checksum offloads to Verbs.
 *
 * @param buf
 *   Pointer to the mbuf.
 *
 * @return
 *   Converted checksum flags.
 */
static __rte_always_inline uint8_t
txq_ol_cksum_to_cs(struct rte_mbuf *buf)
{
	uint32_t idx;
	uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
	const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
				       PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;

	/*
	 * The index should have:
	 * bit[0] = PKT_TX_TCP_SEG
	 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
	 * bit[4] = PKT_TX_IP_CKSUM
	 * bit[8] = PKT_TX_OUTER_IP_CKSUM
	 * bit[9] = tunnel
	 */
	idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
	return mlx5_cksum_table[idx];
}

/**
 * Count the number of contiguous single segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 *
 * @return
 *   Number of contiguous single segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	/* Count the number of contiguous single segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) > 1)
			break;
	return pos;
}

/**
 * Count the number of contiguous multi-segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 *
 * @return
 *   Number of contiguous multi-segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	/* Count the number of contiguous multi-segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) == 1)
			break;
	return pos;
}

#endif /* RTE_PMD_MLX5_RXTX_H_ */