/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

struct mlx5_rxq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t oerrors; /**< Total number of failed transmitted packets. */
};

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int :15; /* Remaining bits. */
	volatile uint32_t *rq_db; /* RQ doorbell record. */
	volatile uint32_t *cq_db; /* CQ doorbell record. */
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	volatile struct mlx5_wqe_data_seg (*wqes)[];
	volatile struct mlx5_cqe (*cqes)[];
	struct rxq_zip zip; /* Compressed context. */
	struct rte_mbuf *(*elts)[];
	struct rte_mempool *mp;
	struct mlx5_rxq_stats stats;
	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
	uint32_t tunnel; /* Tunnel information. */
} __rte_cache_aligned;
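
/*
 * Illustrative sketch (assumed usage, not taken verbatim from the data path):
 * the log2 fields above are expanded into counts and wrap-around masks when
 * the rings are walked, e.g.:
 *
 *	const unsigned int wqe_n = 1 << rxq->elts_n;
 *	const unsigned int wqe_mask = wqe_n - 1;
 *	const unsigned int cqe_mask = (1 << rxq->cqe_n) - 1;
 *	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[cq_ci & cqe_mask];
 *	struct rte_mbuf *rep = (*rxq->elts)[rq_ci & wqe_mask];
 *
 * cq_ci and rq_ci are hypothetical local consumer indexes.
 */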

/* Verbs Rx queue elements. */
struct mlx5_rxq_ibv {
	LIST_ENTRY(mlx5_rxq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_wq *wq; /* Work Queue. */
	struct ibv_comp_channel *channel; /* Rx interrupt completion channel. */
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
	struct priv *priv; /* Back pointer to private data. */
	struct mlx5_rxq_data rxq; /* Data path structure. */
	unsigned int socket; /* CPU socket ID for allocations. */
	uint32_t tunnel_types[16]; /* Tunnel type counter. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint16_t idx; /* Queue index. */
};

/* Indirection table. */
struct mlx5_ind_table_ibv {
	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
	uint32_t queues_n; /**< Number of queues in the list. */
	uint16_t queues[]; /**< Queue list. */
};

/* Hash Rx queue. */
struct mlx5_hrxq {
	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
	struct ibv_qp *qp; /* Verbs queue pair. */
	uint64_t hash_fields; /* Verbs Hash fields. */
	uint32_t tunnel; /* Tunnel type. */
	uint32_t rss_level; /* RSS on tunnel level. */
	uint32_t rss_key_len; /* Hash key length in bytes. */
	uint8_t rss_key[]; /* Hash key. */
};

/* TX queue descriptor. */
__extension__
struct mlx5_txq_data {
	uint16_t elts_head; /* Current counter in (*elts)[]. */
	uint16_t elts_tail; /* Counter of first element awaiting completion. */
	uint16_t elts_comp; /* Counter since last completion request. */
	uint16_t mpw_comp; /* WQ index since last completion request. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
	uint16_t cq_pi; /* Producer index for completion queue. */
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_pi; /* Producer index for work queue. */
	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
	uint16_t tunnel_en:1;
	/* When set, TX offloads for tunneled packets are supported. */
	uint16_t swp_en:1; /* Whether SW parser is enabled. */
	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	uint64_t offloads; /* Offloads for Tx Queue. */
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
	volatile void *wqes; /* Work queue (use volatile to write into). */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	volatile void *bf_reg; /* Blueflame register remapped. */
	struct rte_mbuf *(*elts)[]; /* TX elements. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;
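
/*
 * Illustrative sketch (assumed usage): max_inline above is expressed in
 * multiples of RTE_CACHE_LINE_SIZE, so a Tx routine deciding whether to
 * inline packet data compares against the byte value, e.g.:
 *
 *	unsigned int inline_bytes = txq->max_inline * RTE_CACHE_LINE_SIZE;
 *
 *	if (rte_pktmbuf_data_len(buf) <= inline_bytes)
 *		... copy the packet data into the WQE instead of a pointer ...
 *
 * buf is a hypothetical struct rte_mbuf pointer of the packet being sent.
 */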

/* Verbs Tx queue elements. */
struct mlx5_txq_ibv {
	LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_qp *qp; /* Queue Pair. */
};

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int max_inline_data; /* Max inline data. */
	unsigned int max_tso_header; /* Max TSO header size. */
	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
	struct priv *priv; /* Back pointer to private data. */
	struct mlx5_txq_data txq; /* Data path structure. */
	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
	volatile void *bf_reg_orig; /* Blueflame register from Verbs. */
	uint16_t idx; /* Queue index. */
};

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;

void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
void mlx5_rx_queue_release(void *dpdk_rxq);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   struct rte_mempool *mp);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_ibv *ind_tbl);
int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
				const uint8_t *rss_key, uint32_t rss_key_len,
				uint64_t hash_fields,
				const uint16_t *queues, uint32_t queues_n,
				uint32_t tunnel, uint32_t rss_level);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
				const uint8_t *rss_key, uint32_t rss_key_len,
				uint64_t hash_fields,
				const uint16_t *queues, uint32_t queues_n,
				uint32_t tunnel, uint32_t rss_level);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);

/* mlx5_txq.c */

int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
void mlx5_tx_queue_release(void *dpdk_txq);
int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_txconf *conf);
struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_verify(struct rte_eth_dev *dev);
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);

/* mlx5_rxtx.c */

extern uint32_t mlx5_ptype_table[];
extern uint8_t mlx5_cksum_table[];
extern uint8_t mlx5_swp_types_table[];

void mlx5_set_ptype_table(void);
void mlx5_set_cksum_table(void);
void mlx5_set_swp_types_table(void);
uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
		       uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
				  uint16_t pkts_n);
uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);

/* Vectorized version of mlx5_rxtx.c */

int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);
uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);

/* mlx5_mr.c */

void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);

/**
 * Verify or set magic value in CQE.
 *
 * @return
 *   0 the first time the magic value is seen, non-zero afterwards.
 */
static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
	static const uint8_t magic[] = "seen";
	volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
	int ret = 1;
	unsigned int i;

	for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
		if (!ret || (*buf)[i] != magic[i]) {
			ret = 0;
			(*buf)[i] = magic[i];
		}
	return ret;
}

/**
 * Check whether CQE is valid.
 *
 * @param cqe
 *   Pointer to CQE.
 * @param cqes_n
 *   Size of completion queue.
 * @param ci
 *   Consumer index.
 *
 * @return
 *   0 on success, 1 on failure.
 */
static __rte_always_inline int
check_cqe(volatile struct mlx5_cqe *cqe,
	  unsigned int cqes_n, const uint16_t ci)
{
	uint16_t idx = ci & cqes_n;
	uint8_t op_own = cqe->op_own;
	uint8_t op_owner = MLX5_CQE_OWNER(op_own);
	uint8_t op_code = MLX5_CQE_OPCODE(op_own);

	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
		return 1; /* No CQE. */
#ifndef NDEBUG
	if ((op_code == MLX5_CQE_RESP_ERR) ||
	    (op_code == MLX5_CQE_REQ_ERR)) {
		volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
		uint8_t syndrome = err_cqe->syndrome;

		if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
		    (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
			return 0;
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR,
				"unexpected CQE error %u (0x%02x) syndrome"
				" 0x%02x",
				op_code, op_code, syndrome);
			rte_hexdump(stderr, "MLX5 Error CQE:",
				    (const void *)((uintptr_t)err_cqe),
				    sizeof(*err_cqe));
		}
		return 1;
	} else if ((op_code != MLX5_CQE_RESP_SEND) &&
		   (op_code != MLX5_CQE_REQ)) {
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
				op_code, op_code);
			rte_hexdump(stderr, "MLX5 CQE:",
				    (const void *)((uintptr_t)cqe),
				    sizeof(*cqe));
		}
		return 1;
	}
#endif /* NDEBUG */
	return 0;
}
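
/*
 * Illustrative sketch (assumed usage, not a verbatim excerpt from the data
 * path): a burst routine polls the CQ ring by masking its consumer index
 * and stops when check_cqe() reports that the next entry is not yet owned
 * by software:
 *
 *	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
 *
 *	if (check_cqe(cqe, cqe_n, rxq->cq_ci))
 *		break;
 *
 * Here cqe_n is 1 << rxq->cqe_n and cqe_cnt is cqe_n - 1, both hypothetical
 * locals.
 */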

/**
 * Return the address of the WQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param ci
 *   WQE consumer index.
 *
 * @return
 *   WQE address.
 */
static inline uintptr_t *
tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
{
	ci &= ((1 << txq->wqe_n) - 1);
	return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
}
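
/*
 * Illustrative sketch (assumed usage): the completion handler below and the
 * Tx burst routines use this helper to turn a wrapping WQE counter into a
 * ring address, e.g.:
 *
 *	volatile struct mlx5_wqe_ctrl *ctrl =
 *		(volatile struct mlx5_wqe_ctrl *)tx_mlx5_wqe(txq, txq->wqe_ci);
 *
 * The counter is masked internally, so callers may pass the raw value.
 */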

/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static __rte_always_inline void
mlx5_tx_complete(struct mlx5_txq_data *txq)
{
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int cqe_n = 1 << txq->cqe_n;
	const unsigned int cqe_cnt = cqe_n - 1;
	uint16_t elts_free = txq->elts_tail;
	uint16_t elts_tail;
	uint16_t cq_ci = txq->cq_ci;
	volatile struct mlx5_cqe *cqe = NULL;
	volatile struct mlx5_wqe_ctrl *ctrl;
	struct rte_mbuf *m, *free[elts_n];
	struct rte_mempool *pool = NULL;
	unsigned int blk_n = 0;

	cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
	if (unlikely(check_cqe(cqe, cqe_n, cq_ci)))
		return;
#ifndef NDEBUG
	if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
	    (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
			rte_hexdump(stderr, "MLX5 TXQ:",
				    (const void *)((uintptr_t)txq->wqes),
				    ((1 << txq->wqe_n) * MLX5_WQE_SIZE));
		}
		return;
	}
#endif /* NDEBUG */
	++cq_ci;
	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
	ctrl = (volatile struct mlx5_wqe_ctrl *)
		tx_mlx5_wqe(txq, txq->wqe_pi);
	elts_tail = ctrl->ctrl3;
	assert((elts_tail & elts_m) < (1 << txq->wqe_n));
	/* Free buffers. */
	while (elts_free != elts_tail) {
		m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
		if (likely(m != NULL)) {
			if (likely(m->pool == pool)) {
				free[blk_n++] = m;
			} else {
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk_n);
				free[0] = m;
				pool = m->pool;
				blk_n = 1;
			}
		}
	}
	if (blk_n)
		rte_mempool_put_bulk(pool, (void *)free, blk_n);
#ifndef NDEBUG
	elts_free = txq->elts_tail;
	/* Poisoning. */
	while (elts_free != elts_tail) {
		memset(&(*txq->elts)[elts_free & elts_m],
		       0x66,
		       sizeof((*txq->elts)[elts_free & elts_m]));
		++elts_free;
	}
#endif
	txq->cq_ci = cq_ci;
	txq->elts_tail = elts_tail;
	/* Update the consumer index. */
	rte_compiler_barrier();
	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
}
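
/*
 * Illustrative sketch (assumed usage): mlx5_tx_burst() and its MPW/EMPW
 * variants typically start a burst by reclaiming mbufs of WQEs the NIC has
 * already completed, then bound the burst by the freed room:
 *
 *	rte_prefetch0(*pkts);
 *	mlx5_tx_complete(txq);
 *	max_elts = elts_n - (uint16_t)(txq->elts_head - txq->elts_tail);
 *
 * max_elts is a hypothetical local limiting how many new mbufs may be
 * enqueued in this burst.
 */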

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
 * as mempool is pre-configured and static.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
				    MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half (binary search) on miss. */
	return mlx5_rx_addr2mr_bh(rxq, addr);
}

#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
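
/*
 * Illustrative sketch (assumed usage): when the Rx ring is refilled, the
 * LKey of the replacement mbuf is looked up through the macro above and
 * written into the work queue entry, e.g.:
 *
 *	struct rte_mbuf *rep = rte_mbuf_raw_alloc(rxq->mp);
 *
 *	wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
 *	wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
 *
 * wqe is a hypothetical volatile struct mlx5_wqe_data_seg pointer into
 * (*rxq->wqes)[].
 */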

/**
 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	uint32_t lkey;

	/* Check generation bit to see if there's any change on existing MRs. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
				    MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half (binary search) on miss. */
	return mlx5_tx_addr2mr_bh(txq, addr);
}

#define mlx5_tx_mb2mr(txq, mb) mlx5_tx_addr2mr(txq, (uintptr_t)((mb)->buf_addr))
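
/*
 * Illustrative sketch (assumed usage): Tx burst routines resolve the LKey
 * for each data segment they build and abort the burst when the lookup and
 * registration both fail:
 *
 *	uint32_t lkey = mlx5_tx_mb2mr(txq, buf);
 *
 *	if (unlikely(lkey == UINT32_MAX))
 *		break;
 *
 * The value is then written into the WQE data segment; buf is a hypothetical
 * local naming the mbuf currently being sent.
 */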

/**
 * Ring TX queue doorbell and flush the update if requested.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 * @param cond
 *   Request for write memory barrier after BlueFlame update.
 */
static __rte_always_inline void
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
		       int cond)
{
	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
	volatile uint64_t *src = ((volatile uint64_t *)wqe);

	rte_cio_wmb();
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
	/* Ensure ordering between DB record and BF copy. */
	rte_wmb();
	*dst = *src;
	if (cond)
		rte_wmb();
}

/**
 * Ring TX queue doorbell and flush the update by write memory barrier.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}
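
/*
 * Illustrative sketch (assumed usage): once a burst has finished building
 * WQEs and advancing txq->wqe_ci, the last control segment is passed to the
 * doorbell helper so the NIC starts fetching the new work:
 *
 *	txq->elts_head = elts_head;
 *	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
 *
 * elts_head and last_wqe are hypothetical locals of the burst routine.
 */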

/**
 * Convert mbuf to Verb SWP.
 *
 * @param txq
 *   Pointer to the Tx queue.
 * @param buf
 *   Pointer to the mbuf.
 * @param tso
 *   TSO offloads enabled.
 * @param vlan
 *   VLAN offloads enabled.
 * @param offsets
 *   Pointer to the SWP header offsets.
 * @param swp_types
 *   Pointer to the SWP header types.
 */
static __rte_always_inline void
txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
		uint8_t tso, uint64_t vlan,
		uint8_t *offsets, uint8_t *swp_types)
{
	uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
	uint16_t idx;
	uint16_t off;
	const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
				       PKT_TX_OUTER_IPV6;

	if (likely(!tunnel || !txq->swp_en ||
		   (tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP)))
		return;
	/*
	 * The index should have:
	 * bit[0:1] = PKT_TX_L4_MASK
	 * bit[4] = PKT_TX_IPV6
	 * bit[8] = PKT_TX_OUTER_IPV6
	 * bit[9] = PKT_TX_OUTER_UDP
	 */
	idx = (buf->ol_flags & ol_flags_mask) >> 52;
	if (tunnel == PKT_TX_TUNNEL_UDP)
		idx |= 1 << 9;
	*swp_types = mlx5_swp_types_table[idx];
	/* SWP offsets. */
	off = buf->outer_l2_len + (vlan ? 4 : 0); /* Outer L3 offset. */
	if (tso || (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM))
		offsets[1] = off >> 1;
	off += buf->outer_l3_len; /* Outer L4 offset. */
	if (tunnel == PKT_TX_TUNNEL_UDP)
		offsets[0] = off >> 1;
	off += buf->l2_len; /* Inner L3 offset. */
	if (tso || (buf->ol_flags & PKT_TX_IP_CKSUM))
		offsets[3] = off >> 1;
	off += buf->l3_len; /* Inner L4 offset. */
	if (tso || ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
	    ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM))
		offsets[2] = off >> 1;
}
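
/*
 * Illustrative sketch (assumed usage): a Tx burst routine collects the SWP
 * offsets/types into locals and copies them into the Ethernet segment of the
 * WQE it is building:
 *
 *	uint8_t swp_types = 0;
 *	rte_be32_t swp_offsets = 0;
 *
 *	txq_mbuf_to_swp(txq, buf, tso, vlan, (uint8_t *)&swp_offsets,
 *			&swp_types);
 *
 * buf, tso and vlan are hypothetical locals describing the current packet.
 */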

/**
 * Convert the Checksum offloads to Verbs.
 *
 * @param buf
 *   Pointer to the mbuf.
 *
 * @return
 *   Converted checksum flags.
 */
static __rte_always_inline uint8_t
txq_ol_cksum_to_cs(struct rte_mbuf *buf)
{
	uint32_t idx;
	uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
	const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
				       PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;

	/*
	 * The index should have:
	 * bit[0] = PKT_TX_TCP_SEG
	 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
	 * bit[4] = PKT_TX_IP_CKSUM
	 * bit[8] = PKT_TX_OUTER_IP_CKSUM
	 * bit[9] = tunnel
	 */
	idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
	return mlx5_cksum_table[idx];
}
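
/*
 * Illustrative sketch (assumed usage): the returned flags end up in the
 * checksum field of the WQE Ethernet segment, e.g.:
 *
 *	uint8_t cs_flags = txq_ol_cksum_to_cs(buf);
 *
 * where buf is a hypothetical local naming the mbuf being transmitted.
 */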

/**
 * Count the number of contiguous single segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 *
 * @return
 *   Number of contiguous single segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	if (!pkts_n)
		return 0;
	/* Count the number of contiguous single segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) > 1)
			break;
	return pos;
}

/**
 * Count the number of contiguous multi-segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 *
 * @return
 *   Number of contiguous multi-segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	if (!pkts_n)
		return 0;
	/* Count the number of contiguous multi-segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) == 1)
			break;
	return pos;
}

#endif /* RTE_PMD_MLX5_RXTX_H_ */