/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

struct mlx5_rxq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t oerrors; /**< Total number of failed transmitted packets. */
};

struct priv;

/* Memory region queue object. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) next; /**< Pointer to the next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	uint32_t lkey; /**< rte_cpu_to_be_32(mr->lkey). */
	uintptr_t start; /**< Start address of MR. */
	uintptr_t end; /**< End address of MR. */
	struct ibv_mr *mr; /**< Memory Region. */
	struct rte_mempool *mp; /**< Memory Pool. */
};

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int :15; /* Remaining bits. */
	volatile uint32_t *rq_db; /* RQ doorbell record. */
	volatile uint32_t *cq_db; /* CQ doorbell record. */
	uint16_t port_id; /* Port ID for incoming packets. */
	uint16_t rq_ci; /* RQ consumer index. */
	uint16_t rq_pi; /* RQ producer index. */
	uint16_t cq_ci; /* CQ consumer index. */
	volatile struct mlx5_wqe_data_seg(*wqes)[]; /* Work queue. */
	volatile struct mlx5_cqe(*cqes)[]; /* Completion queue. */
	struct rxq_zip zip; /* Compressed context. */
	struct rte_mbuf *(*elts)[]; /* Mbuf ring. */
	struct rte_mempool *mp; /* Memory Pool for allocations. */
	struct mlx5_rxq_stats stats; /* RX queue counters. */
	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
} __rte_cache_aligned;
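
/*
 * The log2 bitfields above keep struct mlx5_rxq_data compact; the data path
 * expands them into ring sizes and index masks. A minimal sketch, assuming
 * rxq is a valid pointer (the local names are illustrative only):
 *
 *	const unsigned int sges_n = 1 << rxq->sges_n;
 *	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
 *	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
 *
 * sges_n is the number of buffers per packet, cqe_cnt and wqe_cnt the CQ
 * and mbuf ring index masks.
 */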

/* Verbs Rx queue elements. */
struct mlx5_rxq_ibv {
	LIST_ENTRY(mlx5_rxq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_wq *wq; /* Work Queue. */
	struct ibv_comp_channel *channel; /* Rx interrupt completion channel. */
	struct mlx5_mr *mr; /* Memory Region (for mp). */
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct priv *priv; /* Back pointer to private data. */
	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
	struct mlx5_rxq_data rxq; /* Data path structure. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint16_t idx; /* Queue index. */
};

/* Indirection table. */
struct mlx5_ind_table_ibv {
	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
	uint16_t queues_n; /**< Number of queues in the list. */
	uint16_t queues[]; /**< Queue list. */
};

/* Hash Rx queue. */
struct mlx5_hrxq {
	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
	struct ibv_qp *qp; /* Verbs queue pair. */
	uint64_t hash_fields; /* Verbs Hash fields. */
	uint8_t rss_key_len; /* Hash key length in bytes. */
	uint8_t rss_key[]; /* Hash key. */
};

/* TX queue descriptor. */
__extension__
struct mlx5_txq_data {
	uint16_t elts_head; /* Current counter in (*elts)[]. */
	uint16_t elts_tail; /* Counter of first element awaiting completion. */
	uint16_t elts_comp; /* Counter since last completion request. */
	uint16_t mpw_comp; /* WQ index since last completion request. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
#ifndef NDEBUG
	uint16_t cq_pi; /* Producer index for completion queue. */
#endif
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_pi; /* Producer index for work queue. */
	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
	uint16_t tunnel_en:1;
	/* When set TX offloads for tunneled packets are supported. */
	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
	uint16_t mr_cache_idx; /* Index of last hit entry. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	uint64_t offloads; /* Offloads for Tx Queue. */
	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
	volatile void *wqes; /* Work queue (use volatile to write into). */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	volatile void *bf_reg; /* Blueflame register remapped. */
	struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */
	struct rte_mbuf *(*elts)[]; /* TX elements. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;
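
/*
 * Note that max_inline is expressed in multiples of RTE_CACHE_LINE_SIZE.
 * A sketch of the byte budget available for inlining, assuming txq is a
 * valid pointer (the local name is illustrative only):
 *
 *	unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
 */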

/* Verbs Tx queue elements. */
struct mlx5_txq_ibv {
	LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_qp *qp; /* Queue Pair. */
};

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct priv *priv; /* Back pointer to private data. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int max_inline_data; /* Max inline data. */
	unsigned int max_tso_header; /* Max TSO header size. */
	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
	struct mlx5_txq_data txq; /* Data path structure. */
	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
	volatile void *bf_reg_orig; /* Blueflame register from Verbs. */
	uint16_t idx; /* Queue index. */
};

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;

void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
void mlx5_rx_queue_release(void *dpdk_rxq);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   struct rte_mempool *mp);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
						  uint16_t queues[],
						  uint16_t queues_n);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
						  uint16_t queues[],
						  uint16_t queues_n);
int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_ibv *ind_tbl);
int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key,
				uint8_t rss_key_len, uint64_t hash_fields,
				uint16_t queues[], uint16_t queues_n);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key,
				uint8_t rss_key_len, uint64_t hash_fields,
				uint16_t queues[], uint16_t queues_n);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
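
/*
 * The get/new/release triplets above implement reference-counted
 * lookup-or-create on the object lists. A minimal sketch of the expected
 * calling pattern for hash Rx queues, assuming dev, rss_key, rss_key_len,
 * hash_fields, queues and queues_n are prepared by the caller:
 *
 *	struct mlx5_hrxq *hrxq;
 *
 *	hrxq = mlx5_hrxq_get(dev, rss_key, rss_key_len, hash_fields,
 *			     queues, queues_n);
 *	if (!hrxq)
 *		hrxq = mlx5_hrxq_new(dev, rss_key, rss_key_len, hash_fields,
 *				     queues, queues_n);
 *
 * and, when the queue is no longer used, drop the reference taken above:
 *
 *	mlx5_hrxq_release(dev, hrxq);
 */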

/* mlx5_txq.c */

int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
void mlx5_tx_queue_release(void *dpdk_txq);
int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_txconf *conf);
struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_verify(struct rte_eth_dev *dev);
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);

/* mlx5_rxtx.c */

extern uint32_t mlx5_ptype_table[];

void mlx5_set_ptype_table(void);
uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
		       uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
				  uint16_t pkts_n);
uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);

/* Vectorized version of mlx5_rxtx.c */
int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);
uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);

/* mlx5_mr.c */

void mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg);
struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq,
				   struct rte_mempool *mp, unsigned int idx);

#ifndef NDEBUG

/**
 * Verify or set magic value in CQE.
 *
 * @param cqe
 *   Pointer to CQE.
 *
 * @return
 *   0 the first time.
 */
static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
	static const uint8_t magic[] = "seen";
	volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
	int ret = 1;
	unsigned int i;

	for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
		if (!ret || (*buf)[i] != magic[i]) {
			ret = 0;
			(*buf)[i] = magic[i];
		}
	return ret;
}

#endif /* NDEBUG */

/**
 * Check whether CQE is valid.
 *
 * @param cqe
 *   Pointer to CQE.
 * @param cqes_n
 *   Size of completion queue.
 * @param ci
 *   Consumer index.
 *
 * @return
 *   0 on success, 1 on failure.
 */
static __rte_always_inline int
check_cqe(volatile struct mlx5_cqe *cqe,
	  unsigned int cqes_n, const uint16_t ci)
{
	uint16_t idx = ci & cqes_n;
	uint8_t op_own = cqe->op_own;
	uint8_t op_owner = MLX5_CQE_OWNER(op_own);
	uint8_t op_code = MLX5_CQE_OPCODE(op_own);

	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
		return 1; /* No CQE. */
#ifndef NDEBUG
	if ((op_code == MLX5_CQE_RESP_ERR) ||
	    (op_code == MLX5_CQE_REQ_ERR)) {
		volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
		uint8_t syndrome = err_cqe->syndrome;

		if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
		    (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
			return 0;
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR,
				"unexpected CQE error %u (0x%02x) syndrome"
				" 0x%02x",
				op_code, op_code, syndrome);
			rte_hexdump(stderr, "MLX5 Error CQE:",
				    (const void *)((uintptr_t)err_cqe),
				    sizeof(*err_cqe));
		}
		return 1;
	} else if ((op_code != MLX5_CQE_RESP_SEND) &&
		   (op_code != MLX5_CQE_REQ)) {
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
				op_code, op_code);
			rte_hexdump(stderr, "MLX5 CQE:",
				    (const void *)((uintptr_t)cqe),
				    sizeof(*cqe));
		}
		return 1;
	}
#endif /* NDEBUG */
	return 0;
}
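
/*
 * check_cqe() is meant to be called on the CQE at the current consumer
 * index before that index is advanced. A minimal polling sketch, with
 * cqe_cnt == cqe_n - 1 as in mlx5_tx_complete() below:
 *
 *	cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
 *	if (check_cqe(cqe, cqe_n, cq_ci))
 *		return;
 *	++cq_ci;
 */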

/**
 * Return the address of the WQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param ci
 *   WQE consumer index.
 *
 * @return
 *   WQE address.
 */
static inline uintptr_t *
tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
{
	ci &= ((1 << txq->wqe_n) - 1);
	return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
}

/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static __rte_always_inline void
mlx5_tx_complete(struct mlx5_txq_data *txq)
{
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int cqe_n = 1 << txq->cqe_n;
	const unsigned int cqe_cnt = cqe_n - 1;
	uint16_t elts_free = txq->elts_tail;
	uint16_t elts_tail;
	uint16_t cq_ci = txq->cq_ci;
	volatile struct mlx5_cqe *cqe = NULL;
	volatile struct mlx5_wqe_ctrl *ctrl;
	struct rte_mbuf *m, *free[elts_n];
	struct rte_mempool *pool = NULL;
	unsigned int blk_n = 0;

	cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
	if (unlikely(check_cqe(cqe, cqe_n, cq_ci)))
		return;
#ifndef NDEBUG
	if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
	    (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
			rte_hexdump(stderr, "MLX5 TXQ:",
				    (const void *)((uintptr_t)txq->wqes),
				    ((1 << txq->wqe_n) * MLX5_WQE_SIZE));
		}
		return;
	}
#endif /* NDEBUG */
	++cq_ci;
	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
	ctrl = (volatile struct mlx5_wqe_ctrl *)
		tx_mlx5_wqe(txq, txq->wqe_pi);
	elts_tail = ctrl->ctrl3;
	assert((elts_tail & elts_m) < (1 << txq->wqe_n));
	/* Free completed mbufs, bulk-returning them per mempool. */
	while (elts_free != elts_tail) {
		m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
		if (likely(m != NULL)) {
			if (likely(m->pool == pool)) {
				free[blk_n++] = m;
			} else {
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk_n);
				free[0] = m;
				pool = m->pool;
				blk_n = 1;
			}
		}
	}
	if (blk_n)
		rte_mempool_put_bulk(pool, (void *)free, blk_n);
#ifndef NDEBUG
	elts_free = txq->elts_tail;
	/* Poisoning. */
	while (elts_free != elts_tail) {
		memset(&(*txq->elts)[elts_free & elts_m],
		       0x66,
		       sizeof((*txq->elts)[elts_free & elts_m]));
		++elts_free;
	}
#endif /* NDEBUG */
	txq->cq_ci = cq_ci;
	txq->elts_tail = elts_tail;
	/* Update the consumer index. */
	rte_compiler_barrier();
	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
}

/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
mlx5_tx_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}

/**
 * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[].
 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
 * remove an entry first.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mb
 *   Pointer to mbuf; the Memory Region lkey is returned for the Memory
 *   Pool it belongs to.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
static __rte_always_inline uint32_t
mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	uint16_t i = txq->mr_cache_idx;
	uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
	struct mlx5_mr *mr;

	assert(i < RTE_DIM(txq->mp2mr));
	if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end > addr))
		return txq->mp2mr[i]->lkey;
	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (unlikely(txq->mp2mr[i] == NULL ||
		    txq->mp2mr[i]->mr == NULL)) {
			/* Unknown MP, add a new MR for it. */
			break;
		}
		if (txq->mp2mr[i]->start <= addr &&
		    txq->mp2mr[i]->end > addr) {
			assert(txq->mp2mr[i]->lkey != (uint32_t)-1);
			txq->mr_cache_idx = i;
			return txq->mp2mr[i]->lkey;
		}
	}
	mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
	/*
	 * Request the reference to use in this queue, the original one is
	 * kept by the control plane.
	 */
	if (mr) {
		rte_atomic32_inc(&mr->refcnt);
		txq->mr_cache_idx = i >= RTE_DIM(txq->mp2mr) ? i - 1 : i;
		return mr->lkey;
	} else {
		struct rte_mempool *mp = mlx5_tx_mb2mp(mb);

		DRV_LOG(WARNING, "failed to register mempool 0x%p(%s)",
			(void *)mp, mp->name);
	}
	return (uint32_t)-1;
}
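
/*
 * The lkey stored in txq->mp2mr[] is already byte-swapped (see struct
 * mlx5_mr), so the value returned above can be written into a WQE data
 * segment as-is. A sketch, with dseg pointing at the mlx5_wqe_data_seg
 * under construction (a local name, not part of this header):
 *
 *	dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
 *	dseg->byte_count = rte_cpu_to_be_32(rte_pktmbuf_data_len(buf));
 *	dseg->lkey = mlx5_tx_mb2mr(txq, buf);
 */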

/**
 * Ring TX queue doorbell and flush the update if requested.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 * @param cond
 *   Request for write memory barrier after BlueFlame update.
 */
static __rte_always_inline void
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
		       int cond)
{
	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
	volatile uint64_t *src = ((volatile uint64_t *)wqe);

	rte_compiler_barrier();
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
	/* Ensure ordering between DB record and BF copy. */
	rte_wmb();
	*dst = *src;
	if (cond)
		rte_wmb();
}

/**
 * Ring TX queue doorbell and flush the update by write memory barrier.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}
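
/*
 * Typical data-path use: ring the doorbell once, after the last WQE of a
 * burst has been written, to hand the whole batch to the NIC. A sketch,
 * with last_wqe standing for the last posted WQE (a local name):
 *
 *	txq->elts_head = elts_head;
 *	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
 */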

/**
 * Convert the Checksum offloads to Verbs.
 *
 * @param txq_data
 *   Pointer to the Tx queue.
 * @param buf
 *   Pointer to the mbuf.
 *
 * @return
 *   The converted cs_flags.
 */
static __rte_always_inline uint8_t
txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
{
	uint8_t cs_flags = 0;

	/* Should we enable HW CKSUM offload? */
	if (buf->ol_flags &
	    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
	     PKT_TX_OUTER_IP_CKSUM)) {
		if (txq_data->tunnel_en &&
		    (buf->ol_flags &
		     (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
			cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
				   MLX5_ETH_WQE_L4_INNER_CSUM;
			if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
				cs_flags |= MLX5_ETH_WQE_L3_CSUM;
		} else {
			cs_flags = MLX5_ETH_WQE_L3_CSUM |
				   MLX5_ETH_WQE_L4_CSUM;
		}
	}
	return cs_flags;
}
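
/*
 * The returned byte is copied into the checksum flags field of the WQE
 * Ethernet segment. A sketch, with eseg pointing at the Ethernet segment
 * of the WQE under construction (a local name, not part of this header):
 *
 *	uint8_t cs_flags = txq_ol_cksum_to_cs(txq, buf);
 *
 *	eseg->cs_flags = cs_flags;
 */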

/**
 * Count the number of contiguous single segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 *
 * @return
 *   Number of contiguous single segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	if (!pkts_n)
		return 0;
	/* Count the number of contiguous single segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) > 1)
			break;
	return pos;
}

/**
 * Count the number of contiguous multi-segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 *
 * @return
 *   Number of contiguous multi-segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	if (!pkts_n)
		return 0;
	/* Count the number of contiguous multi-segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) == 1)
			break;
	return pos;
}
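
/*
 * Together these two helpers let a Tx routine split a burst into runs of
 * single and multi-segment packets and hand each run to the best suited
 * send path. A minimal sketch (loop-local names only):
 *
 *	while (pkts_n) {
 *		unsigned int n = txq_count_contig_single_seg(pkts, pkts_n);
 *
 *		if (!n)
 *			n = txq_count_contig_multi_seg(pkts, pkts_n);
 *		... send the n packets with the matching routine ...
 *		pkts += n;
 *		pkts_n -= n;
 *	}
 */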

#endif /* RTE_PMD_MLX5_RXTX_H_ */