/*-
 *   BSD LICENSE
 *
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
struct mlx5_rxq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};
struct mlx5_txq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t oerrors; /**< Total number of failed transmitted packets. */
};
/* Memory region queue object. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) next; /**< Pointer to the next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	uint32_t lkey; /**< rte_cpu_to_be_32(mr->lkey). */
	uintptr_t start; /**< Start address of MR. */
	uintptr_t end; /**< End address of MR. */
	struct ibv_mr *mr; /**< Memory Region. */
	struct rte_mempool *mp; /**< Memory Pool. */
};
/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};
/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int pending_err:1; /* CQE error needs to be handled. */
	unsigned int :14; /* Remaining bits. */
	volatile uint32_t *rq_db; /* RQ doorbell record. */
	volatile uint32_t *cq_db; /* CQ doorbell record. */
	uint16_t rq_ci; /* RQ consumer index. */
	uint16_t rq_pi; /* RQ producer index. */
	uint16_t cq_ci; /* CQ consumer index. */
	volatile struct mlx5_wqe_data_seg(*wqes)[]; /* RQ entries. */
	volatile struct mlx5_cqe(*cqes)[]; /* Completion queue entries. */
	struct rxq_zip zip; /* Compressed context. */
	struct rte_mbuf *(*elts)[]; /* Mbufs currently posted to the RQ. */
	struct rte_mempool *mp; /* Memory pool mbufs are allocated from. */
	struct mlx5_rxq_stats stats; /* RX queue counters. */
	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
} __rte_cache_aligned;
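
/*
 * Illustrative sketch (not part of the original header): the *_n fields
 * above store log2 sizes, so the data path derives ring sizes and
 * wrap-around masks from them. `rxq` below is an assumed example handle.
 *
 *	const unsigned int elts_n = 1 << rxq->elts_n;       // mbuf ring size
 *	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1; // CQ index mask
 *	volatile struct mlx5_cqe *cqe =
 *		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
 */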
/* Verbs Rx queue elements. */
struct mlx5_rxq_ibv {
	LIST_ENTRY(mlx5_rxq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_wq *wq; /* Work Queue. */
	struct ibv_comp_channel *channel; /* Rx interrupt channel. */
	struct mlx5_mr *mr; /* Memory Region (for mp). */
};
/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct priv *priv; /* Back pointer to private data. */
	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
	struct mlx5_rxq_data rxq; /* Data path structure. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
};
/* Indirection table. */
struct mlx5_ind_table_ibv {
	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
	uint16_t queues_n; /**< Number of queues in the list. */
	uint16_t queues[]; /**< Queue list. */
};
/* Hash Rx queue. */
struct mlx5_hrxq {
	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
	struct ibv_qp *qp; /* Verbs queue pair. */
	uint64_t hash_fields; /* Verbs Hash fields. */
	uint8_t rss_key_len; /* Hash key length in bytes. */
	uint8_t rss_key[]; /* Hash key. */
};
/* TX queue descriptor. */
__extension__
struct mlx5_txq_data {
	uint16_t elts_head; /* Current counter in (*elts)[]. */
	uint16_t elts_tail; /* Counter of first element awaiting completion. */
	uint16_t elts_comp; /* Counter since last completion request. */
	uint16_t mpw_comp; /* WQ index since last completion request. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
#ifndef NDEBUG
	uint16_t cq_pi; /* Producer index for completion queue. */
#endif
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_pi; /* Producer index for work queue. */
	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
	uint16_t tunnel_en:1;
	/* When set, TX offloads for tunneled packets are supported. */
	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
	uint16_t mr_cache_idx; /* Index of last hit entry. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	uint32_t flags; /* Flags for Tx Queue. */
	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
	volatile void *wqes; /* Work queue (use volatile to write into). */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	volatile void *bf_reg; /* BlueFlame register. */
	struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */
	struct rte_mbuf *(*elts)[]; /* TX elements. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;
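
/*
 * Illustrative sketch (assumed example, `txq` is a hypothetical handle):
 * elts_head and elts_tail are free-running counters; only the masked value
 * indexes (*elts)[], so their difference stays meaningful across wrap-around.
 *
 *	const uint16_t elts_n = 1 << txq->elts_n;
 *	const uint16_t elts_m = elts_n - 1; // index mask
 *	uint16_t used = (uint16_t)(txq->elts_head - txq->elts_tail);
 *	struct rte_mbuf *mb = (*txq->elts)[txq->elts_head & elts_m];
 */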
/* Verbs Tx queue elements. */
struct mlx5_txq_ibv {
	LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_qp *qp; /* Queue Pair. */
};
/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct priv *priv; /* Back pointer to private data. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int max_inline_data; /* Max inline data. */
	unsigned int max_tso_header; /* Max TSO header size. */
	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
	struct mlx5_txq_data txq; /* Data path structure. */
	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
};
/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;
void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *);
int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
			const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);
int priv_rx_intr_vec_enable(struct priv *priv);
void priv_rx_intr_vec_disable(struct priv *priv);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *, uint16_t);
struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *, uint16_t);
int mlx5_priv_rxq_ibv_release(struct priv *, struct mlx5_rxq_ibv *);
int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *);
int mlx5_priv_rxq_ibv_verify(struct priv *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t,
					uint16_t, unsigned int,
					struct rte_mempool *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t);
int mlx5_priv_rxq_release(struct priv *, uint16_t);
int mlx5_priv_rxq_releasable(struct priv *, uint16_t);
int mlx5_priv_rxq_verify(struct priv *);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *);
struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *,
						       uint16_t [],
						       uint16_t);
struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *,
						       uint16_t [],
						       uint16_t);
int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *);
int mlx5_priv_ind_table_ibv_verify(struct priv *);
struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *, uint8_t *, uint8_t,
				     uint64_t, uint16_t [], uint16_t);
struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t,
				     uint64_t, uint16_t [], uint16_t);
int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *);
int mlx5_priv_hrxq_ibv_verify(struct priv *);
/* mlx5_txq.c */

int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
			const struct rte_eth_txconf *);
void mlx5_tx_queue_release(void *);
int priv_tx_uar_remap(struct priv *priv, int fd);
struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *, uint16_t);
struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *, uint16_t);
int mlx5_priv_txq_ibv_release(struct priv *, struct mlx5_txq_ibv *);
int mlx5_priv_txq_ibv_releasable(struct priv *, struct mlx5_txq_ibv *);
int mlx5_priv_txq_ibv_verify(struct priv *);
struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *, uint16_t,
					uint16_t, unsigned int,
					const struct rte_eth_txconf *);
struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *, uint16_t);
int mlx5_priv_txq_release(struct priv *, uint16_t);
int mlx5_priv_txq_releasable(struct priv *, uint16_t);
int mlx5_priv_txq_verify(struct priv *);
void txq_alloc_elts(struct mlx5_txq_ctrl *);
/* mlx5_rxtx.c */

extern uint32_t mlx5_ptype_table[];
void mlx5_set_ptype_table(void);
uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
int mlx5_rx_descriptor_status(void *, uint16_t);
int mlx5_tx_descriptor_status(void *, uint16_t);
/* Vectorized version of mlx5_rxtx.c */

int priv_check_raw_vec_tx_support(struct priv *);
int priv_check_vec_tx_support(struct priv *);
int rxq_check_vec_support(struct mlx5_rxq_data *);
int priv_check_vec_rx_support(struct priv *);
uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);
/* mlx5_mr.c */

void mlx5_mp2mr_iter(struct rte_mempool *, void *);
struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *,
				   struct rte_mempool *, unsigned int);
struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *,
				   unsigned int);
#ifndef NDEBUG

/**
 * Verify or set magic value in CQE.
 *
 * @param cqe
 *   Pointer to CQE.
 *
 * @return
 *   0 the first time.
 */
static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
	static const uint8_t magic[] = "seen";
	volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
	int ret = 1;
	unsigned int i;

	for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
		if (!ret || (*buf)[i] != magic[i]) {
			ret = 0;
			(*buf)[i] = magic[i];
		}
	return ret;
}

#endif /* NDEBUG */
/**
 * Check whether CQE is valid.
 *
 * @param cqe
 *   Pointer to CQE.
 * @param cqes_n
 *   Size of completion queue.
 * @param ci
 *   Consumer index.
 *
 * @return
 *   0 on success, 1 on failure.
 */
static __rte_always_inline int
check_cqe(volatile struct mlx5_cqe *cqe,
	  unsigned int cqes_n, const uint16_t ci)
{
	uint16_t idx = ci & cqes_n;
	uint8_t op_own = cqe->op_own;
	uint8_t op_owner = MLX5_CQE_OWNER(op_own);
	uint8_t op_code = MLX5_CQE_OPCODE(op_own);

	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
		return 1; /* No CQE. */
#ifndef NDEBUG
	if ((op_code == MLX5_CQE_RESP_ERR) ||
	    (op_code == MLX5_CQE_REQ_ERR)) {
		volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
		uint8_t syndrome = err_cqe->syndrome;

		if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
		    (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
			return 0;
		if (!check_cqe_seen(cqe)) {
			ERROR("unexpected CQE error %u (0x%02x)"
			      " syndrome 0x%02x",
			      op_code, op_code, syndrome);
			rte_hexdump(stderr, "MLX5 Error CQE:",
				    (const void *)((uintptr_t)err_cqe),
				    sizeof(*err_cqe));
		}
		return 1;
	} else if ((op_code != MLX5_CQE_RESP_SEND) &&
		   (op_code != MLX5_CQE_REQ)) {
		if (!check_cqe_seen(cqe)) {
			ERROR("unexpected CQE opcode %u (0x%02x)",
			      op_code, op_code);
			rte_hexdump(stderr, "MLX5 CQE:",
				    (const void *)((uintptr_t)cqe),
				    sizeof(*cqe));
		}
		return 1;
	}
#endif /* NDEBUG */
	return 0;
}
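
/*
 * Usage sketch (illustrative, `txq` is an assumed handle): the CQE owner bit
 * flips on every wrap of the CQ, so passing the CQ size (a power of two) as
 * cqes_n lets `ci & cqes_n` recover the expected ownership phase.
 *
 *	const unsigned int cqe_n = 1 << txq->cqe_n;
 *	volatile struct mlx5_cqe *cqe =
 *		&(*txq->cqes)[txq->cq_ci & (cqe_n - 1)];
 *
 *	if (check_cqe(cqe, cqe_n, txq->cq_ci))
 *		return; // no new completion to process
 */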
/**
 * Return the address of the WQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param ci
 *   WQE consumer index.
 *
 * @return
 *   WQE address.
 */
static inline uintptr_t *
tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
{
	ci &= ((1 << txq->wqe_n) - 1);
	return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
}
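
/*
 * Worked example (illustrative): with wqe_n = 6 the ring holds 64 WQEBBs of
 * MLX5_WQE_SIZE (64) bytes each; a free-running index ci = 70 wraps to slot
 * 70 & 63 = 6, i.e. byte offset 6 * 64 = 384 from txq->wqes.
 */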
/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static __rte_always_inline void
mlx5_tx_complete(struct mlx5_txq_data *txq)
{
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int cqe_n = 1 << txq->cqe_n;
	const unsigned int cqe_cnt = cqe_n - 1;
	uint16_t elts_free = txq->elts_tail;
	uint16_t elts_tail;
	uint16_t cq_ci = txq->cq_ci;
	volatile struct mlx5_cqe *cqe = NULL;
	volatile struct mlx5_wqe_ctrl *ctrl;
	struct rte_mbuf *m, *free[elts_n];
	struct rte_mempool *pool = NULL;
	unsigned int blk_n = 0;

	cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
	if (unlikely(check_cqe(cqe, cqe_n, cq_ci)))
		return;
#ifndef NDEBUG
	if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
	    (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
		if (!check_cqe_seen(cqe)) {
			ERROR("unexpected error CQE, TX stopped");
			rte_hexdump(stderr, "MLX5 TXQ:",
				    (const void *)((uintptr_t)txq->wqes),
				    ((1 << txq->wqe_n) * MLX5_WQE_SIZE));
		}
		return;
	}
#endif /* NDEBUG */
	++cq_ci;
	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
	ctrl = (volatile struct mlx5_wqe_ctrl *)
		tx_mlx5_wqe(txq, txq->wqe_pi);
	elts_tail = ctrl->ctrl3;
	assert((elts_tail & elts_m) < (1 << txq->wqe_n));
	/* Free all the WRs buffers. */
	while (elts_free != elts_tail) {
		m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
		if (likely(m != NULL)) {
			if (likely(m->pool == pool)) {
				free[blk_n++] = m;
			} else {
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk_n);
				free[0] = m;
				pool = m->pool;
				blk_n = 1;
			}
		}
	}
	if (blk_n)
		rte_mempool_put_bulk(pool, (void *)free, blk_n);
#ifndef NDEBUG
	elts_free = txq->elts_tail;
	/* Poisoning. */
	while (elts_free != elts_tail) {
		memset(&(*txq->elts)[elts_free & elts_m],
		       0x66,
		       sizeof((*txq->elts)[elts_free & elts_m]));
		++elts_free;
	}
#endif
	txq->cq_ci = cq_ci;
	txq->elts_tail = elts_tail;
	/* Update the consumer index. */
	rte_compiler_barrier();
	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
}
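
/*
 * Usage sketch (illustrative; my_tx_burst is hypothetical): burst functions
 * reclaim completed mbufs before posting new work.
 *
 *	static uint16_t
 *	my_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 *	{
 *		struct mlx5_txq_data *txq = dpdk_txq;
 *
 *		mlx5_tx_complete(txq); // recycle finished Tx mbufs first
 *		// ... build WQEs for pkts, then ring the doorbell ...
 *	}
 */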
/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
mlx5_tx_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}
/**
 * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[].
 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
 * remove an entry first.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mb
 *   Pointer to mbuf whose Memory Region lkey must be returned.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
static __rte_always_inline uint32_t
mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	uint16_t i = txq->mr_cache_idx;
	uintptr_t addr = rte_pktmbuf_mtod_offset(mb, uintptr_t, DATA_LEN(mb));
	struct mlx5_mr *mr;

	assert(i < RTE_DIM(txq->mp2mr));
	if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end >= addr))
		return txq->mp2mr[i]->lkey;
	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (unlikely(txq->mp2mr[i]->mr == NULL)) {
			/* Unknown MP, add a new MR for it. */
			break;
		}
		if (txq->mp2mr[i]->start <= addr &&
		    txq->mp2mr[i]->end >= addr) {
			assert(txq->mp2mr[i]->lkey != (uint32_t)-1);
			assert(rte_cpu_to_be_32(txq->mp2mr[i]->mr->lkey) ==
			       txq->mp2mr[i]->lkey);
			txq->mr_cache_idx = i;
			return txq->mp2mr[i]->lkey;
		}
	}
	txq->mr_cache_idx = 0;
	mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
	/*
	 * Request the reference to use in this queue, the original one is
	 * kept by the control plane.
	 */
	if (mr) {
		rte_atomic32_inc(&mr->refcnt);
		return mr->lkey;
	}
	return (uint32_t)-1;
}
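
/*
 * Usage sketch (illustrative; dseg and buf are assumed locals): every Tx
 * data segment must carry the lkey of the MR covering its buffer. Note the
 * cached lkey is already in big-endian byte order.
 *
 *	dseg->byte_count = rte_cpu_to_be_32(DATA_LEN(buf));
 *	dseg->lkey = mlx5_tx_mb2mr(txq, buf);
 *	dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
 */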
/**
 * Ring TX queue doorbell and flush the update if requested.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 * @param cond
 *   Request for write memory barrier after BlueFlame update.
 */
static __rte_always_inline void
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
		       int cond)
{
	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
	volatile uint64_t *src = ((volatile uint64_t *)wqe);

	rte_io_wmb();
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
	/* Ensure ordering between DB record and BF copy. */
	rte_wmb();
	*dst = *src;
	if (cond)
		rte_wmb();
}
/**
 * Ring TX queue doorbell and flush the update by write memory barrier.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}
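
/*
 * Usage sketch (illustrative): a burst function posts its WQEs, then rings
 * the doorbell once with the last control segment written.
 *
 *	volatile struct mlx5_wqe *last_wqe;
 *
 *	// ... fill WQEs, remembering the last one in last_wqe ...
 *	mlx5_tx_dbrec(txq, last_wqe);
 */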
/**
 * Convert the Checksum offloads to Verbs.
 *
 * @param txq_data
 *   Pointer to the Tx queue.
 * @param buf
 *   Pointer to the mbuf.
 *
 * @return
 *   The converted cs_flags.
 */
static __rte_always_inline uint8_t
txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
{
	uint8_t cs_flags = 0;

	/* Should we enable HW CKSUM offload? */
	if (buf->ol_flags &
	    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
	     PKT_TX_OUTER_IP_CKSUM)) {
		if (txq_data->tunnel_en &&
		    (buf->ol_flags &
		     (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
			cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
				   MLX5_ETH_WQE_L4_INNER_CSUM;
			if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
				cs_flags |= MLX5_ETH_WQE_L3_CSUM;
		} else {
			cs_flags = MLX5_ETH_WQE_L3_CSUM |
				   MLX5_ETH_WQE_L4_CSUM;
		}
	}
	return cs_flags;
}
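
/*
 * Usage sketch (illustrative): cs_flags is computed once per packet and
 * copied into the Ethernet segment of its WQE so the NIC fills in the
 * selected checksums.
 *
 *	uint8_t cs_flags = txq_ol_cksum_to_cs(txq, buf);
 */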
/**
 * Count the number of contiguous single segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 *
 * @return
 *   Number of contiguous single segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	if (!pkts_n)
		return 0;
	/* Count the number of contiguous single segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) > 1)
			break;
	return pos;
}
/**
 * Count the number of contiguous multi-segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 *
 * @return
 *   Number of contiguous multi-segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	if (!pkts_n)
		return 0;
	/* Count the number of contiguous multi-segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) == 1)
			break;
	return pos;
}
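
/*
 * Usage sketch (illustrative): the vectorized Tx path handles only
 * single-segment packets, so a caller can split a burst into contiguous
 * runs and dispatch each run to the matching path.
 *
 *	while (pkts_n) {
 *		uint16_t n = txq_count_contig_single_seg(pkts, pkts_n);
 *
 *		// ... send n single-segment packets on the fast path ...
 *		pkts += n;
 *		pkts_n -= n;
 *		n = txq_count_contig_multi_seg(pkts, pkts_n);
 *		// ... send n multi-segment packets on the scalar path ...
 *		pkts += n;
 *		pkts_n -= n;
 *	}
 */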
#endif /* RTE_PMD_MLX5_RXTX_H_ */