/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

struct mlx5_rxq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t oerrors; /**< Total number of failed transmitted packets. */
};

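/*
 * CQE compression, when enabled on the device, packs several completions
 * into a single CQE followed by arrays of mini-CQEs. The context below
 * tracks the current position inside such a compressed session so that
 * decompression can resume on the next burst.
 */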
/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int :15; /* Remaining bits. */
	volatile uint32_t *rq_db; /* RQ doorbell record. */
	volatile uint32_t *cq_db; /* CQ doorbell record. */
	volatile struct mlx5_wqe_data_seg(*wqes)[]; /* RQ work queue entries. */
	volatile struct mlx5_cqe(*cqes)[]; /* Completion queue entries. */
	struct rxq_zip zip; /* Compressed context. */
	struct rte_mbuf *(*elts)[]; /* Rx elements (mbufs). */
	struct rte_mempool *mp; /* Mbuf memory pool. */
	struct mlx5_rxq_stats stats; /* Rx queue counters. */
	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
	uint32_t tunnel; /* Tunnel information. */
} __rte_cache_aligned;

/* Verbs Rx queue elements. */
struct mlx5_rxq_ibv {
	LIST_ENTRY(mlx5_rxq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_wq *wq; /* Work Queue. */
	struct ibv_comp_channel *channel; /* Rx interrupt completion channel. */
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct priv *priv; /* Back pointer to private data. */
	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
	struct mlx5_rxq_data rxq; /* Data path structure. */
	unsigned int socket; /* CPU socket ID for allocations. */
	uint32_t tunnel_types[16]; /* Tunnel type counter. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint16_t idx; /* Queue index. */
};

/* Indirection table. */
struct mlx5_ind_table_ibv {
	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
	uint32_t queues_n; /**< Number of queues in the list. */
	uint16_t queues[]; /**< Queue list. */
};

/* Hash Rx queue. */
struct mlx5_hrxq {
	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
	struct ibv_qp *qp; /* Verbs queue pair. */
	uint64_t hash_fields; /* Verbs Hash fields. */
	uint32_t tunnel; /* Tunnel type. */
	uint32_t rss_level; /* RSS on tunnel level. */
	uint32_t rss_key_len; /* Hash key length in bytes. */
	uint8_t rss_key[]; /* Hash key. */
};

/* TX queue descriptor. */
struct mlx5_txq_data {
	uint16_t elts_head; /* Current counter in (*elts)[]. */
	uint16_t elts_tail; /* Counter of first element awaiting completion. */
	uint16_t elts_comp; /* Counter since last completion request. */
	uint16_t mpw_comp; /* WQ index since last completion request. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
	uint16_t cq_pi; /* Producer index for completion queue. */
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_pi; /* Producer index for work queue. */
	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
	uint16_t tunnel_en:1;
	/* When set TX offload for tunneled packets is supported. */
	uint16_t swp_en:1; /* Whether SW parser is enabled. */
	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	uint64_t offloads; /* Offloads for Tx Queue. */
	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
	volatile void *wqes; /* Work queue (use volatile to write into). */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	volatile void *bf_reg; /* Blueflame register remapped. */
	struct rte_mbuf *(*elts)[]; /* TX elements. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;

/* Verbs Tx queue elements. */
struct mlx5_txq_ibv {
	LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_qp *qp; /* Queue Pair. */
};

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct priv *priv; /* Back pointer to private data. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int max_inline_data; /* Max inline data. */
	unsigned int max_tso_header; /* Max TSO header size. */
	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
	struct mlx5_txq_data txq; /* Data path structure. */
	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
	volatile void *bf_reg_orig; /* Blueflame register from Verbs. */
	uint16_t idx; /* Queue index. */
};

extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;

void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
void mlx5_rx_queue_release(void *dpdk_rxq);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   struct rte_mempool *mp);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_ibv *ind_tbl);
int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
				const uint8_t *rss_key, uint32_t rss_key_len,
				uint64_t hash_fields,
				const uint16_t *queues, uint32_t queues_n,
				uint32_t tunnel, uint32_t rss_level);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
				const uint8_t *rss_key, uint32_t rss_key_len,
				uint64_t hash_fields,
				const uint16_t *queues, uint32_t queues_n,
				uint32_t tunnel, uint32_t rss_level);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);

int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
void mlx5_tx_queue_release(void *dpdk_txq);
int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_txconf *conf);
struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_verify(struct rte_eth_dev *dev);
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);

extern uint32_t mlx5_ptype_table[];
extern uint8_t mlx5_cksum_table[];
extern uint8_t mlx5_swp_types_table[];

void mlx5_set_ptype_table(void);
void mlx5_set_cksum_table(void);
void mlx5_set_swp_types_table(void);
uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
		       uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
				  uint16_t pkts_n);
uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);

/* Vectorized version of mlx5_rxtx.c */

int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);
uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);

/**
 * Verify or set magic value in CQE.
 *
 * @param cqe
 *   Pointer to CQE.
 *
 * @return
 *   0 the first time, nonzero on subsequent calls.
 */
static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
	static const uint8_t magic[] = "seen";
	volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
	int ret = 1;
	unsigned int i;

	for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
		if (!ret || (*buf)[i] != magic[i]) {
			ret = 0;
			(*buf)[i] = magic[i];
		}
	return ret;
}

/**
 * Check whether CQE is valid.
 *
 * @param cqe
 *   Pointer to CQE.
 * @param cqes_n
 *   Size of completion queue.
 * @param ci
 *   Consumer index.
 *
 * @return
 *   0 on success, 1 on failure.
 */
static __rte_always_inline int
check_cqe(volatile struct mlx5_cqe *cqe,
	  unsigned int cqes_n, const uint16_t ci)
{
	uint16_t idx = ci & cqes_n;
	uint8_t op_own = cqe->op_own;
	uint8_t op_owner = MLX5_CQE_OWNER(op_own);
	uint8_t op_code = MLX5_CQE_OPCODE(op_own);

	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
		return 1; /* No CQE. */
	if ((op_code == MLX5_CQE_RESP_ERR) ||
	    (op_code == MLX5_CQE_REQ_ERR)) {
		volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
		uint8_t syndrome = err_cqe->syndrome;

		if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
		    (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
			return 0;
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR,
				"unexpected CQE error %u (0x%02x) syndrome"
				" 0x%02x",
				op_code, op_code, syndrome);
			rte_hexdump(stderr, "MLX5 Error CQE:",
				    (const void *)((uintptr_t)err_cqe),
				    sizeof(*err_cqe));
		}
		return 1;
	} else if ((op_code != MLX5_CQE_RESP_SEND) &&
		   (op_code != MLX5_CQE_REQ)) {
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
				op_code, op_code);
			rte_hexdump(stderr, "MLX5 CQE:",
				    (const void *)((uintptr_t)cqe),
				    sizeof(*cqe));
		}
		return 1;
	}
	return 0;
}

/**
 * Return the address of the WQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param ci
 *   WQE consumer index.
 *
 * @return
 *   WQE address.
 */
static inline uintptr_t *
tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
{
	ci &= ((1 << txq->wqe_n) - 1);
	return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
}

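/*
 * For example, mlx5_tx_complete() below casts the address returned by
 * tx_mlx5_wqe() to a control segment in order to read back the counter
 * stored in the completed WQE:
 *
 *   ctrl = (volatile struct mlx5_wqe_ctrl *)tx_mlx5_wqe(txq, txq->wqe_pi);
 */
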
/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static __rte_always_inline void
mlx5_tx_complete(struct mlx5_txq_data *txq)
{
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int cqe_n = 1 << txq->cqe_n;
	const unsigned int cqe_cnt = cqe_n - 1;
	uint16_t elts_free = txq->elts_tail;
	uint16_t elts_tail;
	uint16_t cq_ci = txq->cq_ci;
	volatile struct mlx5_cqe *cqe = NULL;
	volatile struct mlx5_wqe_ctrl *ctrl;
	struct rte_mbuf *m, *free[elts_n];
	struct rte_mempool *pool = NULL;
	unsigned int blk_n = 0;

	cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
	if (unlikely(check_cqe(cqe, cqe_n, cq_ci)))
		return;
	if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
	    (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
			rte_hexdump(stderr, "MLX5 TXQ:",
				    (const void *)((uintptr_t)txq->wqes),
				    ((1 << txq->wqe_n) * MLX5_WQE_SIZE));
		}
		return;
	}
	++cq_ci;
	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
	ctrl = (volatile struct mlx5_wqe_ctrl *)
		tx_mlx5_wqe(txq, txq->wqe_pi);
	elts_tail = ctrl->ctrl3;
	assert((elts_tail & elts_m) < (1 << txq->wqe_n));
	/* Free all the mbufs between the old and the new tail. */
	while (elts_free != elts_tail) {
		/* Faster than rte_pktmbuf_free(). */
		m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
		if (likely(m != NULL)) {
			if (likely(m->pool == pool)) {
				free[blk_n++] = m;
			} else {
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk_n);
				free[0] = m;
				pool = m->pool;
				blk_n = 1;
			}
		}
	}
	if (blk_n)
		rte_mempool_put_bulk(pool, (void *)free, blk_n);
#ifndef NDEBUG
	elts_free = txq->elts_tail;
	/* Poisoning. */
	while (elts_free != elts_tail) {
		memset(&(*txq->elts)[elts_free & elts_m],
		       0x66,
		       sizeof((*txq->elts)[elts_free & elts_m]));
		++elts_free;
	}
#endif
	txq->cq_ci = cq_ci;
	txq->elts_tail = elts_tail;
	/* Update the consumer index. */
	rte_compiler_barrier();
	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
}

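/**
 * Look up the memory region (MR) lkey matching the address of an mbuf, for
 * use in the lkey field of WQE data segments.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param mb
 *   Pointer to the mbuf.
 *
 * @return
 *   lkey of the memory region covering the mbuf data.
 */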
static __rte_always_inline uint32_t
mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)

/**
 * Ring TX queue doorbell and flush the update if requested.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 * @param cond
 *   Request for write memory barrier after BlueFlame update.
 */
static __rte_always_inline void
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
		       int cond)
{
	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
	volatile uint64_t *src = ((volatile uint64_t *)wqe);

	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
	/* Ensure ordering between DB record and BF copy. */
	rte_wmb();
	*dst = *src;
	if (cond)
		rte_wmb();
}

/**
 * Ring TX queue doorbell and flush the update by write memory barrier.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}

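/*
 * Typical usage (sketch): a Tx burst routine builds one or more WQEs,
 * advances txq->wqe_ci accordingly, then rings the doorbell once for the
 * whole batch with the last WQE written ("last_wqe" below is a placeholder
 * name, not a symbol of this file):
 *
 *   mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
 */
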
/**
 * Convert mbuf to Verb SWP.
 *
 * @param txq
 *   Pointer to the Tx queue.
 * @param buf
 *   Pointer to the mbuf.
 * @param tso
 *   TSO offloads enabled.
 * @param vlan
 *   VLAN offloads enabled.
 * @param offsets
 *   Pointer to the SWP header offsets.
 * @param swp_types
 *   Pointer to the SWP header types.
 */
static __rte_always_inline void
txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
		uint8_t tso, uint64_t vlan,
		uint8_t *offsets, uint8_t *swp_types)
{
	uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
	uint16_t idx;
	uint16_t off;
	const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
				       PKT_TX_OUTER_IPV6;

	if (likely(!tunnel || !txq->swp_en ||
		   (tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP)))
		return;
	/*
	 * The index should have:
	 * bit[0:1] = PKT_TX_L4_MASK
	 * bit[4] = PKT_TX_IPV6
	 * bit[8] = PKT_TX_OUTER_IPV6
	 * bit[9] = PKT_TX_OUTER_UDP
	 */
	idx = (buf->ol_flags & ol_flags_mask) >> 52;
	if (tunnel == PKT_TX_TUNNEL_UDP)
		idx |= 1 << 9;
	*swp_types = mlx5_swp_types_table[idx];
	off = buf->outer_l2_len + (vlan ? 4 : 0); /* Outer L3 offset. */
	if (tso || (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM))
		offsets[1] = off >> 1;
	off += buf->outer_l3_len; /* Outer L4 offset. */
	if (tunnel == PKT_TX_TUNNEL_UDP)
		offsets[0] = off >> 1;
	off += buf->l2_len; /* Inner L3 offset. */
	if (tso || (buf->ol_flags & PKT_TX_IP_CKSUM))
		offsets[3] = off >> 1;
	off += buf->l3_len; /* Inner L4 offset. */
	if (tso || ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
	    ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM))
		offsets[2] = off >> 1;
}

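/*
 * Note: the device expects software parser offsets in units of two bytes,
 * which is why each byte offset computed above is shifted right by one
 * before being stored.
 */
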
/**
 * Convert the Checksum offloads to Verbs.
 *
 * @param buf
 *   Pointer to the mbuf.
 *
 * @return
 *   Converted checksum flags.
 */
static __rte_always_inline uint8_t
txq_ol_cksum_to_cs(struct rte_mbuf *buf)
{
	uint32_t idx;
	uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
	const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
				       PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;

	/*
	 * The index should have:
	 * bit[0] = PKT_TX_TCP_SEG
	 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
	 * bit[4] = PKT_TX_IP_CKSUM
	 * bit[8] = PKT_TX_OUTER_IP_CKSUM
	 * bit[9] = tunnel
	 */
	idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
	return mlx5_cksum_table[idx];
}

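/*
 * The value returned by txq_ol_cksum_to_cs() is typically written into the
 * checksum flags field of the WQE Ethernet segment when the send descriptor
 * is built.
 */
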
/**
 * Count the number of contiguous single segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets to check.
 * @return
 *   Number of contiguous single segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	/* Count the number of contiguous single segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) > 1)
			break;
	return pos;
}

/**
 * Count the number of contiguous multi-segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets to check.
 * @return
 *   Number of contiguous multi-segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	/* Count the number of contiguous multi-segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) == 1)
			break;
	return pos;
}

#endif /* RTE_PMD_MLX5_RXTX_H_ */