/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#pragma GCC diagnostic error "-Wpedantic"
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
struct mlx5_rxq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when the RX ring is full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};
struct mlx5_txq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t oerrors; /**< Total number of failed transmitted packets. */
};
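
/*
 * Illustrative sketch (not part of this header): with MLX5_PMD_SOFT_COUNTERS
 * enabled, the datapath updates these per-queue counters and the stats
 * callback folds them into struct rte_eth_stats; variable names below are
 * assumptions for the example.
 *
 *	struct rte_eth_stats tmp = { 0 };
 *
 *	if (rxq->stats.idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 *		tmp.q_ipackets[rxq->stats.idx] += rxq->stats.ipackets;
 *		tmp.q_ibytes[rxq->stats.idx] += rxq->stats.ibytes;
 *	}
 *	tmp.ipackets += rxq->stats.ipackets;
 *	tmp.ibytes += rxq->stats.ibytes;
 *	tmp.rx_nombuf += rxq->stats.rx_nombuf;
 */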
/* Flow director queue structure. */
struct fdir_queue {
	struct ibv_qp *qp; /* Associated RX QP. */
	struct ibv_rwq_ind_table *ind_table; /* Indirection table. */
	struct ibv_wq *wq; /* Work queue. */
	struct ibv_cq *cq; /* Completion queue. */
};
/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};
/* RX queue descriptor. */
struct rxq {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int port_id:8; /* Port ID for incoming packets. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int pending_err:1; /* CQE error needs to be handled. */
	unsigned int :7; /* Remaining bits. */
	volatile uint32_t *rq_db; /* RQ doorbell record. */
	volatile uint32_t *cq_db; /* CQ doorbell record. */
	volatile struct mlx5_wqe_data_seg(*wqes)[]; /* RQ entries. */
	volatile struct mlx5_cqe(*cqes)[]; /* Completion queue entries. */
	struct rxq_zip zip; /* Compressed context. */
	struct rte_mbuf *(*elts)[]; /* RX software ring of mbufs. */
	struct rte_mempool *mp; /* Memory pool for allocations. */
	struct mlx5_rxq_stats stats; /* RX queue counters. */
	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
} __rte_cache_aligned;
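
/*
 * Illustrative sketch (not part of this header): the log2 bit-fields above
 * are expanded with shifts in the datapath; e.g. the number of mbufs, the
 * segments used per packet and, assuming every packet consumes sges_n
 * segments, the resulting RQ depth:
 *
 *	const unsigned int elts_n = 1 << rxq->elts_n;   // mbufs in the ring
 *	const unsigned int sges_n = 1 << rxq->sges_n;   // segments per packet
 *	const unsigned int wqe_n = elts_n / sges_n;     // receive WQEs
 *	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1; // CQ index mask
 */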
/* RX queue control descriptor. */
struct rxq_ctrl {
	struct priv *priv; /* Back pointer to private data. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_wq *wq; /* Work Queue. */
	struct fdir_queue *fdir_queue; /* Flow director queue. */
	struct ibv_mr *mr; /* Memory Region (for mp). */
	struct ibv_comp_channel *channel; /* RX interrupt completion channel. */
	unsigned int socket; /* CPU socket ID for allocations. */
	struct rxq rxq; /* Data path structure. */
};
/* Hash RX queue types. */
/* Flow structure with Ethernet specification. It is packed to prevent padding
 * between attr and spec as this layout is expected by libibverbs. */
struct flow_attr_spec_eth {
	struct ibv_flow_attr attr;
	struct ibv_flow_spec_eth spec;
} __attribute__((packed));
/* Define a struct flow_attr_spec_eth object as an array of at least
 * "size" bytes. Room after the first index is normally used to store
 * extra flow specifications. */
#define FLOW_ATTR_SPEC_ETH(name, size) \
	struct flow_attr_spec_eth name \
		[((size) / sizeof(struct flow_attr_spec_eth)) + \
		 !!((size) % sizeof(struct flow_attr_spec_eth))]
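
/*
 * Illustrative sketch (not part of this header): priv_flow_attr() is first
 * called with a NULL attribute pointer to learn the required size, then the
 * macro above reserves enough room and the call is repeated to fill it in.
 * Variable names are assumptions for the example.
 *
 *	size_t attr_size = priv_flow_attr(priv, NULL, 0, type);
 *	FLOW_ATTR_SPEC_ETH(data, attr_size);
 *	struct ibv_flow_attr *attr = &data->attr;
 *
 *	priv_flow_attr(priv, attr, sizeof(data), type);
 */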
/* Initialization data for hash RX queue. */
struct hash_rxq_init {
	uint64_t hash_fields; /* Fields that participate in the hash. */
	uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
	unsigned int flow_priority; /* Flow priority to use. */
	union {
		enum ibv_flow_spec_type type;
		struct ibv_flow_spec_tcp_udp tcp_udp;
		struct ibv_flow_spec_ipv4 ipv4;
		struct ibv_flow_spec_ipv6 ipv6;
		struct ibv_flow_spec_eth eth;
	} flow_spec; /* Flow specification template. */
	const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
};
/* Initialization data for indirection table. */
struct ind_table_init {
	unsigned int max_size; /* Maximum number of WQs. */
	/* Hash RX queues using this table. */
	unsigned int hash_types;
	unsigned int hash_types_n;
};
/* Initialization data for special flows. */
struct special_flow_init {
	uint8_t dst_mac_val[6]; /* Destination MAC address to match. */
	uint8_t dst_mac_mask[6]; /* Mask applied to the destination MAC. */
	unsigned int hash_types; /* Hash RX queue types to target. */
	unsigned int per_vlan:1; /* When set, one flow is created per VLAN. */
};
enum hash_rxq_flow_type {
	HASH_RXQ_FLOW_TYPE_PROMISC,
	HASH_RXQ_FLOW_TYPE_ALLMULTI,
	HASH_RXQ_FLOW_TYPE_BROADCAST,
	HASH_RXQ_FLOW_TYPE_IPV6MULTI,
	HASH_RXQ_FLOW_TYPE_MAC,
};
static inline const char *
hash_rxq_flow_type_str(enum hash_rxq_flow_type flow_type)
{
	switch (flow_type) {
	case HASH_RXQ_FLOW_TYPE_PROMISC:
		return "promiscuous";
	case HASH_RXQ_FLOW_TYPE_ALLMULTI:
		return "allmulticast";
	case HASH_RXQ_FLOW_TYPE_BROADCAST:
		return "broadcast";
	case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
		return "IPv6 multicast";
	case HASH_RXQ_FLOW_TYPE_MAC:
		return "MAC";
	}
	return NULL;
}
struct hash_rxq {
	struct priv *priv; /* Back pointer to private data. */
	struct ibv_qp *qp; /* Hash RX QP. */
	enum hash_rxq_type type; /* Hash RX queue type. */
	/* MAC flow steering rules, one per VLAN ID. */
	struct ibv_flow *mac_flow
		[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
	struct ibv_flow *special_flow
		[MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS];
};
/* TX queue descriptor. */
struct txq {
	uint16_t elts_head; /* Current counter in (*elts)[]. */
	uint16_t elts_tail; /* Counter of first element awaiting completion. */
	uint16_t elts_comp; /* Counter since last completion request. */
	uint16_t mpw_comp; /* WQ index since last completion request. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
	uint16_t cq_pi; /* Producer index for completion queue. */
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_pi; /* Producer index for work queue. */
	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
	uint16_t inline_en:1; /* When set, inlining is enabled. */
	uint16_t tso_en:1; /* When set, hardware TSO is enabled. */
	uint16_t tunnel_en:1;
	/* When set, TX offloads for tunneled packets are supported. */
	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	uint32_t flags; /* Flags for Tx Queue. */
	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
	volatile void *wqes; /* Work queue (use volatile to write into). */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	volatile void *bf_reg; /* Blueflame register. */
	struct {
		uintptr_t start; /* Start address of MR. */
		uintptr_t end; /* End address of MR. */
		struct ibv_mr *mr; /* Memory Region (for mp). */
		uint32_t lkey; /* rte_cpu_to_be_32(mr->lkey). */
	} mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
	uint16_t mr_cache_idx; /* Index of last hit entry. */
	struct rte_mbuf *(*elts)[]; /* TX elements. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;
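
/*
 * Illustrative sketch (not part of this header): elts_head/elts_tail are
 * free-running uint16_t counters, so the number of mbufs still held by the
 * queue and the remaining room are computed modulo 2^16 and masked with the
 * log2 sizes above, e.g.:
 *
 *	const uint16_t elts_n = 1 << txq->elts_n;
 *	const uint16_t elts_m = elts_n - 1;
 *	uint16_t used = (uint16_t)(txq->elts_head - txq->elts_tail);
 *	uint16_t avail = elts_n - used;
 *	struct rte_mbuf *last = (*txq->elts)[(txq->elts_head - 1) & elts_m];
 */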
/* TX queue control descriptor. */
struct txq_ctrl {
	struct priv *priv; /* Back pointer to private data. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_qp *qp; /* Queue Pair. */
	unsigned int socket; /* CPU socket ID for allocations. */
	struct txq txq; /* Data path structure. */
};
extern const struct hash_rxq_init hash_rxq_init[];
extern const unsigned int hash_rxq_init_n;

extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;
size_t priv_flow_attr(struct priv *, struct ibv_flow_attr *,
		      size_t, enum hash_rxq_type);
int priv_create_hash_rxqs(struct priv *);
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
void rxq_cleanup(struct rxq_ctrl *);
int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
			const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);
int priv_rx_intr_vec_enable(struct priv *priv);
void priv_rx_intr_vec_disable(struct priv *priv);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void txq_cleanup(struct txq_ctrl *);
int txq_ctrl_setup(struct rte_eth_dev *, struct txq_ctrl *, uint16_t,
		   unsigned int, const struct rte_eth_txconf *);
int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
			const struct rte_eth_txconf *);
void mlx5_tx_queue_release(void *);
extern uint32_t mlx5_ptype_table[];

void mlx5_set_ptype_table(void);
uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
int mlx5_rx_descriptor_status(void *, uint16_t);
int mlx5_tx_descriptor_status(void *, uint16_t);
/* Vectorized version of mlx5_rxtx.c */
int priv_check_raw_vec_tx_support(struct priv *);
int priv_check_vec_tx_support(struct priv *);
int rxq_check_vec_support(struct rxq *);
int priv_check_vec_rx_support(struct priv *);
uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);
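
/*
 * Illustrative sketch (not part of this header): the support-check helpers
 * above let the PMD pick a burst function at configuration time, roughly:
 *
 *	if (priv_check_vec_tx_support(priv) > 0)
 *		dev->tx_pkt_burst = mlx5_tx_burst_vec;
 *	else
 *		dev->tx_pkt_burst = mlx5_tx_burst;
 *	if (priv_check_vec_rx_support(priv) > 0)
 *		dev->rx_pkt_burst = mlx5_rx_burst_vec;
 *	else
 *		dev->rx_pkt_burst = mlx5_rx_burst;
 */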
struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *);
void txq_mp2mr_iter(struct rte_mempool *, void *);
uint32_t txq_mp2mr_reg(struct txq *, struct rte_mempool *, unsigned int);
/**
 * Verify or set magic value in CQE.
 *
 * @return
 *   0 the first time the CQE is seen, nonzero afterwards.
 */
static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
	static const uint8_t magic[] = "seen";
	volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
	int ret = 1;
	unsigned int i;

	for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
		if (!ret || (*buf)[i] != magic[i]) {
			ret = 0;
			(*buf)[i] = magic[i];
		}
	return ret;
}
/**
 * Check whether CQE is valid.
 *
 * @param cqe
 *   Pointer to CQE.
 * @param cqes_n
 *   Size of completion queue.
 * @param ci
 *   Consumer index.
 *
 * @return
 *   0 on success, 1 on failure.
 */
static __rte_always_inline int
check_cqe(volatile struct mlx5_cqe *cqe,
	  unsigned int cqes_n, const uint16_t ci)
{
	uint16_t idx = ci & cqes_n;
	uint8_t op_own = cqe->op_own;
	uint8_t op_owner = MLX5_CQE_OWNER(op_own);
	uint8_t op_code = MLX5_CQE_OPCODE(op_own);

	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
		return 1; /* No CQE. */
	if ((op_code == MLX5_CQE_RESP_ERR) ||
	    (op_code == MLX5_CQE_REQ_ERR)) {
		volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
		uint8_t syndrome = err_cqe->syndrome;

		if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
		    (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
			return 0;
		if (!check_cqe_seen(cqe)) {
			ERROR("unexpected CQE error %u (0x%02x)"
			      " syndrome 0x%02x",
			      op_code, op_code, syndrome);
			rte_hexdump(stderr, "MLX5 Error CQE:",
				    (const void *)((uintptr_t)err_cqe),
				    sizeof(*err_cqe));
		}
		return 1;
	} else if ((op_code != MLX5_CQE_RESP_SEND) &&
		   (op_code != MLX5_CQE_REQ)) {
		if (!check_cqe_seen(cqe)) {
			ERROR("unexpected CQE opcode %u (0x%02x)",
			      op_code, op_code);
			rte_hexdump(stderr, "MLX5 CQE:",
				    (const void *)((uintptr_t)cqe),
				    sizeof(*cqe));
		}
		return 1;
	}
	return 0;
}
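
/*
 * Illustrative sketch (not part of this header): a poll loop typically walks
 * the CQ ring with a free-running consumer index and stops as soon as
 * check_cqe() reports that no valid CQE is available:
 *
 *	uint16_t cq_ci = txq->cq_ci;
 *	const unsigned int cqe_n = 1 << txq->cqe_n;
 *	const unsigned int cqe_cnt = cqe_n - 1;
 *	volatile struct mlx5_cqe *cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
 *
 *	while (!check_cqe(cqe, cqe_n, cq_ci)) {
 *		// process the completion, then move to the next entry
 *		++cq_ci;
 *		cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
 *	}
 */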
/**
 * Return the address of the WQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param ci
 *   WQE consumer index.
 *
 * @return
 *   WQE address.
 */
static inline uintptr_t *
tx_mlx5_wqe(struct txq *txq, uint16_t ci)
{
	ci &= ((1 << txq->wqe_n) - 1);
	return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
}
/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static __rte_always_inline void
mlx5_tx_complete(struct txq *txq)
{
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int cqe_n = 1 << txq->cqe_n;
	const unsigned int cqe_cnt = cqe_n - 1;
	uint16_t elts_free = txq->elts_tail;
	uint16_t elts_tail;
	uint16_t cq_ci = txq->cq_ci;
	volatile struct mlx5_cqe *cqe = NULL;
	volatile struct mlx5_wqe_ctrl *ctrl;
	struct rte_mbuf *m, *free[elts_n];
	struct rte_mempool *pool = NULL;
	unsigned int blk_n = 0;

	cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
	if (unlikely(check_cqe(cqe, cqe_n, cq_ci)))
		return;
	if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
	    (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
		if (!check_cqe_seen(cqe)) {
			ERROR("unexpected error CQE, TX stopped");
			rte_hexdump(stderr, "MLX5 TXQ:",
				    (const void *)((uintptr_t)txq->wqes),
				    ((1 << txq->wqe_n) * MLX5_WQE_SIZE));
		}
		return;
	}
	++cq_ci;
	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
	ctrl = (volatile struct mlx5_wqe_ctrl *)
		tx_mlx5_wqe(txq, txq->wqe_pi);
	elts_tail = ctrl->ctrl3;
	assert((elts_tail & elts_m) < (1 << txq->wqe_n));
	/* Free all the Tx entries. */
	while (elts_free != elts_tail) {
		m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
		if (likely(m != NULL)) {
			if (likely(m->pool == pool)) {
				free[blk_n++] = m;
			} else {
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk_n);
				free[0] = m;
				pool = m->pool;
				blk_n = 1;
			}
		}
	}
	if (blk_n)
		rte_mempool_put_bulk(pool, (void *)free, blk_n);
#ifndef NDEBUG
	elts_free = txq->elts_tail;
	/* Poisoning. */
	while (elts_free != elts_tail) {
		memset(&(*txq->elts)[elts_free & elts_m],
		       0x66,
		       sizeof((*txq->elts)[elts_free & elts_m]));
		++elts_free;
	}
#endif
	txq->cq_ci = cq_ci;
	txq->elts_tail = elts_tail;
	/* Update the consumer index. */
	rte_compiler_barrier();
	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
}
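
/*
 * Illustrative sketch (not part of this header): burst functions call
 * mlx5_tx_complete() at the start of each burst so that completed mbufs are
 * recycled before new ones are enqueued; the function name below is a
 * hypothetical placeholder.
 *
 *	uint16_t
 *	example_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t n)
 *	{
 *		struct txq *txq = (struct txq *)dpdk_txq;
 *
 *		mlx5_tx_complete(txq);
 *		// build WQEs for up to n packets, then ring the doorbell
 *		return n;
 *	}
 */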
/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
mlx5_tx_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}
/**
 * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[].
 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
 * remove an entry first.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mb
 *   Pointer to mbuf whose Memory Pool a Memory Region lkey must be
 *   returned for.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
static __rte_always_inline uint32_t
mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
{
	uint16_t i = txq->mr_cache_idx;
	uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);

	assert(i < RTE_DIM(txq->mp2mr));
	if (likely(txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr))
		return txq->mp2mr[i].lkey;
	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (unlikely(txq->mp2mr[i].mr == NULL)) {
			/* Unknown MP, add a new MR for it. */
			break;
		}
		if (txq->mp2mr[i].start <= addr &&
		    txq->mp2mr[i].end >= addr) {
			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
			assert(rte_cpu_to_be_32(txq->mp2mr[i].mr->lkey) ==
			       txq->mp2mr[i].lkey);
			txq->mr_cache_idx = i;
			return txq->mp2mr[i].lkey;
		}
	}
	txq->mr_cache_idx = 0;
	return txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
}
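
/*
 * Illustrative sketch (not part of this header): the returned lkey is already
 * in big-endian form and is stored into the data segment of the WQE carrying
 * the mbuf; dseg is assumed to point to a struct mlx5_wqe_data_seg (mlx5dv.h)
 * and buf to the mbuf being sent.
 *
 *	dseg->byte_count = rte_cpu_to_be_32(rte_pktmbuf_data_len(buf));
 *	dseg->lkey = mlx5_tx_mb2mr(txq, buf);
 *	dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
 */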
/**
 * Ring TX queue doorbell.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
{
	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
	volatile uint64_t *src = ((volatile uint64_t *)wqe);

	rte_io_wmb();
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
	/* Ensure ordering between DB record and BF copy. */
	rte_wmb();
	*dst = *src;
}
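
/*
 * Illustrative sketch (not part of this header): for a WQE that fits in a
 * single WQE building block, a burst function fills the entry at wqe_ci,
 * advances the counter and rings the doorbell with a pointer to that WQE so
 * its first 8 bytes are copied to the Blueflame register.
 *
 *	volatile struct mlx5_wqe *wqe;
 *
 *	wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, txq->wqe_ci);
 *	// fill in the control and data segments of the WQE here
 *	txq->wqe_ci++;
 *	mlx5_tx_dbrec(txq, wqe);
 */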
#endif /* RTE_PMD_MLX5_RXTX_H_ */