1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
12 #include <sys/queue.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42 0x2c, 0xc6, 0x81, 0xd1,
43 0x5b, 0xdb, 0xf4, 0xf7,
44 0xfc, 0xa2, 0x83, 0x19,
45 0xdb, 0x1a, 0x3e, 0x94,
46 0x6b, 0x9e, 0x38, 0xd9,
47 0x2c, 0x9c, 0x03, 0xd1,
48 0xad, 0x99, 0x44, 0xa7,
49 0xd9, 0x56, 0x3d, 0x59,
50 0x06, 0x3c, 0x25, 0xf3,
51 0xfc, 0x1f, 0xdc, 0x2a,
54 /* Length of the default RSS hash key. */
55 static_assert(MLX5_RSS_HASH_KEY_LEN ==
56 (unsigned int)sizeof(rss_hash_default_key),
57 "wrong RSS default key size.");
60 * Check whether Multi-Packet RQ can be enabled for the device.
63 * Pointer to Ethernet device.
66 * 1 if supported, negative errno value if not.
69 mlx5_check_mprq_support(struct rte_eth_dev *dev)
71 struct mlx5_priv *priv = dev->data->dev_private;
73 if (priv->config.mprq.enabled &&
74 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
80 * Check whether Multi-Packet RQ is enabled for the Rx queue.
83 * Pointer to receive queue structure.
86 * 0 if disabled, otherwise enabled.
89 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
91 return rxq->strd_num_n > 0;
95 * Check whether Multi-Packet RQ is enabled for the device.
98 * Pointer to Ethernet device.
101 * 0 if disabled, otherwise enabled.
104 mlx5_mprq_enabled(struct rte_eth_dev *dev)
106 struct mlx5_priv *priv = dev->data->dev_private;
110 if (mlx5_check_mprq_support(dev) < 0)
112 /* All the configured queues should be enabled. */
113 for (i = 0; i < priv->rxqs_n; ++i) {
114 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
118 if (mlx5_rxq_mprq_enabled(rxq))
121 /* Multi-Packet RQ can't be partially configured. */
122 assert(n == 0 || n == priv->rxqs_n);
123 return n == priv->rxqs_n;
127 * Check whether LRO is supported and enabled for the device.
130 * Pointer to Ethernet device.
133 * 0 if disabled, 1 if enabled.
136 mlx5_lro_on(struct rte_eth_dev *dev)
138 return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
142 * Allocate RX queue elements for Multi-Packet RQ.
145 * Pointer to RX queue structure.
148 * 0 on success, a negative errno value otherwise and rte_errno is set.
151 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
153 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
154 unsigned int wqe_n = 1 << rxq->elts_n;
158 /* Iterate on segments. */
159 for (i = 0; i <= wqe_n; ++i) {
160 struct mlx5_mprq_buf *buf;
162 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
163 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
168 (*rxq->mprq_bufs)[i] = buf;
170 rxq->mprq_repl = buf;
173 "port %u Rx queue %u allocated and configured %u segments",
174 rxq->port_id, rxq->idx, wqe_n);
177 err = rte_errno; /* Save rte_errno before cleanup. */
179 for (i = 0; (i != wqe_n); ++i) {
180 if ((*rxq->mprq_bufs)[i] != NULL)
181 rte_mempool_put(rxq->mprq_mp,
182 (*rxq->mprq_bufs)[i]);
183 (*rxq->mprq_bufs)[i] = NULL;
185 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
186 rxq->port_id, rxq->idx);
187 rte_errno = err; /* Restore rte_errno. */
192 * Allocate RX queue elements for Single-Packet RQ.
195 * Pointer to RX queue structure.
198  * 0 on success, a negative errno value otherwise and rte_errno is set.
201 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
203 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
204 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
208 /* Iterate on segments. */
209 for (i = 0; (i != elts_n); ++i) {
210 struct rte_mbuf *buf;
212 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
214 DRV_LOG(ERR, "port %u empty mbuf pool",
215 PORT_ID(rxq_ctrl->priv));
219 /* Headroom is reserved by rte_pktmbuf_alloc(). */
220 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
221 /* Buffer is supposed to be empty. */
222 assert(rte_pktmbuf_data_len(buf) == 0);
223 assert(rte_pktmbuf_pkt_len(buf) == 0);
225 /* Only the first segment keeps headroom. */
227 SET_DATA_OFF(buf, 0);
228 PORT(buf) = rxq_ctrl->rxq.port_id;
229 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
230 PKT_LEN(buf) = DATA_LEN(buf);
232 (*rxq_ctrl->rxq.elts)[i] = buf;
234 /* If Rx vector is activated. */
235 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
236 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
237 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
240 /* Initialize default rearm_data for vPMD. */
241 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
242 rte_mbuf_refcnt_set(mbuf_init, 1);
243 mbuf_init->nb_segs = 1;
244 mbuf_init->port = rxq->port_id;
246 * prevent compiler reordering:
247 * rearm_data covers previous fields.
249 rte_compiler_barrier();
250 rxq->mbuf_initializer =
251 *(uint64_t *)&mbuf_init->rearm_data;
252 /* Padding with a fake mbuf for vectorized Rx. */
253 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
254 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
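			/*
			 * Added explanatory note (not in the original source):
			 * the vectorized burst routines process descriptors in
			 * groups of MLX5_VPMD_DESCS_PER_LOOP, so these trailing
			 * slots point at a harmless fake mbuf rather than
			 * letting a partial batch read past the element array.
			 */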
257 "port %u Rx queue %u allocated and configured %u segments"
259 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
260 elts_n / (1 << rxq_ctrl->rxq.sges_n));
263 err = rte_errno; /* Save rte_errno before cleanup. */
265 for (i = 0; (i != elts_n); ++i) {
266 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
267 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
268 (*rxq_ctrl->rxq.elts)[i] = NULL;
270 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
271 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
272 rte_errno = err; /* Restore rte_errno. */
277 * Allocate RX queue elements.
280 * Pointer to RX queue structure.
283  * 0 on success, a negative errno value otherwise and rte_errno is set.
286 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
288 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
289 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
293 * Free RX queue elements for Multi-Packet RQ.
296 * Pointer to RX queue structure.
299 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
301 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
304 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
305 rxq->port_id, rxq->idx);
306 if (rxq->mprq_bufs == NULL)
308 assert(mlx5_rxq_check_vec_support(rxq) < 0);
309 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
310 if ((*rxq->mprq_bufs)[i] != NULL)
311 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
312 (*rxq->mprq_bufs)[i] = NULL;
314 if (rxq->mprq_repl != NULL) {
315 mlx5_mprq_buf_free(rxq->mprq_repl);
316 rxq->mprq_repl = NULL;
321 * Free RX queue elements for Single-Packet RQ.
324 * Pointer to RX queue structure.
327 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
329 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
330 const uint16_t q_n = (1 << rxq->elts_n);
331 const uint16_t q_mask = q_n - 1;
332 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
335 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
336 PORT_ID(rxq_ctrl->priv), rxq->idx);
337 if (rxq->elts == NULL)
340 	 * Some mbufs in the ring belong to the application. They cannot be
343 if (mlx5_rxq_check_vec_support(rxq) > 0) {
344 for (i = 0; i < used; ++i)
345 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
346 rxq->rq_pi = rxq->rq_ci;
348 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
349 if ((*rxq->elts)[i] != NULL)
350 rte_pktmbuf_free_seg((*rxq->elts)[i]);
351 (*rxq->elts)[i] = NULL;
356 * Free RX queue elements.
359 * Pointer to RX queue structure.
362 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
364 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
365 rxq_free_elts_mprq(rxq_ctrl);
367 rxq_free_elts_sprq(rxq_ctrl);
371 * Returns the per-queue supported offloads.
374 * Pointer to Ethernet device.
377 * Supported Rx offloads.
380 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
382 struct mlx5_priv *priv = dev->data->dev_private;
383 struct mlx5_dev_config *config = &priv->config;
384 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
385 DEV_RX_OFFLOAD_TIMESTAMP |
386 DEV_RX_OFFLOAD_JUMBO_FRAME);
388 if (config->hw_fcs_strip)
389 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
392 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
393 DEV_RX_OFFLOAD_UDP_CKSUM |
394 DEV_RX_OFFLOAD_TCP_CKSUM);
395 if (config->hw_vlan_strip)
396 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
402 * Returns the per-port supported offloads.
405 * Pointer to Ethernet device.
408 * Supported Rx offloads.
411 mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
413 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
415 if (MLX5_LRO_SUPPORTED(dev))
416 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
421 * Verify if the queue can be released.
424 * Pointer to Ethernet device.
429 * 1 if the queue can be released
430  * 0 if the queue cannot be released because there are still references to it.
431  * A negative errno value is returned and rte_errno is set if the queue doesn't exist.
434 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
436 struct mlx5_priv *priv = dev->data->dev_private;
437 struct mlx5_rxq_ctrl *rxq_ctrl;
439 if (!(*priv->rxqs)[idx]) {
443 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
444 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
450 * Pointer to Ethernet device structure.
454 * Number of descriptors to configure in queue.
456 * NUMA socket on which memory must be allocated.
458 * Thresholds parameters.
460 * Memory pool for buffer allocations.
463 * 0 on success, a negative errno value otherwise and rte_errno is set.
466 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
467 unsigned int socket, const struct rte_eth_rxconf *conf,
468 struct rte_mempool *mp)
470 struct mlx5_priv *priv = dev->data->dev_private;
471 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
472 struct mlx5_rxq_ctrl *rxq_ctrl =
473 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
475 if (!rte_is_power_of_2(desc)) {
476 desc = 1 << log2above(desc);
478 "port %u increased number of descriptors in Rx queue %u"
479 " to the next power of two (%d)",
480 dev->data->port_id, idx, desc);
482 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
483 dev->data->port_id, idx, desc);
484 if (idx >= priv->rxqs_n) {
485 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
486 dev->data->port_id, idx, priv->rxqs_n);
487 rte_errno = EOVERFLOW;
490 if (!mlx5_rxq_releasable(dev, idx)) {
491 DRV_LOG(ERR, "port %u unable to release queue index %u",
492 dev->data->port_id, idx);
496 mlx5_rxq_release(dev, idx);
497 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
499 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
500 dev->data->port_id, idx);
504 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
505 dev->data->port_id, idx);
506 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
511 * DPDK callback to release a RX queue.
514 * Generic RX queue pointer.
517 mlx5_rx_queue_release(void *dpdk_rxq)
519 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
520 struct mlx5_rxq_ctrl *rxq_ctrl;
521 struct mlx5_priv *priv;
525 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
526 priv = rxq_ctrl->priv;
527 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
528 rte_panic("port %u Rx queue %u is still used by a flow and"
529 " cannot be removed\n",
530 PORT_ID(priv), rxq->idx);
531 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
535 * Get an Rx queue Verbs object.
538 * Pointer to Ethernet device.
540  * Queue index in DPDK Rx queue array.
543 * The Verbs object if it exists.
545 static struct mlx5_rxq_ibv *
546 mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
548 struct mlx5_priv *priv = dev->data->dev_private;
549 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
550 struct mlx5_rxq_ctrl *rxq_ctrl;
552 if (idx >= priv->rxqs_n)
556 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
558 rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
559 return rxq_ctrl->ibv;
563 * Release an Rx verbs queue object.
566 * Verbs Rx queue object.
569 * 1 while a reference on it exists, 0 when freed.
572 mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
577 if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
578 rxq_free_elts(rxq_ibv->rxq_ctrl);
579 claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
580 claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
581 if (rxq_ibv->channel)
582 claim_zero(mlx5_glue->destroy_comp_channel
584 LIST_REMOVE(rxq_ibv, next);
592 * Allocate queue vector and fill epoll fd list for Rx interrupts.
595 * Pointer to Ethernet device.
598 * 0 on success, a negative errno value otherwise and rte_errno is set.
601 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
603 struct mlx5_priv *priv = dev->data->dev_private;
605 unsigned int rxqs_n = priv->rxqs_n;
606 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
607 unsigned int count = 0;
608 struct rte_intr_handle *intr_handle = dev->intr_handle;
610 if (!dev->data->dev_conf.intr_conf.rxq)
612 mlx5_rx_intr_vec_disable(dev);
613 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
614 if (intr_handle->intr_vec == NULL) {
616 "port %u failed to allocate memory for interrupt"
617 " vector, Rx interrupts will not be supported",
622 intr_handle->type = RTE_INTR_HANDLE_EXT;
623 for (i = 0; i != n; ++i) {
624 /* This rxq ibv must not be released in this function. */
625 struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
630 /* Skip queues that cannot request interrupts. */
631 if (!rxq_ibv || !rxq_ibv->channel) {
632 /* Use invalid intr_vec[] index to disable entry. */
633 intr_handle->intr_vec[i] =
634 RTE_INTR_VEC_RXTX_OFFSET +
635 RTE_MAX_RXTX_INTR_VEC_ID;
638 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
640 "port %u too many Rx queues for interrupt"
641 " vector size (%d), Rx interrupts cannot be"
643 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
644 mlx5_rx_intr_vec_disable(dev);
648 fd = rxq_ibv->channel->fd;
649 flags = fcntl(fd, F_GETFL);
650 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
654 "port %u failed to make Rx interrupt file"
655 " descriptor %d non-blocking for queue index"
657 dev->data->port_id, fd, i);
658 mlx5_rx_intr_vec_disable(dev);
661 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
662 intr_handle->efds[count] = fd;
666 mlx5_rx_intr_vec_disable(dev);
668 intr_handle->nb_efd = count;
673  * Clean up the Rx interrupt handler.
676 * Pointer to Ethernet device.
679 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
681 struct mlx5_priv *priv = dev->data->dev_private;
682 struct rte_intr_handle *intr_handle = dev->intr_handle;
684 unsigned int rxqs_n = priv->rxqs_n;
685 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
687 if (!dev->data->dev_conf.intr_conf.rxq)
689 if (!intr_handle->intr_vec)
691 for (i = 0; i != n; ++i) {
692 struct mlx5_rxq_ctrl *rxq_ctrl;
693 struct mlx5_rxq_data *rxq_data;
695 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
696 RTE_MAX_RXTX_INTR_VEC_ID)
699 	 * Need to access the queue directly to release the reference
700 	 * kept in mlx5_rx_intr_vec_enable().
702 rxq_data = (*priv->rxqs)[i];
703 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
705 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
708 rte_intr_free_epoll_fd(intr_handle);
709 if (intr_handle->intr_vec)
710 free(intr_handle->intr_vec);
711 intr_handle->nb_efd = 0;
712 intr_handle->intr_vec = NULL;
716  * MLX5 CQ notification.
719 * Pointer to receive queue structure.
721  * Sequence number per receive queue.
724 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
727 uint32_t doorbell_hi;
729 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
731 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
732 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
733 doorbell = (uint64_t)doorbell_hi << 32;
734 doorbell |= rxq->cqn;
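	/*
	 * Added note (best-effort description, not from the original source):
	 * the 64-bit value composed above carries the arm command word
	 * (sequence number and current CQ consumer index) in its upper 32
	 * bits and the CQ number in its lower 32 bits; the same value is
	 * then written to the CQ doorbell register below.
	 */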
735 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
736 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
737 cq_db_reg, rxq->uar_lock_cq);
741 * DPDK callback for Rx queue interrupt enable.
744 * Pointer to Ethernet device structure.
749 * 0 on success, a negative errno value otherwise and rte_errno is set.
752 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
754 struct mlx5_priv *priv = dev->data->dev_private;
755 struct mlx5_rxq_data *rxq_data;
756 struct mlx5_rxq_ctrl *rxq_ctrl;
758 rxq_data = (*priv->rxqs)[rx_queue_id];
763 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
765 struct mlx5_rxq_ibv *rxq_ibv;
767 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
772 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
773 mlx5_rxq_ibv_release(rxq_ibv);
779 * DPDK callback for Rx queue interrupt disable.
782 * Pointer to Ethernet device structure.
787 * 0 on success, a negative errno value otherwise and rte_errno is set.
790 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
792 struct mlx5_priv *priv = dev->data->dev_private;
793 struct mlx5_rxq_data *rxq_data;
794 struct mlx5_rxq_ctrl *rxq_ctrl;
795 struct mlx5_rxq_ibv *rxq_ibv = NULL;
796 struct ibv_cq *ev_cq;
800 rxq_data = (*priv->rxqs)[rx_queue_id];
805 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
808 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
813 ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
814 if (ret || ev_cq != rxq_ibv->cq) {
818 rxq_data->cq_arm_sn++;
819 mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
820 mlx5_rxq_ibv_release(rxq_ibv);
823 ret = rte_errno; /* Save rte_errno before cleanup. */
825 mlx5_rxq_ibv_release(rxq_ibv);
826 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
827 dev->data->port_id, rx_queue_id);
828 rte_errno = ret; /* Restore rte_errno. */
833 * Create the Rx queue Verbs object.
836 * Pointer to Ethernet device.
838  * Queue index in DPDK Rx queue array.
841 * The Verbs object initialised, NULL otherwise and rte_errno is set.
843 struct mlx5_rxq_ibv *
844 mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
846 struct mlx5_priv *priv = dev->data->dev_private;
847 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
848 struct mlx5_rxq_ctrl *rxq_ctrl =
849 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
850 struct ibv_wq_attr mod;
853 struct ibv_cq_init_attr_ex ibv;
854 struct mlx5dv_cq_init_attr mlx5;
857 struct ibv_wq_init_attr ibv;
858 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
859 struct mlx5dv_wq_init_attr mlx5;
864 unsigned int wqe_n = 1 << rxq_data->elts_n;
865 struct mlx5_rxq_ibv *tmpl = NULL;
866 struct mlx5dv_cq cq_info;
867 struct mlx5dv_rwq rwq;
869 struct mlx5dv_obj obj;
870 struct mlx5_dev_config *config = &priv->config;
871 const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
874 assert(!rxq_ctrl->ibv);
875 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
876 priv->verbs_alloc_ctx.obj = rxq_ctrl;
877 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
881 "port %u Rx queue %u cannot allocate verbs resources",
882 dev->data->port_id, rxq_data->idx);
886 tmpl->rxq_ctrl = rxq_ctrl;
888 tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
889 if (!tmpl->channel) {
890 DRV_LOG(ERR, "port %u: comp channel creation failure",
897 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
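		/*
		 * Added note (assumed rationale): with Multi-Packet RQ every
		 * WQE can complete up to one packet per stride, so the CQ is
		 * sized for one CQE per stride in the worst case.
		 */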
900 attr.cq.ibv = (struct ibv_cq_init_attr_ex){
902 .channel = tmpl->channel,
905 attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
908 if (config->cqe_comp && !rxq_data->hw_timestamp) {
909 attr.cq.mlx5.comp_mask |=
910 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
911 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
912 attr.cq.mlx5.cqe_comp_res_format =
913 mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
914 MLX5DV_CQE_RES_FORMAT_HASH;
916 attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
919 * For vectorized Rx, it must not be doubled in order to
920 * make cq_ci and rq_ci aligned.
922 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
923 attr.cq.ibv.cqe *= 2;
924 } else if (config->cqe_comp && rxq_data->hw_timestamp) {
926 "port %u Rx CQE compression is disabled for HW"
930 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
931 if (config->cqe_pad) {
932 attr.cq.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
933 attr.cq.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
936 tmpl->cq = mlx5_glue->cq_ex_to_cq
937 (mlx5_glue->dv_create_cq(priv->sh->ctx, &attr.cq.ibv,
939 if (tmpl->cq == NULL) {
940 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
941 dev->data->port_id, idx);
945 DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
946 dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
947 DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
948 dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
949 attr.wq.ibv = (struct ibv_wq_init_attr){
950 .wq_context = NULL, /* Could be useful in the future. */
951 .wq_type = IBV_WQT_RQ,
952 /* Max number of outstanding WRs. */
953 .max_wr = wqe_n >> rxq_data->sges_n,
954 /* Max number of scatter/gather elements in a WR. */
955 .max_sge = 1 << rxq_data->sges_n,
959 IBV_WQ_FLAGS_CVLAN_STRIPPING |
961 .create_flags = (rxq_data->vlan_strip ?
962 IBV_WQ_FLAGS_CVLAN_STRIPPING :
965 /* By default, FCS (CRC) is stripped by hardware. */
966 if (rxq_data->crc_present) {
967 attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
968 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
970 if (config->hw_padding) {
971 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
972 attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
973 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
974 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
975 attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
976 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
979 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
980 attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
984 struct mlx5dv_striding_rq_init_attr *mprq_attr =
985 &attr.wq.mlx5.striding_rq_attrs;
987 attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
988 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
989 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
990 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
991 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
994 tmpl->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &attr.wq.ibv,
997 tmpl->wq = mlx5_glue->create_wq(priv->sh->ctx, &attr.wq.ibv);
999 if (tmpl->wq == NULL) {
1000 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
1001 dev->data->port_id, idx);
1006 	 * Make sure the number of WRs*SGEs matches expectations since a queue
1007 	 * cannot allocate more than "desc" buffers.
1009 if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
1010 attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
1012 "port %u Rx queue %u requested %u*%u but got %u*%u"
1014 dev->data->port_id, idx,
1015 wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
1016 attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
1020 /* Change queue state to ready. */
1021 mod = (struct ibv_wq_attr){
1022 .attr_mask = IBV_WQ_ATTR_STATE,
1023 .wq_state = IBV_WQS_RDY,
1025 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
1028 "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
1029 dev->data->port_id, idx);
1033 obj.cq.in = tmpl->cq;
1034 obj.cq.out = &cq_info;
1035 obj.rwq.in = tmpl->wq;
1037 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
1042 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1044 "port %u wrong MLX5_CQE_SIZE environment variable"
1045 " value: it should be set to %u",
1046 dev->data->port_id, RTE_CACHE_LINE_SIZE);
1050 /* Fill the rings. */
1051 rxq_data->wqes = rwq.buf;
1052 rxq_data->rq_db = rwq.dbrec;
1053 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
1054 rxq_data->cq_db = cq_info.dbrec;
1055 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1056 rxq_data->cq_uar = cq_info.cq_uar;
1057 rxq_data->cqn = cq_info.cqn;
1058 rxq_data->cq_arm_sn = 0;
1059 mlx5_rxq_initialize(rxq_data);
1060 rxq_data->cq_ci = 0;
1061 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1062 idx, (void *)&tmpl);
1063 rte_atomic32_inc(&tmpl->refcnt);
1064 LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
1065 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1069 ret = rte_errno; /* Save rte_errno before cleanup. */
1071 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1073 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1075 claim_zero(mlx5_glue->destroy_comp_channel
1078 rte_errno = ret; /* Restore rte_errno. */
1080 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1085  * Verify the Verbs Rx queue list is empty.
1088  * Pointer to Ethernet device.
1091  * The number of objects not released.
1094 mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
1096 struct mlx5_priv *priv = dev->data->dev_private;
1098 struct mlx5_rxq_ibv *rxq_ibv;
1100 LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
1101 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
1102 dev->data->port_id, rxq_ibv->rxq_ctrl->rxq.idx);
1109 * Callback function to initialize mbufs for Multi-Packet RQ.
1112 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
1113 void *_m, unsigned int i __rte_unused)
1115 struct mlx5_mprq_buf *buf = _m;
1117 memset(_m, 0, sizeof(*buf));
1119 rte_atomic16_set(&buf->refcnt, 1);
1123 * Free mempool of Multi-Packet RQ.
1126 * Pointer to Ethernet device.
1129 * 0 on success, negative errno value on failure.
1132 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1134 struct mlx5_priv *priv = dev->data->dev_private;
1135 struct rte_mempool *mp = priv->mprq_mp;
1140 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1141 dev->data->port_id, mp->name);
1143 	 * If a buffer in the pool has been externally attached to an mbuf and
1144 	 * it is still in use by the application, destroying the Rx queue can
1145 	 * corrupt the packet. This is unlikely to happen, but it can if the
1146 	 * application dynamically creates and destroys queues while holding Rx packets.
1148 	 * TODO: This is unavoidable for now because the mempool for Multi-Packet
1149 	 * RQ isn't provided by the application but managed by the PMD.
1151 if (!rte_mempool_full(mp)) {
1153 "port %u mempool for Multi-Packet RQ is still in use",
1154 dev->data->port_id);
1158 rte_mempool_free(mp);
1159 /* Unset mempool for each Rx queue. */
1160 for (i = 0; i != priv->rxqs_n; ++i) {
1161 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1165 rxq->mprq_mp = NULL;
1167 priv->mprq_mp = NULL;
1172 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1173  * mempool. If already allocated, reuse it if there are enough elements.
1174 * Otherwise, resize it.
1177 * Pointer to Ethernet device.
1180 * 0 on success, negative errno value on failure.
1183 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1185 struct mlx5_priv *priv = dev->data->dev_private;
1186 struct rte_mempool *mp = priv->mprq_mp;
1187 char name[RTE_MEMPOOL_NAMESIZE];
1188 unsigned int desc = 0;
1189 unsigned int buf_len;
1190 unsigned int obj_num;
1191 unsigned int obj_size;
1192 unsigned int strd_num_n = 0;
1193 unsigned int strd_sz_n = 0;
1196 if (!mlx5_mprq_enabled(dev))
1198 /* Count the total number of descriptors configured. */
1199 for (i = 0; i != priv->rxqs_n; ++i) {
1200 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1204 desc += 1 << rxq->elts_n;
1205 /* Get the max number of strides. */
1206 if (strd_num_n < rxq->strd_num_n)
1207 strd_num_n = rxq->strd_num_n;
1208 /* Get the max size of a stride. */
1209 if (strd_sz_n < rxq->strd_sz_n)
1210 strd_sz_n = rxq->strd_sz_n;
1212 assert(strd_num_n && strd_sz_n);
1213 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1214 obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
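	/*
	 * Worked example (illustrative only, hypothetical values): with
	 * 2^6 strides of 2^11 bytes each, buf_len = 64 * 2048 bytes (128 KiB)
	 * and obj_size adds the mlx5_mprq_buf header on top of that.
	 */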
1216 	 * Received packets can be either memcpy'd or externally referenced. When
1217 	 * a packet is attached to an mbuf as an external buffer, it isn't
1218 	 * possible to predict how the buffers will be queued by the
1219 	 * application, so there is no way to pre-allocate exactly the needed
1220 	 * buffers in advance; enough buffers have to be prepared speculatively.
1222 	 * In the data path, if this mempool is depleted, the PMD will memcpy
1223 	 * received packets into buffers provided by the application (rxq->mp)
1224 	 * until this mempool has free entries again.
1227 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
1229 	 * rte_mempool_create_empty() has a sanity check that refuses a cache
1230 	 * size that is too large compared to the number of elements.
1231 	 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the
1232 	 * constant 2 is used here instead.
1234 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
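	/*
	 * Sizing sketch (illustrative, hypothetical values): four Rx queues
	 * of 512 descriptors each give desc = 2048, so
	 * obj_num = 2048 + MLX5_MPRQ_MP_CACHE_SZ * 4, clamped to at least
	 * twice the per-lcore cache size.
	 */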
1235 	/* Check if a mempool is already allocated and if it can be reused. */
1236 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1237 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1238 dev->data->port_id, mp->name);
1241 } else if (mp != NULL) {
1242 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1243 dev->data->port_id, mp->name);
1245 		 * If freeing fails, the mempool may still be in use; there is no
1246 		 * choice but to keep using the existing one. On buffer underrun,
1247 		 * packets will be memcpy'd instead of external buffer
1250 if (mlx5_mprq_free_mp(dev)) {
1251 if (mp->elt_size >= obj_size)
1257 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1258 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1259 0, NULL, NULL, mlx5_mprq_buf_init, NULL,
1260 dev->device->numa_node, 0);
1263 "port %u failed to allocate a mempool for"
1264 " Multi-Packet RQ, count=%u, size=%u",
1265 dev->data->port_id, obj_num, obj_size);
1271 /* Set mempool for each Rx queue. */
1272 for (i = 0; i != priv->rxqs_n; ++i) {
1273 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1279 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1280 dev->data->port_id);
1285 * Create a DPDK Rx queue.
1288 * Pointer to Ethernet device.
1292 * Number of descriptors to configure in queue.
1294 * NUMA socket on which memory must be allocated.
1297 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1299 struct mlx5_rxq_ctrl *
1300 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1301 unsigned int socket, const struct rte_eth_rxconf *conf,
1302 struct rte_mempool *mp)
1304 struct mlx5_priv *priv = dev->data->dev_private;
1305 struct mlx5_rxq_ctrl *tmpl;
1306 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1307 unsigned int mprq_stride_size;
1308 struct mlx5_dev_config *config = &priv->config;
1310 * Always allocate extra slots, even if eventually
1311 * the vector Rx will not be used.
1314 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1315 uint64_t offloads = conf->offloads |
1316 dev->data->dev_conf.rxmode.offloads;
1317 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1319 tmpl = rte_calloc_socket("RXQ", 1,
1321 desc_n * sizeof(struct rte_mbuf *),
1327 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1328 MLX5_MR_BTREE_CACHE_N, socket)) {
1329 /* rte_errno is already set. */
1332 tmpl->socket = socket;
1333 if (dev->data->dev_conf.intr_conf.rxq)
1336 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1337 * following conditions are met:
1338 * - MPRQ is enabled.
1339 * - The number of descs is more than the number of strides.
1340 * - max_rx_pkt_len plus overhead is less than the max size of a
1342 * Otherwise, enable Rx scatter if necessary.
1344 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
1346 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1347 sizeof(struct rte_mbuf_ext_shared_info) +
1348 RTE_PKTMBUF_HEADROOM;
1350 desc > (1U << config->mprq.stride_num_n) &&
1351 mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
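		/*
		 * Example of the check above (hypothetical numbers): a stride
		 * must hold max_rx_pkt_len plus RTE_PKTMBUF_HEADROOM plus the
		 * external-buffer shared info, e.g. a bit over 1.6 KiB for a
		 * 1500-byte frame, and the queue must have more descriptors
		 * than the configured number of strides per WQE.
		 */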
1352 /* TODO: Rx scatter isn't supported yet. */
1353 tmpl->rxq.sges_n = 0;
1354 /* Trim the number of descs needed. */
1355 desc >>= config->mprq.stride_num_n;
1356 tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
1357 tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1358 config->mprq.min_stride_size_n);
1359 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1360 tmpl->rxq.mprq_max_memcpy_len =
1361 RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
1362 config->mprq.max_memcpy_len);
1364 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1365 " strd_num_n = %u, strd_sz_n = %u",
1366 dev->data->port_id, idx,
1367 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1368 } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
1369 (mb_len - RTE_PKTMBUF_HEADROOM)) {
1370 tmpl->rxq.sges_n = 0;
1371 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1373 RTE_PKTMBUF_HEADROOM +
1374 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1375 unsigned int sges_n;
1378 * Determine the number of SGEs needed for a full packet
1379 * and round it to the next power of two.
1381 sges_n = log2above((size / mb_len) + !!(size % mb_len));
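		/*
		 * Worked example (illustrative, hypothetical values): with a
		 * 2048-byte mbuf data room and size = 9128 (128 bytes of
		 * headroom plus a 9000-byte max_rx_pkt_len), 9128 / 2048 = 4
		 * with a remainder, so 5 buffers are needed and
		 * sges_n = log2above(5) = 3, i.e. 8 SGEs per packet.
		 */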
1382 tmpl->rxq.sges_n = sges_n;
1383 /* Make sure rxq.sges_n did not overflow. */
1384 size = mb_len * (1 << tmpl->rxq.sges_n);
1385 size -= RTE_PKTMBUF_HEADROOM;
1386 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
1388 "port %u too many SGEs (%u) needed to handle"
1389 " requested maximum packet size %u",
1392 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1393 rte_errno = EOVERFLOW;
1398 "port %u the requested maximum Rx packet size (%u) is"
1399 " larger than a single mbuf (%u) and scattered mode has"
1400 " not been requested",
1402 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1403 mb_len - RTE_PKTMBUF_HEADROOM);
1405 if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1407 "port %u MPRQ is requested but cannot be enabled"
1408 " (requested: desc = %u, stride_sz = %u,"
1409 " supported: min_stride_num = %u, max_stride_sz = %u).",
1410 dev->data->port_id, desc, mprq_stride_size,
1411 (1 << config->mprq.stride_num_n),
1412 (1 << config->mprq.max_stride_size_n));
1413 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1414 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1415 if (desc % (1 << tmpl->rxq.sges_n)) {
1417 "port %u number of Rx queue descriptors (%u) is not a"
1418 " multiple of SGEs per packet (%u)",
1421 1 << tmpl->rxq.sges_n);
1425 /* Toggle RX checksum offload if hardware supports it. */
1426 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1427 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1428 /* Configure VLAN stripping. */
1429 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1430 /* By default, FCS (CRC) is stripped by hardware. */
1431 tmpl->rxq.crc_present = 0;
1432 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1433 if (config->hw_fcs_strip) {
1434 tmpl->rxq.crc_present = 1;
1437 "port %u CRC stripping has been disabled but will"
1438 " still be performed by hardware, make sure MLNX_OFED"
1439 " and firmware are up to date",
1440 dev->data->port_id);
1444 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1445 " incoming frames to hide it",
1447 tmpl->rxq.crc_present ? "disabled" : "enabled",
1448 tmpl->rxq.crc_present << 2);
1450 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1451 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1452 tmpl->rxq.port_id = dev->data->port_id;
1455 tmpl->rxq.elts_n = log2above(desc);
1456 tmpl->rxq.rq_repl_thresh =
1457 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
1459 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1461 tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
1463 tmpl->rxq.idx = idx;
1464 rte_atomic32_inc(&tmpl->refcnt);
1465 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1476 * Pointer to Ethernet device.
1481 * A pointer to the queue if it exists, NULL otherwise.
1483 struct mlx5_rxq_ctrl *
1484 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1486 struct mlx5_priv *priv = dev->data->dev_private;
1487 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1489 if ((*priv->rxqs)[idx]) {
1490 rxq_ctrl = container_of((*priv->rxqs)[idx],
1491 struct mlx5_rxq_ctrl,
1493 mlx5_rxq_ibv_get(dev, idx);
1494 rte_atomic32_inc(&rxq_ctrl->refcnt);
1500 * Release a Rx queue.
1503 * Pointer to Ethernet device.
1508 * 1 while a reference on it exists, 0 when freed.
1511 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1513 struct mlx5_priv *priv = dev->data->dev_private;
1514 struct mlx5_rxq_ctrl *rxq_ctrl;
1516 if (!(*priv->rxqs)[idx])
1518 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1519 assert(rxq_ctrl->priv);
1520 if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
1521 rxq_ctrl->ibv = NULL;
1522 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1523 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1524 LIST_REMOVE(rxq_ctrl, next);
1526 (*priv->rxqs)[idx] = NULL;
1533  * Verify the Rx queue list is empty.
1536  * Pointer to Ethernet device.
1539  * The number of objects not released.
1542 mlx5_rxq_verify(struct rte_eth_dev *dev)
1544 struct mlx5_priv *priv = dev->data->dev_private;
1545 struct mlx5_rxq_ctrl *rxq_ctrl;
1548 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1549 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1550 dev->data->port_id, rxq_ctrl->rxq.idx);
1557 * Create an indirection table.
1560 * Pointer to Ethernet device.
1562  * Queues entering the indirection table.
1564 * Number of queues in the array.
1567 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1569 static struct mlx5_ind_table_ibv *
1570 mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
1573 struct mlx5_priv *priv = dev->data->dev_private;
1574 struct mlx5_ind_table_ibv *ind_tbl;
1575 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1576 log2above(queues_n) :
1577 log2above(priv->config.ind_table_max_size);
1578 struct ibv_wq *wq[1 << wq_n];
1582 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1583 queues_n * sizeof(uint16_t), 0);
1588 for (i = 0; i != queues_n; ++i) {
1589 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1593 wq[i] = rxq->ibv->wq;
1594 ind_tbl->queues[i] = queues[i];
1596 ind_tbl->queues_n = queues_n;
1597 /* Finalise indirection table. */
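	/*
	 * Added note: when queues_n is not a power of two the table is sized
	 * to the device's maximum indirection table size, and the loop below
	 * pads the remaining entries by cycling over the configured queues
	 * (e.g. three queues repeat as 0, 1, 2, 0, 1, 2, ...).
	 */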
1598 for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
1600 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1602 &(struct ibv_rwq_ind_table_init_attr){
1603 .log_ind_tbl_size = wq_n,
1607 if (!ind_tbl->ind_table) {
1611 rte_atomic32_inc(&ind_tbl->refcnt);
1612 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1616 DEBUG("port %u cannot create indirection table", dev->data->port_id);
1621 * Get an indirection table.
1624 * Pointer to Ethernet device.
1626  * Queues entering the indirection table.
1628 * Number of queues in the array.
1631 * An indirection table if found.
1633 static struct mlx5_ind_table_ibv *
1634 mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
1637 struct mlx5_priv *priv = dev->data->dev_private;
1638 struct mlx5_ind_table_ibv *ind_tbl;
1640 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1641 if ((ind_tbl->queues_n == queues_n) &&
1642 (memcmp(ind_tbl->queues, queues,
1643 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1650 rte_atomic32_inc(&ind_tbl->refcnt);
1651 for (i = 0; i != ind_tbl->queues_n; ++i)
1652 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1658 * Release an indirection table.
1661 * Pointer to Ethernet device.
1663 * Indirection table to release.
1666 * 1 while a reference on it exists, 0 when freed.
1669 mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
1670 struct mlx5_ind_table_ibv *ind_tbl)
1674 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1675 claim_zero(mlx5_glue->destroy_rwq_ind_table
1676 (ind_tbl->ind_table));
1677 for (i = 0; i != ind_tbl->queues_n; ++i)
1678 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1679 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1680 LIST_REMOVE(ind_tbl, next);
1688  * Verify the Verbs indirection table list is empty.
1691  * Pointer to Ethernet device.
1694  * The number of objects not released.
1697 mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
1699 struct mlx5_priv *priv = dev->data->dev_private;
1700 struct mlx5_ind_table_ibv *ind_tbl;
1703 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1705 "port %u Verbs indirection table %p still referenced",
1706 dev->data->port_id, (void *)ind_tbl);
1713 * Create an Rx Hash queue.
1716 * Pointer to Ethernet device.
1718 * RSS key for the Rx hash queue.
1719 * @param rss_key_len
1721 * @param hash_fields
1722 * Verbs protocol hash field to make the RSS on.
1724  * Queues entering the hash queue. If hash_fields is empty, only the
1725  * first queue index is used for the indirection table.
1732 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1735 mlx5_hrxq_new(struct rte_eth_dev *dev,
1736 const uint8_t *rss_key, uint32_t rss_key_len,
1737 uint64_t hash_fields,
1738 const uint16_t *queues, uint32_t queues_n,
1739 int tunnel __rte_unused)
1741 struct mlx5_priv *priv = dev->data->dev_private;
1742 struct mlx5_hrxq *hrxq;
1743 struct mlx5_ind_table_ibv *ind_tbl;
1745 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1746 struct mlx5dv_qp_init_attr qp_init_attr;
1750 queues_n = hash_fields ? queues_n : 1;
1751 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1753 ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
1758 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1759 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
1761 qp_init_attr.comp_mask =
1762 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
1763 qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
1765 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1766 if (dev->data->dev_conf.lpbk_mode) {
1767 		/* Allow packets sent from the NIC to loop back w/o source MAC check. */
1768 qp_init_attr.comp_mask |=
1769 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
1770 qp_init_attr.create_flags |=
1771 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
1774 qp = mlx5_glue->dv_create_qp
1776 &(struct ibv_qp_init_attr_ex){
1777 .qp_type = IBV_QPT_RAW_PACKET,
1779 IBV_QP_INIT_ATTR_PD |
1780 IBV_QP_INIT_ATTR_IND_TABLE |
1781 IBV_QP_INIT_ATTR_RX_HASH,
1782 .rx_hash_conf = (struct ibv_rx_hash_conf){
1783 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1784 .rx_hash_key_len = rss_key_len,
1785 .rx_hash_key = (void *)(uintptr_t)rss_key,
1786 .rx_hash_fields_mask = hash_fields,
1788 .rwq_ind_tbl = ind_tbl->ind_table,
1793 qp = mlx5_glue->create_qp_ex
1795 &(struct ibv_qp_init_attr_ex){
1796 .qp_type = IBV_QPT_RAW_PACKET,
1798 IBV_QP_INIT_ATTR_PD |
1799 IBV_QP_INIT_ATTR_IND_TABLE |
1800 IBV_QP_INIT_ATTR_RX_HASH,
1801 .rx_hash_conf = (struct ibv_rx_hash_conf){
1802 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1803 .rx_hash_key_len = rss_key_len,
1804 .rx_hash_key = (void *)(uintptr_t)rss_key,
1805 .rx_hash_fields_mask = hash_fields,
1807 .rwq_ind_tbl = ind_tbl->ind_table,
1815 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
1818 hrxq->ind_table = ind_tbl;
1820 hrxq->rss_key_len = rss_key_len;
1821 hrxq->hash_fields = hash_fields;
1822 memcpy(hrxq->rss_key, rss_key, rss_key_len);
1823 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1824 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
1825 if (!hrxq->action) {
1830 rte_atomic32_inc(&hrxq->refcnt);
1831 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1834 err = rte_errno; /* Save rte_errno before cleanup. */
1835 mlx5_ind_table_ibv_release(dev, ind_tbl);
1837 claim_zero(mlx5_glue->destroy_qp(qp));
1838 rte_errno = err; /* Restore rte_errno. */
1843 * Get an Rx Hash queue.
1846 * Pointer to Ethernet device.
1848 * RSS configuration for the Rx hash queue.
1850  * Queues entering the hash queue. If hash_fields is empty, only the
1851  * first queue index is used for the indirection table.
1856  * A hash Rx queue on success.
1859 mlx5_hrxq_get(struct rte_eth_dev *dev,
1860 const uint8_t *rss_key, uint32_t rss_key_len,
1861 uint64_t hash_fields,
1862 const uint16_t *queues, uint32_t queues_n)
1864 struct mlx5_priv *priv = dev->data->dev_private;
1865 struct mlx5_hrxq *hrxq;
1867 queues_n = hash_fields ? queues_n : 1;
1868 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1869 struct mlx5_ind_table_ibv *ind_tbl;
1871 if (hrxq->rss_key_len != rss_key_len)
1873 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1875 if (hrxq->hash_fields != hash_fields)
1877 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1880 if (ind_tbl != hrxq->ind_table) {
1881 mlx5_ind_table_ibv_release(dev, ind_tbl);
1884 rte_atomic32_inc(&hrxq->refcnt);
1891 * Release the hash Rx queue.
1894 * Pointer to Ethernet device.
1896 * Pointer to Hash Rx queue to release.
1899 * 1 while a reference on it exists, 0 when freed.
1902 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
1904 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1905 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1906 mlx5_glue->destroy_flow_action(hrxq->action);
1908 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1909 mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
1910 LIST_REMOVE(hrxq, next);
1914 claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
1919  * Verify the Verbs hash Rx queue list is empty.
1922  * Pointer to Ethernet device.
1925  * The number of objects not released.
1928 mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
1930 struct mlx5_priv *priv = dev->data->dev_private;
1931 struct mlx5_hrxq *hrxq;
1934 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1936 "port %u Verbs hash Rx queue %p still referenced",
1937 dev->data->port_id, (void *)hrxq);
1944 * Create a drop Rx queue Verbs object.
1947 * Pointer to Ethernet device.
1950 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1952 static struct mlx5_rxq_ibv *
1953 mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
1955 struct mlx5_priv *priv = dev->data->dev_private;
1956 struct ibv_context *ctx = priv->sh->ctx;
1958 struct ibv_wq *wq = NULL;
1959 struct mlx5_rxq_ibv *rxq;
1961 if (priv->drop_queue.rxq)
1962 return priv->drop_queue.rxq;
1963 cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
1965 DEBUG("port %u cannot allocate CQ for drop queue",
1966 dev->data->port_id);
1970 wq = mlx5_glue->create_wq(ctx,
1971 &(struct ibv_wq_init_attr){
1972 .wq_type = IBV_WQT_RQ,
1979 DEBUG("port %u cannot allocate WQ for drop queue",
1980 dev->data->port_id);
1984 rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
1986 DEBUG("port %u cannot allocate drop Rx queue memory",
1987 dev->data->port_id);
1993 priv->drop_queue.rxq = rxq;
1997 claim_zero(mlx5_glue->destroy_wq(wq));
1999 claim_zero(mlx5_glue->destroy_cq(cq));
2004 * Release a drop Rx queue Verbs object.
2007 * Pointer to Ethernet device.
2010 * The Verbs object initialised, NULL otherwise and rte_errno is set.
2013 mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
2015 struct mlx5_priv *priv = dev->data->dev_private;
2016 struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
2019 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2021 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2023 priv->drop_queue.rxq = NULL;
2027 * Create a drop indirection table.
2030 * Pointer to Ethernet device.
2033 * The Verbs object initialised, NULL otherwise and rte_errno is set.
2035 static struct mlx5_ind_table_ibv *
2036 mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
2038 struct mlx5_priv *priv = dev->data->dev_private;
2039 struct mlx5_ind_table_ibv *ind_tbl;
2040 struct mlx5_rxq_ibv *rxq;
2041 struct mlx5_ind_table_ibv tmpl;
2043 rxq = mlx5_rxq_ibv_drop_new(dev);
2046 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2048 &(struct ibv_rwq_ind_table_init_attr){
2049 .log_ind_tbl_size = 0,
2050 .ind_tbl = &rxq->wq,
2053 if (!tmpl.ind_table) {
2054 DEBUG("port %u cannot allocate indirection table for drop"
2056 dev->data->port_id);
2060 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2065 ind_tbl->ind_table = tmpl.ind_table;
2068 mlx5_rxq_ibv_drop_release(dev);
2073 * Release a drop indirection table.
2076 * Pointer to Ethernet device.
2079 mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
2081 struct mlx5_priv *priv = dev->data->dev_private;
2082 struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
2084 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2085 mlx5_rxq_ibv_drop_release(dev);
2087 priv->drop_queue.hrxq->ind_table = NULL;
2091 * Create a drop Rx Hash queue.
2094 * Pointer to Ethernet device.
2097 * The Verbs object initialised, NULL otherwise and rte_errno is set.
2100 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2102 struct mlx5_priv *priv = dev->data->dev_private;
2103 struct mlx5_ind_table_ibv *ind_tbl;
2105 struct mlx5_hrxq *hrxq;
2107 if (priv->drop_queue.hrxq) {
2108 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2109 return priv->drop_queue.hrxq;
2111 ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
2114 qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2115 &(struct ibv_qp_init_attr_ex){
2116 .qp_type = IBV_QPT_RAW_PACKET,
2118 IBV_QP_INIT_ATTR_PD |
2119 IBV_QP_INIT_ATTR_IND_TABLE |
2120 IBV_QP_INIT_ATTR_RX_HASH,
2121 .rx_hash_conf = (struct ibv_rx_hash_conf){
2123 IBV_RX_HASH_FUNC_TOEPLITZ,
2124 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2125 .rx_hash_key = rss_hash_default_key,
2126 .rx_hash_fields_mask = 0,
2128 .rwq_ind_tbl = ind_tbl->ind_table,
2132 DEBUG("port %u cannot allocate QP for drop queue",
2133 dev->data->port_id);
2137 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2140 "port %u cannot allocate memory for drop queue",
2141 dev->data->port_id);
2145 hrxq->ind_table = ind_tbl;
2147 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2148 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2149 if (!hrxq->action) {
2154 priv->drop_queue.hrxq = hrxq;
2155 rte_atomic32_set(&hrxq->refcnt, 1);
2159 mlx5_ind_table_ibv_drop_release(dev);
2164 * Release a drop hash Rx queue.
2167 * Pointer to Ethernet device.
2170 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2172 struct mlx5_priv *priv = dev->data->dev_private;
2173 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2175 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2176 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2177 mlx5_glue->destroy_flow_action(hrxq->action);
2179 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2180 mlx5_ind_table_ibv_drop_release(dev);
2182 priv->drop_queue.hrxq = NULL;