1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 #include <rte_malloc.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
20 #include <rte_eal_paging.h>
22 #include <mlx5_glue.h>
23 #include <mlx5_devx_cmds.h>
24 #include <mlx5_malloc.h>
26 #include "mlx5_defs.h"
28 #include "mlx5_common_os.h"
29 #include "mlx5_rxtx.h"
30 #include "mlx5_utils.h"
31 #include "mlx5_autoconf.h"
32 #include "mlx5_flow.h"
35 /* Default RSS hash key also used for ConnectX-3. */
36 uint8_t rss_hash_default_key[] = {
37 0x2c, 0xc6, 0x81, 0xd1,
38 0x5b, 0xdb, 0xf4, 0xf7,
39 0xfc, 0xa2, 0x83, 0x19,
40 0xdb, 0x1a, 0x3e, 0x94,
41 0x6b, 0x9e, 0x38, 0xd9,
42 0x2c, 0x9c, 0x03, 0xd1,
43 0xad, 0x99, 0x44, 0xa7,
44 0xd9, 0x56, 0x3d, 0x59,
45 0x06, 0x3c, 0x25, 0xf3,
46 0xfc, 0x1f, 0xdc, 0x2a,
49 /* Length of the default RSS hash key. */
50 static_assert(MLX5_RSS_HASH_KEY_LEN ==
51 (unsigned int)sizeof(rss_hash_default_key),
52 "wrong RSS default key size.");
55 * Check whether Multi-Packet RQ can be enabled for the device.
58 * Pointer to Ethernet device.
61 * 1 if supported, negative errno value if not.
64 mlx5_check_mprq_support(struct rte_eth_dev *dev)
66 struct mlx5_priv *priv = dev->data->dev_private;
68 if (priv->config.mprq.enabled &&
69 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
75 * Check whether Multi-Packet RQ is enabled for the Rx queue.
78 * Pointer to receive queue structure.
81 * 0 if disabled, otherwise enabled.
84 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
86 return rxq->strd_num_n > 0;
90 * Check whether Multi-Packet RQ is enabled for the device.
93 * Pointer to Ethernet device.
96 * 0 if disabled, otherwise enabled.
99 mlx5_mprq_enabled(struct rte_eth_dev *dev)
101 struct mlx5_priv *priv = dev->data->dev_private;
106 if (mlx5_check_mprq_support(dev) < 0)
108 /* All the configured queues should be enabled. */
109 for (i = 0; i < priv->rxqs_n; ++i) {
110 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
111 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
112 (rxq, struct mlx5_rxq_ctrl, rxq);
114 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
117 if (mlx5_rxq_mprq_enabled(rxq))
120 /* Multi-Packet RQ can't be partially configured. */
121 MLX5_ASSERT(n == 0 || n == n_ibv);
126 * Allocate RX queue elements for Multi-Packet RQ.
129 * Pointer to RX queue structure.
132 * 0 on success, a negative errno value otherwise and rte_errno is set.
135 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
137 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
138 unsigned int wqe_n = 1 << rxq->elts_n;
142 /* Iterate on segments. */
143 for (i = 0; i <= wqe_n; ++i) {
144 struct mlx5_mprq_buf *buf;
146 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
147 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
152 (*rxq->mprq_bufs)[i] = buf;
154 rxq->mprq_repl = buf;
157 "port %u Rx queue %u allocated and configured %u segments",
158 rxq->port_id, rxq->idx, wqe_n);
161 err = rte_errno; /* Save rte_errno before cleanup. */
163 for (i = 0; (i != wqe_n); ++i) {
164 if ((*rxq->mprq_bufs)[i] != NULL)
165 rte_mempool_put(rxq->mprq_mp,
166 (*rxq->mprq_bufs)[i]);
167 (*rxq->mprq_bufs)[i] = NULL;
169 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
170 rxq->port_id, rxq->idx);
171 rte_errno = err; /* Restore rte_errno. */
176 * Allocate RX queue elements for Single-Packet RQ.
179 * Pointer to RX queue structure.
182 * 0 on success, errno value on failure.
185 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
187 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
188 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
192 /* Iterate on segments. */
193 for (i = 0; (i != elts_n); ++i) {
194 struct rte_mbuf *buf;
196 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
198 DRV_LOG(ERR, "port %u empty mbuf pool",
199 PORT_ID(rxq_ctrl->priv));
203 /* Headroom is reserved by rte_pktmbuf_alloc(). */
204 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
205 /* Buffer is supposed to be empty. */
206 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
207 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
208 MLX5_ASSERT(!buf->next);
209 /* Only the first segment keeps headroom. */
211 SET_DATA_OFF(buf, 0);
212 PORT(buf) = rxq_ctrl->rxq.port_id;
213 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
214 PKT_LEN(buf) = DATA_LEN(buf);
216 (*rxq_ctrl->rxq.elts)[i] = buf;
218 /* If Rx vector is activated. */
219 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
220 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
221 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
222 struct rte_pktmbuf_pool_private *priv =
223 (struct rte_pktmbuf_pool_private *)
224 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
227 /* Initialize default rearm_data for vPMD. */
228 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
229 rte_mbuf_refcnt_set(mbuf_init, 1);
230 mbuf_init->nb_segs = 1;
231 mbuf_init->port = rxq->port_id;
232 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
233 mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
235 * prevent compiler reordering:
236 * rearm_data covers previous fields.
238 rte_compiler_barrier();
239 rxq->mbuf_initializer =
240 *(rte_xmm_t *)&mbuf_init->rearm_data;
241 /* Padding with a fake mbuf for vectorized Rx. */
242 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
243 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
246 "port %u Rx queue %u allocated and configured %u segments"
248 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
249 elts_n / (1 << rxq_ctrl->rxq.sges_n));
252 err = rte_errno; /* Save rte_errno before cleanup. */
254 for (i = 0; (i != elts_n); ++i) {
255 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
256 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
257 (*rxq_ctrl->rxq.elts)[i] = NULL;
259 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
260 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
261 rte_errno = err; /* Restore rte_errno. */
266 * Allocate RX queue elements.
269 * Pointer to RX queue structure.
272 * 0 on success, errno value on failure.
275 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
277 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
278 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
282 * Free RX queue elements for Multi-Packet RQ.
285 * Pointer to RX queue structure.
288 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
290 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
293 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
294 rxq->port_id, rxq->idx);
295 if (rxq->mprq_bufs == NULL)
297 MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
298 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
299 if ((*rxq->mprq_bufs)[i] != NULL)
300 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
301 (*rxq->mprq_bufs)[i] = NULL;
303 if (rxq->mprq_repl != NULL) {
304 mlx5_mprq_buf_free(rxq->mprq_repl);
305 rxq->mprq_repl = NULL;
310 * Free RX queue elements for Single-Packet RQ.
313 * Pointer to RX queue structure.
316 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
318 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
319 const uint16_t q_n = (1 << rxq->elts_n);
320 const uint16_t q_mask = q_n - 1;
321 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
324 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
325 PORT_ID(rxq_ctrl->priv), rxq->idx);
326 if (rxq->elts == NULL)
329 * Some mbufs in the ring belong to the application; they cannot be
332 if (mlx5_rxq_check_vec_support(rxq) > 0) {
333 for (i = 0; i < used; ++i)
334 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
335 rxq->rq_pi = rxq->rq_ci;
337 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
338 if ((*rxq->elts)[i] != NULL)
339 rte_pktmbuf_free_seg((*rxq->elts)[i]);
340 (*rxq->elts)[i] = NULL;
345 * Free RX queue elements.
348 * Pointer to RX queue structure.
351 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
353 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
354 rxq_free_elts_mprq(rxq_ctrl);
356 rxq_free_elts_sprq(rxq_ctrl);
360 * Returns the per-queue supported offloads.
363 * Pointer to Ethernet device.
366 * Supported Rx offloads.
369 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
371 struct mlx5_priv *priv = dev->data->dev_private;
372 struct mlx5_dev_config *config = &priv->config;
373 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
374 DEV_RX_OFFLOAD_TIMESTAMP |
375 DEV_RX_OFFLOAD_JUMBO_FRAME |
376 DEV_RX_OFFLOAD_RSS_HASH);
378 if (config->hw_fcs_strip)
379 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
382 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
383 DEV_RX_OFFLOAD_UDP_CKSUM |
384 DEV_RX_OFFLOAD_TCP_CKSUM);
385 if (config->hw_vlan_strip)
386 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
387 if (MLX5_LRO_SUPPORTED(dev))
388 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
394 * Returns the per-port supported offloads.
397 * Supported Rx offloads.
400 mlx5_get_rx_port_offloads(void)
402 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
408 * Verify if the queue can be released.
411 * Pointer to Ethernet device.
416 * 1 if the queue can be released
417 * 0 if the queue cannot be released because there are references to it.
418 * Negative errno and rte_errno is set if queue doesn't exist.
421 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
423 struct mlx5_priv *priv = dev->data->dev_private;
424 struct mlx5_rxq_ctrl *rxq_ctrl;
426 if (!(*priv->rxqs)[idx]) {
430 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
431 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
435 * Rx queue presetup checks.
438 * Pointer to Ethernet device structure.
442 * Number of descriptors to configure in queue.
445 * 0 on success, a negative errno value otherwise and rte_errno is set.
448 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
450 struct mlx5_priv *priv = dev->data->dev_private;
452 if (!rte_is_power_of_2(*desc)) {
453 *desc = 1 << log2above(*desc);
455 "port %u increased number of descriptors in Rx queue %u"
456 " to the next power of two (%d)",
457 dev->data->port_id, idx, *desc);
459 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
460 dev->data->port_id, idx, *desc);
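/*
 * Illustrative arithmetic (sketch, not driver logic): a request for 1000
 * descriptors is rounded up above to 1 << log2above(1000) = 1024, since the
 * ring size is later handled as a power of two (elts_n = log2above(desc)).
 */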
461 if (idx >= priv->rxqs_n) {
462 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
463 dev->data->port_id, idx, priv->rxqs_n);
464 rte_errno = EOVERFLOW;
467 if (!mlx5_rxq_releasable(dev, idx)) {
468 DRV_LOG(ERR, "port %u unable to release queue index %u",
469 dev->data->port_id, idx);
473 mlx5_rxq_release(dev, idx);
480 * Pointer to Ethernet device structure.
484 * Number of descriptors to configure in queue.
486 * NUMA socket on which memory must be allocated.
488 * Thresholds parameters.
490 * Memory pool for buffer allocations.
493 * 0 on success, a negative errno value otherwise and rte_errno is set.
496 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
497 unsigned int socket, const struct rte_eth_rxconf *conf,
498 struct rte_mempool *mp)
500 struct mlx5_priv *priv = dev->data->dev_private;
501 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
502 struct mlx5_rxq_ctrl *rxq_ctrl =
503 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
506 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
509 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
511 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
512 dev->data->port_id, idx);
516 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
517 dev->data->port_id, idx);
518 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
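/*
 * Application-side sketch (illustrative, not part of this file): this callback
 * is reached through the generic ethdev API, for example:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256,
 *			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	int ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *			NULL, mp);
 *
 * The pool name, sizes and queue index are arbitrary placeholders; the
 * descriptor count is adjusted by mlx5_rx_queue_pre_setup() when it is not a
 * power of two.
 */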
525 * Pointer to Ethernet device structure.
529 * Number of descriptors to configure in queue.
530 * @param hairpin_conf
531 * Hairpin configuration parameters.
534 * 0 on success, a negative errno value otherwise and rte_errno is set.
537 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
539 const struct rte_eth_hairpin_conf *hairpin_conf)
541 struct mlx5_priv *priv = dev->data->dev_private;
542 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
543 struct mlx5_rxq_ctrl *rxq_ctrl =
544 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
547 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
550 if (hairpin_conf->peer_count != 1 ||
551 hairpin_conf->peers[0].port != dev->data->port_id ||
552 hairpin_conf->peers[0].queue >= priv->txqs_n) {
553 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
554 " invalid hairpin configuration", dev->data->port_id,
559 rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
561 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
562 dev->data->port_id, idx);
566 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
567 dev->data->port_id, idx);
568 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
573 * DPDK callback to release a RX queue.
576 * Generic RX queue pointer.
579 mlx5_rx_queue_release(void *dpdk_rxq)
581 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
582 struct mlx5_rxq_ctrl *rxq_ctrl;
583 struct mlx5_priv *priv;
587 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
588 priv = rxq_ctrl->priv;
589 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
590 rte_panic("port %u Rx queue %u is still used by a flow and"
591 " cannot be removed\n",
592 PORT_ID(priv), rxq->idx);
593 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
597 * Get an Rx queue Verbs/DevX object.
600 * Pointer to Ethernet device.
602 * Queue index in DPDK Rx queue array
605 * The Verbs/DevX object if it exists.
607 static struct mlx5_rxq_obj *
608 mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
610 struct mlx5_priv *priv = dev->data->dev_private;
611 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
612 struct mlx5_rxq_ctrl *rxq_ctrl;
614 if (idx >= priv->rxqs_n)
618 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
620 rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
621 return rxq_ctrl->obj;
625 * Release the resources allocated for an RQ DevX object.
628 * DevX Rx queue object.
631 rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
633 if (rxq_ctrl->rxq.wqes) {
634 mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
635 rxq_ctrl->rxq.wqes = NULL;
637 if (rxq_ctrl->wq_umem) {
638 mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
639 rxq_ctrl->wq_umem = NULL;
644 * Release Rx hairpin-related resources.
647 * Hairpin Rx queue object.
650 rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
652 struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
654 MLX5_ASSERT(rxq_obj);
655 rq_attr.state = MLX5_RQC_STATE_RST;
656 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
657 mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
658 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
662 * Release an Rx verbs/DevX queue object.
665 * Verbs/DevX Rx queue object.
668 * 1 while a reference on it exists, 0 when freed.
671 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
673 MLX5_ASSERT(rxq_obj);
674 if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
675 switch (rxq_obj->type) {
676 case MLX5_RXQ_OBJ_TYPE_IBV:
677 MLX5_ASSERT(rxq_obj->wq);
678 MLX5_ASSERT(rxq_obj->cq);
679 rxq_free_elts(rxq_obj->rxq_ctrl);
680 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
681 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
683 case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
684 MLX5_ASSERT(rxq_obj->cq);
685 MLX5_ASSERT(rxq_obj->rq);
686 rxq_free_elts(rxq_obj->rxq_ctrl);
687 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
688 rxq_release_rq_resources(rxq_obj->rxq_ctrl);
689 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
691 case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
692 MLX5_ASSERT(rxq_obj->rq);
693 rxq_obj_hairpin_release(rxq_obj);
696 if (rxq_obj->channel)
697 claim_zero(mlx5_glue->destroy_comp_channel
699 LIST_REMOVE(rxq_obj, next);
707 * Allocate queue vector and fill epoll fd list for Rx interrupts.
710 * Pointer to Ethernet device.
713 * 0 on success, a negative errno value otherwise and rte_errno is set.
716 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
718 struct mlx5_priv *priv = dev->data->dev_private;
720 unsigned int rxqs_n = priv->rxqs_n;
721 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
722 unsigned int count = 0;
723 struct rte_intr_handle *intr_handle = dev->intr_handle;
725 if (!dev->data->dev_conf.intr_conf.rxq)
727 mlx5_rx_intr_vec_disable(dev);
728 intr_handle->intr_vec = mlx5_malloc(0,
729 n * sizeof(intr_handle->intr_vec[0]),
731 if (intr_handle->intr_vec == NULL) {
733 "port %u failed to allocate memory for interrupt"
734 " vector, Rx interrupts will not be supported",
739 intr_handle->type = RTE_INTR_HANDLE_EXT;
740 for (i = 0; i != n; ++i) {
741 /* This rxq obj must not be released in this function. */
742 struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
747 /* Skip queues that cannot request interrupts. */
748 if (!rxq_obj || !rxq_obj->channel) {
749 /* Use invalid intr_vec[] index to disable entry. */
750 intr_handle->intr_vec[i] =
751 RTE_INTR_VEC_RXTX_OFFSET +
752 RTE_MAX_RXTX_INTR_VEC_ID;
755 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
757 "port %u too many Rx queues for interrupt"
758 " vector size (%d), Rx interrupts cannot be"
760 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
761 mlx5_rx_intr_vec_disable(dev);
765 fd = rxq_obj->channel->fd;
766 flags = fcntl(fd, F_GETFL);
767 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
771 "port %u failed to make Rx interrupt file"
772 " descriptor %d non-blocking for queue index"
774 dev->data->port_id, fd, i);
775 mlx5_rx_intr_vec_disable(dev);
778 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
779 intr_handle->efds[count] = fd;
783 mlx5_rx_intr_vec_disable(dev);
785 intr_handle->nb_efd = count;
790 * Clean up Rx interrupts handler.
793 * Pointer to Ethernet device.
796 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
798 struct mlx5_priv *priv = dev->data->dev_private;
799 struct rte_intr_handle *intr_handle = dev->intr_handle;
801 unsigned int rxqs_n = priv->rxqs_n;
802 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
804 if (!dev->data->dev_conf.intr_conf.rxq)
806 if (!intr_handle->intr_vec)
808 for (i = 0; i != n; ++i) {
809 struct mlx5_rxq_ctrl *rxq_ctrl;
810 struct mlx5_rxq_data *rxq_data;
812 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
813 RTE_MAX_RXTX_INTR_VEC_ID)
816 * Need to access the queue directly to release the reference
817 * kept in mlx5_rx_intr_vec_enable().
819 rxq_data = (*priv->rxqs)[i];
820 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
822 mlx5_rxq_obj_release(rxq_ctrl->obj);
825 rte_intr_free_epoll_fd(intr_handle);
826 if (intr_handle->intr_vec)
827 mlx5_free(intr_handle->intr_vec);
828 intr_handle->nb_efd = 0;
829 intr_handle->intr_vec = NULL;
833 * MLX5 CQ notification.
836 * Pointer to receive queue structure.
838 * Sequence number per receive queue.
841 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
844 uint32_t doorbell_hi;
846 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
848 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
849 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
850 doorbell = (uint64_t)doorbell_hi << 32;
851 doorbell |= rxq->cqn;
852 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
853 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
854 cq_db_reg, rxq->uar_lock_cq);
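/*
 * Layout note (descriptive): the arm sequence number and the masked consumer
 * index form the high 32 bits of the 64-bit doorbell and the CQ number the low
 * 32 bits; the high word is also stored in the CQ doorbell record
 * (cq_db[MLX5_CQ_ARM_DB]) before the 64-bit value is written to the UAR.
 */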
858 * DPDK callback for Rx queue interrupt enable.
861 * Pointer to Ethernet device structure.
866 * 0 on success, a negative errno value otherwise and rte_errno is set.
869 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
871 struct mlx5_priv *priv = dev->data->dev_private;
872 struct mlx5_rxq_data *rxq_data;
873 struct mlx5_rxq_ctrl *rxq_ctrl;
875 rxq_data = (*priv->rxqs)[rx_queue_id];
880 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
882 struct mlx5_rxq_obj *rxq_obj;
884 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
889 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
890 mlx5_rxq_obj_release(rxq_obj);
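/*
 * Application-side sketch (illustrative, not part of the driver): the CQ armed
 * above is typically consumed through the generic Rx interrupt API, e.g.:
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, timeout_ms);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *
 * port_id, queue_id, event and timeout_ms are placeholders.
 */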
896 * DPDK callback for Rx queue interrupt disable.
899 * Pointer to Ethernet device structure.
904 * 0 on success, a negative errno value otherwise and rte_errno is set.
907 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
909 struct mlx5_priv *priv = dev->data->dev_private;
910 struct mlx5_rxq_data *rxq_data;
911 struct mlx5_rxq_ctrl *rxq_ctrl;
912 struct mlx5_rxq_obj *rxq_obj = NULL;
913 struct ibv_cq *ev_cq;
917 rxq_data = (*priv->rxqs)[rx_queue_id];
922 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
925 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
930 ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
931 if (ret || ev_cq != rxq_obj->cq) {
935 rxq_data->cq_arm_sn++;
936 mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
937 mlx5_rxq_obj_release(rxq_obj);
940 ret = rte_errno; /* Save rte_errno before cleanup. */
942 mlx5_rxq_obj_release(rxq_obj);
943 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
944 dev->data->port_id, rx_queue_id);
945 rte_errno = ret; /* Restore rte_errno. */
950 * Create a CQ Verbs object.
953 * Pointer to Ethernet device.
955 * Pointer to device private data.
957 * Pointer to Rx queue data.
959 * Number of CQEs in CQ.
961 * Pointer to Rx queue object data.
964 * The Verbs object initialised, NULL otherwise and rte_errno is set.
966 static struct ibv_cq *
967 mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
968 struct mlx5_rxq_data *rxq_data,
969 unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
972 struct ibv_cq_init_attr_ex ibv;
973 struct mlx5dv_cq_init_attr mlx5;
976 cq_attr.ibv = (struct ibv_cq_init_attr_ex){
978 .channel = rxq_obj->channel,
981 cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
984 if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
986 cq_attr.mlx5.comp_mask |=
987 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
988 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
989 cq_attr.mlx5.cqe_comp_res_format =
990 mlx5_rxq_mprq_enabled(rxq_data) ?
991 MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
992 MLX5DV_CQE_RES_FORMAT_HASH;
994 cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
997 * For vectorized Rx, it must not be doubled in order to
998 * make cq_ci and rq_ci aligned.
1000 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
1001 cq_attr.ibv.cqe *= 2;
1002 } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
1004 "port %u Rx CQE compression is disabled for HW"
1006 dev->data->port_id);
1007 } else if (priv->config.cqe_comp && rxq_data->lro) {
1009 "port %u Rx CQE compression is disabled for LRO",
1010 dev->data->port_id);
1012 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
1013 if (priv->config.cqe_pad) {
1014 cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
1015 cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
1018 return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
1024 * Create a WQ Verbs object.
1027 * Pointer to Ethernet device.
1029 * Pointer to device private data.
1031 * Pointer to Rx queue data.
1033 * Queue index in DPDK Rx queue array
1035 * Number of WQEs in WQ.
1037 * Pointer to Rx queue object data.
1040 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1042 static struct ibv_wq *
1043 mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
1044 struct mlx5_rxq_data *rxq_data, uint16_t idx,
1045 unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
1048 struct ibv_wq_init_attr ibv;
1049 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1050 struct mlx5dv_wq_init_attr mlx5;
1054 wq_attr.ibv = (struct ibv_wq_init_attr){
1055 .wq_context = NULL, /* Could be useful in the future. */
1056 .wq_type = IBV_WQT_RQ,
1057 /* Max number of outstanding WRs. */
1058 .max_wr = wqe_n >> rxq_data->sges_n,
1059 /* Max number of scatter/gather elements in a WR. */
1060 .max_sge = 1 << rxq_data->sges_n,
1063 .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
1064 .create_flags = (rxq_data->vlan_strip ?
1065 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
1067 /* By default, FCS (CRC) is stripped by hardware. */
1068 if (rxq_data->crc_present) {
1069 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
1070 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1072 if (priv->config.hw_padding) {
1073 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1074 wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
1075 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1076 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1077 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
1078 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1081 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1082 wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
1085 if (mlx5_rxq_mprq_enabled(rxq_data)) {
1086 struct mlx5dv_striding_rq_init_attr *mprq_attr =
1087 &wq_attr.mlx5.striding_rq_attrs;
1089 wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
1090 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
1091 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
1092 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
1093 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
1096 rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
1099 rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
1103 * Make sure the number of WRs*SGEs matches expectations since a queue
1104 * cannot allocate more than "desc" buffers.
1106 if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
1107 wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
1109 "port %u Rx queue %u requested %u*%u but got"
1111 dev->data->port_id, idx,
1112 wqe_n >> rxq_data->sges_n,
1113 (1 << rxq_data->sges_n),
1114 wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
1115 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
1124 * Fill common fields of create RQ attributes structure.
1127 * Pointer to Rx queue data.
1129 * CQ number to use with this RQ.
1131 * RQ attributes structure to fill.
1134 mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
1135 struct mlx5_devx_create_rq_attr *rq_attr)
1137 rq_attr->state = MLX5_RQC_STATE_RST;
1138 rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
1140 rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
1144 * Fill common fields of DevX WQ attributes structure.
1147 * Pointer to device private data.
1149 * Pointer to Rx queue control structure.
1151 * WQ attributes structure to fill.
1154 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
1155 struct mlx5_devx_wq_attr *wq_attr)
1157 wq_attr->end_padding_mode = priv->config.cqe_pad ?
1158 MLX5_WQ_END_PAD_MODE_ALIGN :
1159 MLX5_WQ_END_PAD_MODE_NONE;
1160 wq_attr->pd = priv->sh->pdn;
1161 wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
1162 wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
1163 wq_attr->dbr_umem_valid = 1;
1164 wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
1165 wq_attr->wq_umem_valid = 1;
1169 * Create a RQ object using DevX.
1172 * Pointer to Ethernet device.
1174 * Queue index in DPDK Rx queue array
1176 * CQ number to use with this RQ.
1179 * The DevX object initialised, NULL otherwise and rte_errno is set.
1181 static struct mlx5_devx_obj *
1182 mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
1184 struct mlx5_priv *priv = dev->data->dev_private;
1185 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1186 struct mlx5_rxq_ctrl *rxq_ctrl =
1187 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1188 struct mlx5_devx_create_rq_attr rq_attr;
1189 uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
1190 uint32_t wq_size = 0;
1191 uint32_t wqe_size = 0;
1192 uint32_t log_wqe_size = 0;
1194 struct mlx5_devx_obj *rq;
1196 memset(&rq_attr, 0, sizeof(rq_attr));
1197 /* Fill RQ attributes. */
1198 rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
1199 rq_attr.flush_in_error_en = 1;
1200 mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
1201 /* Fill WQ attributes for this RQ. */
1202 if (mlx5_rxq_mprq_enabled(rxq_data)) {
1203 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
1205 * Number of strides in each WQE:
1206 * 512*2^single_wqe_log_num_of_strides.
1208 rq_attr.wq_attr.single_wqe_log_num_of_strides =
1209 rxq_data->strd_num_n -
1210 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1211 /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
1212 rq_attr.wq_attr.single_stride_log_num_of_bytes =
1213 rxq_data->strd_sz_n -
1214 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
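/*
 * Worked example (illustrative values): strd_num_n = 12 maps to a field value
 * of 12 - log2(512) = 3, i.e. 512 * 2^3 = 4096 strides per WQE, and
 * strd_sz_n = 11 maps to 11 - log2(64) = 5, i.e. 64B * 2^5 = 2048B per stride.
 */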
1215 wqe_size = sizeof(struct mlx5_wqe_mprq);
1217 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
1218 wqe_size = sizeof(struct mlx5_wqe_data_seg);
1220 log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
1221 rq_attr.wq_attr.log_wq_stride = log_wqe_size;
1222 rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
1223 /* Calculate and allocate WQ memory space. */
1224 wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
1225 wq_size = wqe_n * wqe_size;
1226 size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
1227 if (alignment == (size_t)-1) {
1228 DRV_LOG(ERR, "Failed to get mem page size");
1232 buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
1233 alignment, rxq_ctrl->socket);
1236 rxq_data->wqes = buf;
1237 rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
1239 if (!rxq_ctrl->wq_umem) {
1243 mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
1244 rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
1246 rxq_release_rq_resources(rxq_ctrl);
1251 * Create the Rx hairpin queue object.
1254 * Pointer to Ethernet device.
1256 * Queue index in DPDK Rx queue array
1259 * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
1261 static struct mlx5_rxq_obj *
1262 mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
1264 struct mlx5_priv *priv = dev->data->dev_private;
1265 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1266 struct mlx5_rxq_ctrl *rxq_ctrl =
1267 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1268 struct mlx5_devx_create_rq_attr attr = { 0 };
1269 struct mlx5_rxq_obj *tmpl = NULL;
1270 uint32_t max_wq_data;
1272 MLX5_ASSERT(rxq_data);
1273 MLX5_ASSERT(!rxq_ctrl->obj);
1274 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1278 "port %u Rx queue %u cannot allocate verbs resources",
1279 dev->data->port_id, rxq_data->idx);
1283 tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
1284 tmpl->rxq_ctrl = rxq_ctrl;
1286 max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
1287 /* Jumbo frames > 9KB and more packets should be supported. */
1288 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
1289 if (priv->config.log_hp_size > max_wq_data) {
1290 DRV_LOG(ERR, "total data size %u power of 2 is "
1291 "too large for hairpin",
1292 priv->config.log_hp_size);
1297 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
1299 attr.wq_attr.log_hairpin_data_sz =
1300 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
1301 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
1303 /* Set the number of packets to the maximum value for performance. */
1304 attr.wq_attr.log_hairpin_num_packets =
1305 attr.wq_attr.log_hairpin_data_sz -
1306 MLX5_HAIRPIN_QUEUE_STRIDE;
1307 tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
1311 "port %u Rx hairpin queue %u can't create rq object",
1312 dev->data->port_id, idx);
1317 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1318 idx, (void *)&tmpl);
1319 rte_atomic32_inc(&tmpl->refcnt);
1320 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1321 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1326 * Create the Rx queue Verbs/DevX object.
1329 * Pointer to Ethernet device.
1331 * Queue index in DPDK Rx queue array
1333 * Type of Rx queue object to create.
1336 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1338 struct mlx5_rxq_obj *
1339 mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
1340 enum mlx5_rxq_obj_type type)
1342 struct mlx5_priv *priv = dev->data->dev_private;
1343 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1344 struct mlx5_rxq_ctrl *rxq_ctrl =
1345 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1346 struct ibv_wq_attr mod;
1348 unsigned int wqe_n = 1 << rxq_data->elts_n;
1349 struct mlx5_rxq_obj *tmpl = NULL;
1350 struct mlx5dv_cq cq_info;
1351 struct mlx5dv_rwq rwq;
1353 struct mlx5dv_obj obj;
1355 MLX5_ASSERT(rxq_data);
1356 MLX5_ASSERT(!rxq_ctrl->obj);
1357 if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
1358 return mlx5_rxq_obj_hairpin_new(dev, idx);
1359 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
1360 priv->verbs_alloc_ctx.obj = rxq_ctrl;
1361 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1365 "port %u Rx queue %u cannot allocate verbs resources",
1366 dev->data->port_id, rxq_data->idx);
1371 tmpl->rxq_ctrl = rxq_ctrl;
1372 if (rxq_ctrl->irq) {
1373 tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
1374 if (!tmpl->channel) {
1375 DRV_LOG(ERR, "port %u: comp channel creation failure",
1376 dev->data->port_id);
1381 if (mlx5_rxq_mprq_enabled(rxq_data))
1382 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
1385 tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
1387 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
1388 dev->data->port_id, idx);
1392 obj.cq.in = tmpl->cq;
1393 obj.cq.out = &cq_info;
1394 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
1399 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1401 "port %u wrong MLX5_CQE_SIZE environment variable"
1402 " value: it should be set to %u",
1403 dev->data->port_id, RTE_CACHE_LINE_SIZE);
1407 DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
1408 dev->data->port_id, priv->sh->device_attr.max_qp_wr);
1409 DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
1410 dev->data->port_id, priv->sh->device_attr.max_sge);
1411 /* Allocate door-bell for types created with DevX. */
1412 if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
1413 struct mlx5_devx_dbr_page *dbr_page;
1416 dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs,
1420 rxq_ctrl->dbr_offset = dbr_offset;
1421 rxq_ctrl->dbr_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
1422 rxq_ctrl->dbr_umem_id_valid = 1;
1423 rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
1424 (uintptr_t)rxq_ctrl->dbr_offset);
1426 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
1427 tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
1430 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
1431 dev->data->port_id, idx);
1435 /* Change queue state to ready. */
1436 mod = (struct ibv_wq_attr){
1437 .attr_mask = IBV_WQ_ATTR_STATE,
1438 .wq_state = IBV_WQS_RDY,
1440 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
1443 "port %u Rx queue %u WQ state to IBV_WQS_RDY"
1444 " failed", dev->data->port_id, idx);
1448 obj.rwq.in = tmpl->wq;
1450 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
1455 rxq_data->wqes = rwq.buf;
1456 rxq_data->rq_db = rwq.dbrec;
1457 } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
1458 struct mlx5_devx_modify_rq_attr rq_attr;
1460 memset(&rq_attr, 0, sizeof(rq_attr));
1461 tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
1463 DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
1464 dev->data->port_id, idx);
1468 /* Change queue state to ready. */
1469 rq_attr.rq_state = MLX5_RQC_STATE_RST;
1470 rq_attr.state = MLX5_RQC_STATE_RDY;
1471 ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
1475 /* Fill the rings. */
1476 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
1477 rxq_data->cq_db = cq_info.dbrec;
1478 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1479 rxq_data->cq_uar = cq_info.cq_uar;
1480 rxq_data->cqn = cq_info.cqn;
1481 rxq_data->cq_arm_sn = 0;
1482 mlx5_rxq_initialize(rxq_data);
1483 rxq_data->cq_ci = 0;
1484 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1485 idx, (void *)&tmpl);
1486 rte_atomic32_inc(&tmpl->refcnt);
1487 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1488 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1492 ret = rte_errno; /* Save rte_errno before cleanup. */
1493 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
1494 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1495 else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
1496 claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
1498 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1500 claim_zero(mlx5_glue->destroy_comp_channel
1503 rte_errno = ret; /* Restore rte_errno. */
1505 if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
1506 rxq_release_rq_resources(rxq_ctrl);
1507 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1512 * Verify the Rx queue objects list is empty
1515 * Pointer to Ethernet device.
1518 * The number of objects not released.
1521 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1523 struct mlx5_priv *priv = dev->data->dev_private;
1525 struct mlx5_rxq_obj *rxq_obj;
1527 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1528 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1529 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1536 * Callback function to initialize mbufs for Multi-Packet RQ.
1539 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1540 void *_m, unsigned int i __rte_unused)
1542 struct mlx5_mprq_buf *buf = _m;
1543 struct rte_mbuf_ext_shared_info *shinfo;
1544 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1547 memset(_m, 0, sizeof(*buf));
1549 rte_atomic16_set(&buf->refcnt, 1);
1550 for (j = 0; j != strd_n; ++j) {
1551 shinfo = &buf->shinfos[j];
1552 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1553 shinfo->fcb_opaque = buf;
1558 * Free mempool of Multi-Packet RQ.
1561 * Pointer to Ethernet device.
1564 * 0 on success, negative errno value on failure.
1567 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1569 struct mlx5_priv *priv = dev->data->dev_private;
1570 struct rte_mempool *mp = priv->mprq_mp;
1575 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1576 dev->data->port_id, mp->name);
1578 * If a buffer in the pool has been externally attached to an mbuf and it
1579 * is still in use by the application, destroying the Rx queue can spoil
1580 * the packet. It is unlikely but can happen if the application
1581 * dynamically creates and destroys queues while holding Rx packets.
1583 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1584 * RQ isn't provided by the application but managed by the PMD.
1586 if (!rte_mempool_full(mp)) {
1588 "port %u mempool for Multi-Packet RQ is still in use",
1589 dev->data->port_id);
1593 rte_mempool_free(mp);
1594 /* Unset mempool for each Rx queue. */
1595 for (i = 0; i != priv->rxqs_n; ++i) {
1596 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1600 rxq->mprq_mp = NULL;
1602 priv->mprq_mp = NULL;
1607 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1608 * mempool. If already allocated, reuse it if there are enough elements.
1609 * Otherwise, resize it.
1612 * Pointer to Ethernet device.
1615 * 0 on success, negative errno value on failure.
1618 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1620 struct mlx5_priv *priv = dev->data->dev_private;
1621 struct rte_mempool *mp = priv->mprq_mp;
1622 char name[RTE_MEMPOOL_NAMESIZE];
1623 unsigned int desc = 0;
1624 unsigned int buf_len;
1625 unsigned int obj_num;
1626 unsigned int obj_size;
1627 unsigned int strd_num_n = 0;
1628 unsigned int strd_sz_n = 0;
1630 unsigned int n_ibv = 0;
1632 if (!mlx5_mprq_enabled(dev))
1634 /* Count the total number of descriptors configured. */
1635 for (i = 0; i != priv->rxqs_n; ++i) {
1636 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1637 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1638 (rxq, struct mlx5_rxq_ctrl, rxq);
1640 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1643 desc += 1 << rxq->elts_n;
1644 /* Get the max number of strides. */
1645 if (strd_num_n < rxq->strd_num_n)
1646 strd_num_n = rxq->strd_num_n;
1647 /* Get the max size of a stride. */
1648 if (strd_sz_n < rxq->strd_sz_n)
1649 strd_sz_n = rxq->strd_sz_n;
1651 MLX5_ASSERT(strd_num_n && strd_sz_n);
1652 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1653 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1654 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
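/*
 * Worked example (illustrative values): with strd_num_n = 6 and strd_sz_n = 11,
 * buf_len = 64 * 2048 = 128 KiB, and obj_size adds the mlx5_mprq_buf header,
 * 64 shared-info structures and the mbuf headroom on top of that.
 */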
1656 * Received packets can be either memcpy'd or externally referenced. If
1657 * a packet is attached to an mbuf as an external buffer, it isn't
1658 * possible to predict how the buffers will be queued by the
1659 * application, so there is no way to pre-allocate exactly the needed
1660 * buffers in advance; enough buffers must be speculatively prepared.
1662 * In the data path, if this mempool is depleted, the PMD will try to
1663 * memcpy received packets to buffers provided by the application
1664 * (rxq->mp) until this mempool becomes available again.
1667 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1669 * rte_mempool_create_empty() has sanity check to refuse large cache
1670 * size compared to the number of elements.
1671 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
1672 * constant number 2 instead.
1674 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1675 /* Check whether a mempool is already allocated and if it can be reused. */
1676 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1677 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1678 dev->data->port_id, mp->name);
1681 } else if (mp != NULL) {
1682 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1683 dev->data->port_id, mp->name);
1685 * If freeing fails, the mempool may still be in use and there is no
1686 * choice but to keep using the existing one. On buffer underrun,
1687 * packets will be memcpy'd instead of using external buffer
1690 if (mlx5_mprq_free_mp(dev)) {
1691 if (mp->elt_size >= obj_size)
1697 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1698 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1699 0, NULL, NULL, mlx5_mprq_buf_init,
1700 (void *)(uintptr_t)(1 << strd_num_n),
1701 dev->device->numa_node, 0);
1704 "port %u failed to allocate a mempool for"
1705 " Multi-Packet RQ, count=%u, size=%u",
1706 dev->data->port_id, obj_num, obj_size);
1712 /* Set mempool for each Rx queue. */
1713 for (i = 0; i != priv->rxqs_n; ++i) {
1714 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1715 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1716 (rxq, struct mlx5_rxq_ctrl, rxq);
1718 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1722 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1723 dev->data->port_id);
1727 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1728 sizeof(struct rte_vlan_hdr) * 2 + \
1729 sizeof(struct rte_ipv6_hdr)))
1730 #define MAX_TCP_OPTION_SIZE 40u
1731 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1732 sizeof(struct rte_tcp_hdr) + \
1733 MAX_TCP_OPTION_SIZE))
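/*
 * Worked example (based on standard header sizes): 14B Ethernet + 2 * 4B VLAN
 * + 40B IPv6 gives MLX5_MAX_TCP_HDR_OFFSET = 62 bytes, and adding a 20B TCP
 * header plus up to 40B of options gives MLX5_MAX_LRO_HEADER_FIX = 122 bytes.
 */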
1736 * Adjust the maximum LRO message size.
1739 * Pointer to Ethernet device.
1742 * @param max_lro_size
1743 * The maximum size for LRO packet.
1746 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1747 uint32_t max_lro_size)
1749 struct mlx5_priv *priv = dev->data->dev_private;
1751 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1752 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1753 MLX5_MAX_TCP_HDR_OFFSET)
1754 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1755 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1756 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1757 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
1758 if (priv->max_lro_msg_size)
1759 priv->max_lro_msg_size =
1760 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1762 priv->max_lro_msg_size = max_lro_size;
1764 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1765 dev->data->port_id, idx,
1766 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1770 * Create a DPDK Rx queue.
1773 * Pointer to Ethernet device.
1777 * Number of descriptors to configure in queue.
1779 * NUMA socket on which memory must be allocated.
1782 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1784 struct mlx5_rxq_ctrl *
1785 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1786 unsigned int socket, const struct rte_eth_rxconf *conf,
1787 struct rte_mempool *mp)
1789 struct mlx5_priv *priv = dev->data->dev_private;
1790 struct mlx5_rxq_ctrl *tmpl;
1791 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1792 unsigned int mprq_stride_nums;
1793 unsigned int mprq_stride_size;
1794 unsigned int mprq_stride_cap;
1795 struct mlx5_dev_config *config = &priv->config;
1797 * Always allocate extra slots, even if eventually
1798 * the vector Rx will not be used.
1801 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1802 uint64_t offloads = conf->offloads |
1803 dev->data->dev_conf.rxmode.offloads;
1804 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1805 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1806 unsigned int max_rx_pkt_len = lro_on_queue ?
1807 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1808 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1809 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1810 RTE_PKTMBUF_HEADROOM;
1811 unsigned int max_lro_size = 0;
1812 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1814 if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1815 DEV_RX_OFFLOAD_SCATTER)) {
1816 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1817 " configured and not enough mbuf space (%u) to contain "
1818 "the maximum RX packet length(%u) with head-room(%u)",
1819 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1820 RTE_PKTMBUF_HEADROOM);
1824 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1825 desc_n * sizeof(struct rte_mbuf *), 0, socket);
1830 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1831 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1832 MLX5_MR_BTREE_CACHE_N, socket)) {
1833 /* rte_errno is already set. */
1836 tmpl->socket = socket;
1837 if (dev->data->dev_conf.intr_conf.rxq)
1839 mprq_stride_nums = config->mprq.stride_num_n ?
1840 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1841 mprq_stride_size = non_scatter_min_mbuf_size <=
1842 (1U << config->mprq.max_stride_size_n) ?
1843 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1844 mprq_stride_cap = (config->mprq.stride_num_n ?
1845 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1846 (config->mprq.stride_size_n ?
1847 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
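/*
 * Illustrative sketch: with config->mprq.stride_num_n = 5 and
 * config->mprq.stride_size_n = 11, mprq_stride_cap = 32 * 2048 = 64 KiB,
 * the per-WQE capacity checked below against the maximum packet size.
 */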
1849 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1850 * following conditions are met:
1851 * - MPRQ is enabled.
1852 * - The number of descs is more than the number of strides.
1853 * - max_rx_pkt_len plus overhead is less than the max size
1854 * of a stride or mprq_stride_size is specified by a user.
1855 * Need to make sure that there are enough strides to encapsulate
1856 * the maximum packet size in case mprq_stride_size is set.
1857 * Otherwise, enable Rx scatter if necessary.
1859 if (mprq_en && desc > (1U << mprq_stride_nums) &&
1860 (non_scatter_min_mbuf_size <=
1861 (1U << config->mprq.max_stride_size_n) ||
1862 (config->mprq.stride_size_n &&
1863 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1864 /* TODO: Rx scatter isn't supported yet. */
1865 tmpl->rxq.sges_n = 0;
1866 /* Trim the number of descs needed. */
1867 desc >>= mprq_stride_nums;
1868 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1869 config->mprq.stride_num_n : mprq_stride_nums;
1870 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1871 config->mprq.stride_size_n : mprq_stride_size;
1872 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1873 tmpl->rxq.strd_scatter_en =
1874 !!(offloads & DEV_RX_OFFLOAD_SCATTER);
1875 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1876 config->mprq.max_memcpy_len);
1877 max_lro_size = RTE_MIN(max_rx_pkt_len,
1878 (1u << tmpl->rxq.strd_num_n) *
1879 (1u << tmpl->rxq.strd_sz_n));
1881 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1882 " strd_num_n = %u, strd_sz_n = %u",
1883 dev->data->port_id, idx,
1884 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1885 } else if (max_rx_pkt_len <= first_mb_free_size) {
1886 tmpl->rxq.sges_n = 0;
1887 max_lro_size = max_rx_pkt_len;
1888 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1889 unsigned int size = non_scatter_min_mbuf_size;
1890 unsigned int sges_n;
1892 if (lro_on_queue && first_mb_free_size <
1893 MLX5_MAX_LRO_HEADER_FIX) {
1894 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1895 " to include the max header size(%u) for LRO",
1896 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1897 rte_errno = ENOTSUP;
1901 * Determine the number of SGEs needed for a full packet
1902 * and round it to the next power of two.
1904 sges_n = log2above((size / mb_len) + !!(size % mb_len));
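/*
 * Worked example (assuming the default 128B headroom): for
 * max_rx_pkt_len = 9000 and mb_len = 2048, size = 9128, so
 * sges_n = log2above(4 + 1) = 3, i.e. 8 SGEs per packet.
 */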
1905 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1907 "port %u too many SGEs (%u) needed to handle"
1908 " requested maximum packet size %u, the maximum"
1909 " supported is %u", dev->data->port_id,
1910 1 << sges_n, max_rx_pkt_len,
1911 1u << MLX5_MAX_LOG_RQ_SEGS);
1912 rte_errno = ENOTSUP;
1915 tmpl->rxq.sges_n = sges_n;
1916 max_lro_size = max_rx_pkt_len;
1918 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1920 "port %u MPRQ is requested but cannot be enabled\n"
1921 " (requested: pkt_sz = %u, desc_num = %u,"
1922 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1923 " supported: min_rxqs_num = %u,"
1924 " min_stride_sz = %u, max_stride_sz = %u).",
1925 dev->data->port_id, non_scatter_min_mbuf_size,
1927 config->mprq.stride_size_n ?
1928 (1U << config->mprq.stride_size_n) :
1929 (1U << mprq_stride_size),
1930 config->mprq.stride_num_n ?
1931 (1U << config->mprq.stride_num_n) :
1932 (1U << mprq_stride_nums),
1933 config->mprq.min_rxqs_num,
1934 (1U << config->mprq.min_stride_size_n),
1935 (1U << config->mprq.max_stride_size_n));
1936 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1937 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1938 if (desc % (1 << tmpl->rxq.sges_n)) {
1940 "port %u number of Rx queue descriptors (%u) is not a"
1941 " multiple of SGEs per packet (%u)",
1944 1 << tmpl->rxq.sges_n);
1948 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1949 /* Toggle RX checksum offload if hardware supports it. */
1950 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1951 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1952 /* Configure VLAN stripping. */
1953 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1954 /* By default, FCS (CRC) is stripped by hardware. */
1955 tmpl->rxq.crc_present = 0;
1956 tmpl->rxq.lro = lro_on_queue;
1957 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1958 if (config->hw_fcs_strip) {
1960 * RQs used for LRO-enabled TIRs should not be
1961 * configured to scatter the FCS.
1965 "port %u CRC stripping has been "
1966 "disabled but will still be performed "
1967 "by hardware, because LRO is enabled",
1968 dev->data->port_id);
1970 tmpl->rxq.crc_present = 1;
1973 "port %u CRC stripping has been disabled but will"
1974 " still be performed by hardware, make sure MLNX_OFED"
1975 " and firmware are up to date",
1976 dev->data->port_id);
1980 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1981 " incoming frames to hide it",
1983 tmpl->rxq.crc_present ? "disabled" : "enabled",
1984 tmpl->rxq.crc_present << 2);
1986 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1987 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1988 tmpl->rxq.port_id = dev->data->port_id;
1991 tmpl->rxq.elts_n = log2above(desc);
1992 tmpl->rxq.rq_repl_thresh =
1993 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
1995 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1997 tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
1999 tmpl->rxq.idx = idx;
2000 rte_atomic32_inc(&tmpl->refcnt);
2001 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
2009 * Create a DPDK Rx hairpin queue.
2012 * Pointer to Ethernet device.
2016 * Number of descriptors to configure in queue.
2017 * @param hairpin_conf
2018 * The hairpin binding configuration.
2021 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
2023 struct mlx5_rxq_ctrl *
2024 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
2025 const struct rte_eth_hairpin_conf *hairpin_conf)
2027 struct mlx5_priv *priv = dev->data->dev_private;
2028 struct mlx5_rxq_ctrl *tmpl;
2030 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
2036 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
2037 tmpl->socket = SOCKET_ID_ANY;
2038 tmpl->rxq.rss_hash = 0;
2039 tmpl->rxq.port_id = dev->data->port_id;
2041 tmpl->rxq.mp = NULL;
2042 tmpl->rxq.elts_n = log2above(desc);
2043 tmpl->rxq.elts = NULL;
2044 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
2045 tmpl->hairpin_conf = *hairpin_conf;
2046 tmpl->rxq.idx = idx;
2047 rte_atomic32_inc(&tmpl->refcnt);
2048 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
2056 * Pointer to Ethernet device.
2061 * A pointer to the queue if it exists, NULL otherwise.
2063 struct mlx5_rxq_ctrl *
2064 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
2066 struct mlx5_priv *priv = dev->data->dev_private;
2067 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2069 if ((*priv->rxqs)[idx]) {
2070 rxq_ctrl = container_of((*priv->rxqs)[idx],
2071 struct mlx5_rxq_ctrl,
2073 mlx5_rxq_obj_get(dev, idx);
2074 rte_atomic32_inc(&rxq_ctrl->refcnt);
2080 * Release a Rx queue.
2083 * Pointer to Ethernet device.
2088 * 1 while a reference on it exists, 0 when freed.
2091 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2093 struct mlx5_priv *priv = dev->data->dev_private;
2094 struct mlx5_rxq_ctrl *rxq_ctrl;
2096 if (!(*priv->rxqs)[idx])
2098 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
2099 MLX5_ASSERT(rxq_ctrl->priv);
2100 if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
2101 rxq_ctrl->obj = NULL;
2102 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
2103 if (rxq_ctrl->dbr_umem_id_valid)
2104 claim_zero(mlx5_release_dbr(&priv->dbrpgs,
2105 rxq_ctrl->dbr_umem_id,
2106 rxq_ctrl->dbr_offset));
2107 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
2108 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2109 LIST_REMOVE(rxq_ctrl, next);
2110 mlx5_free(rxq_ctrl);
2111 (*priv->rxqs)[idx] = NULL;
2118 * Verify the Rx Queue list is empty
2121 * Pointer to Ethernet device.
2124 * The number of object not released.
2127 mlx5_rxq_verify(struct rte_eth_dev *dev)
2129 struct mlx5_priv *priv = dev->data->dev_private;
2130 struct mlx5_rxq_ctrl *rxq_ctrl;
2133 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2134 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2135 dev->data->port_id, rxq_ctrl->rxq.idx);
2142 * Get a Rx queue type.
2145 * Pointer to Ethernet device.
2150 * The Rx queue type.
2153 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
2155 struct mlx5_priv *priv = dev->data->dev_private;
2156 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2158 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
2159 rxq_ctrl = container_of((*priv->rxqs)[idx],
2160 struct mlx5_rxq_ctrl,
2162 return rxq_ctrl->type;
2164 return MLX5_RXQ_TYPE_UNDEFINED;
2168 * Create an indirection table.
2171 * Pointer to Ethernet device.
2173 * Queues entering in the indirection table.
2175 * Number of queues in the array.
2178 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2180 static struct mlx5_ind_table_obj *
2181 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2182 uint32_t queues_n, enum mlx5_ind_tbl_type type)
2184 struct mlx5_priv *priv = dev->data->dev_private;
2185 struct mlx5_ind_table_obj *ind_tbl;
2186 unsigned int i = 0, j = 0, k = 0;
2188 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
2189 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
2194 ind_tbl->type = type;
2195 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2196 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
2197 log2above(queues_n) :
2198 log2above(priv->config.ind_table_max_size);
2199 struct ibv_wq *wq[1 << wq_n];
2201 for (i = 0; i != queues_n; ++i) {
2202 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2206 wq[i] = rxq->obj->wq;
2207 ind_tbl->queues[i] = queues[i];
2209 ind_tbl->queues_n = queues_n;
2210 /* Finalise indirection table. */
2211 k = i; /* Retain value of i for use in error case. */
2212 for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
2214 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
2216 &(struct ibv_rwq_ind_table_init_attr){
2217 .log_ind_tbl_size = wq_n,
2221 if (!ind_tbl->ind_table) {
2225 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2226 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
2227 const unsigned int rqt_n =
2228 1 << (rte_is_power_of_2(queues_n) ?
2229 log2above(queues_n) :
2230 log2above(priv->config.ind_table_max_size));
2232 rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
2233 rqt_n * sizeof(uint32_t), 0,
2236 DRV_LOG(ERR, "port %u cannot allocate RQT resources",
2237 dev->data->port_id);
2241 rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
2242 rqt_attr->rqt_actual_size = rqt_n;
2243 for (i = 0; i != queues_n; ++i) {
2244 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2245 queues[i]);
2246 if (!rxq)
2247 goto error;
2248 rqt_attr->rq_list[i] = rxq->obj->rq->id;
2249 ind_tbl->queues[i] = queues[i];
2250 }
2251 k = i; /* Retain value of i for use in error case. */
2252 for (j = 0; k != rqt_n; ++k, ++j)
2253 rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
2254 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
2255 rqt_attr);
2256 mlx5_free(rqt_attr);
2257 if (!ind_tbl->rqt) {
2258 DRV_LOG(ERR, "port %u cannot create DevX RQT",
2259 dev->data->port_id);
2260 rte_errno = errno;
2261 goto error;
2262 }
2263 ind_tbl->queues_n = queues_n;
2264 }
2265 rte_atomic32_inc(&ind_tbl->refcnt);
2266 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2267 return ind_tbl;
2268 error:
2269 for (j = 0; j < i; j++)
2270 mlx5_rxq_release(dev, ind_tbl->queues[j]);
2271 mlx5_free(ind_tbl);
2272 DEBUG("port %u cannot create indirection table", dev->data->port_id);
2273 return NULL;
2274 }
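/*
 * Worked example (illustrative, not from the original sources): with
 * queues_n = 6 and, say, ind_table_max_size = 512, queues_n is not a power
 * of two, so the table is sized to 1 << log2above(512) = 512 entries. The
 * first 6 entries hold the configured queues and the trailing "k"/"j" copy
 * loop above replays them over the remaining entries:
 *
 *	entry[0..5] = queue[0..5]
 *	entry[6] = entry[0], entry[7] = entry[1], entry[8] = entry[2], ...
 *
 * so the hardware always indexes a valid queue no matter which of the 512
 * slots the Toeplitz hash selects.
 */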
2277 * Get an indirection table.
2280 * Pointer to Ethernet device.
2282 * Queues entering in the indirection table.
2284 * Number of queues in the array.
2287 * An indirection table if found.
2289 static struct mlx5_ind_table_obj *
2290 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2293 struct mlx5_priv *priv = dev->data->dev_private;
2294 struct mlx5_ind_table_obj *ind_tbl;
2296 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2297 if ((ind_tbl->queues_n == queues_n) &&
2298 (memcmp(ind_tbl->queues, queues,
2299 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2300 == 0))
2301 break;
2302 }
2303 if (ind_tbl) {
2304 unsigned int i;
2306 rte_atomic32_inc(&ind_tbl->refcnt);
2307 for (i = 0; i != ind_tbl->queues_n; ++i)
2308 mlx5_rxq_get(dev, ind_tbl->queues[i]);
2309 }
2310 return ind_tbl;
2311 }
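/*
 * Illustrative sketch (hypothetical caller, not from the original sources):
 * lookup and creation are meant to be combined, which is exactly what
 * mlx5_hrxq_new() does further below:
 *
 *	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 *	if (!ind_tbl)
 *		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
 *
 * A successful get takes one reference on the table and on every Rx queue it
 * contains, so the caller must eventually call mlx5_ind_table_obj_release()
 * regardless of which path produced the table.
 */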
2314 * Release an indirection table.
2317 * Pointer to Ethernet device.
2319 * Indirection table to release.
2322 * 1 while a reference on it exists, 0 when freed.
2325 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2326 struct mlx5_ind_table_obj *ind_tbl)
2330 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
2331 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
2332 claim_zero(mlx5_glue->destroy_rwq_ind_table
2333 (ind_tbl->ind_table));
2334 else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
2335 claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
2336 }
2337 for (i = 0; i != ind_tbl->queues_n; ++i)
2338 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
2339 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
2340 LIST_REMOVE(ind_tbl, next);
2341 mlx5_free(ind_tbl);
2342 return 0;
2343 }
2344 return 1;
2345 }
2347 /**
2348 * Verify the indirection table list is empty.
2349 *
2350 * @param dev
2351 * Pointer to Ethernet device.
2352 *
2353 * @return
2354 * The number of objects not released.
2355 */
2356 int
2357 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2359 struct mlx5_priv *priv = dev->data->dev_private;
2360 struct mlx5_ind_table_obj *ind_tbl;
2361 int ret = 0;
2363 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2364 DRV_LOG(DEBUG,
2365 "port %u indirection table obj %p still referenced",
2366 dev->data->port_id, (void *)ind_tbl);
2367 ++ret;
2368 }
2369 return ret;
2370 }
2372 /**
2373 * Create an Rx Hash queue.
2374 *
2375 * @param dev
2376 * Pointer to Ethernet device.
2377 * @param rss_key
2378 * RSS key for the Rx hash queue.
2379 * @param rss_key_len
2380 * RSS key length.
2381 * @param hash_fields
2382 * Verbs protocol hash field to make the RSS on.
2383 * @param queues
2384 * Queues entering in the hash queue. In case of empty hash_fields only the
2385 * first queue index will be taken for the indirection table.
2386 * @param queues_n
2387 * Number of queues.
2388 * @param tunnel
2389 * Tunnel type.
2390 *
2391 * @return
2392 * The Verbs/DevX object initialised index, 0 otherwise and rte_errno is set.
2393 */
2394 uint32_t
2395 mlx5_hrxq_new(struct rte_eth_dev *dev,
2396 const uint8_t *rss_key, uint32_t rss_key_len,
2397 uint64_t hash_fields,
2398 const uint16_t *queues, uint32_t queues_n,
2399 int tunnel __rte_unused)
2401 struct mlx5_priv *priv = dev->data->dev_private;
2402 struct mlx5_hrxq *hrxq;
2403 uint32_t hrxq_idx = 0;
2404 struct ibv_qp *qp = NULL;
2405 struct mlx5_ind_table_obj *ind_tbl;
2407 struct mlx5_devx_obj *tir = NULL;
2408 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
2409 struct mlx5_rxq_ctrl *rxq_ctrl =
2410 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
2412 queues_n = hash_fields ? queues_n : 1;
2413 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2414 if (!ind_tbl) {
2415 enum mlx5_ind_tbl_type type;
2417 type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
2418 MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
2419 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
2420 }
2421 if (!ind_tbl) {
2422 rte_errno = ENOMEM;
2423 return 0;
2424 }
2425 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2426 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2427 struct mlx5dv_qp_init_attr qp_init_attr;
2429 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2431 qp_init_attr.comp_mask =
2432 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2433 qp_init_attr.create_flags =
2434 MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2436 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2437 if (dev->data->dev_conf.lpbk_mode) {
2439 * Allow packet sent from NIC loop back
2440 * w/o source MAC check.
2442 qp_init_attr.comp_mask |=
2443 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2444 qp_init_attr.create_flags |=
2445 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2448 qp = mlx5_glue->dv_create_qp
2450 &(struct ibv_qp_init_attr_ex){
2451 .qp_type = IBV_QPT_RAW_PACKET,
2453 IBV_QP_INIT_ATTR_PD |
2454 IBV_QP_INIT_ATTR_IND_TABLE |
2455 IBV_QP_INIT_ATTR_RX_HASH,
2456 .rx_hash_conf = (struct ibv_rx_hash_conf){
2458 IBV_RX_HASH_FUNC_TOEPLITZ,
2459 .rx_hash_key_len = rss_key_len,
2461 (void *)(uintptr_t)rss_key,
2462 .rx_hash_fields_mask = hash_fields,
2464 .rwq_ind_tbl = ind_tbl->ind_table,
2469 qp = mlx5_glue->create_qp_ex
2471 &(struct ibv_qp_init_attr_ex){
2472 .qp_type = IBV_QPT_RAW_PACKET,
2474 IBV_QP_INIT_ATTR_PD |
2475 IBV_QP_INIT_ATTR_IND_TABLE |
2476 IBV_QP_INIT_ATTR_RX_HASH,
2477 .rx_hash_conf = (struct ibv_rx_hash_conf){
2479 IBV_RX_HASH_FUNC_TOEPLITZ,
2480 .rx_hash_key_len = rss_key_len,
2482 (void *)(uintptr_t)rss_key,
2483 .rx_hash_fields_mask = hash_fields,
2485 .rwq_ind_tbl = ind_tbl->ind_table,
2493 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2494 struct mlx5_devx_tir_attr tir_attr;
2498 /* Enable TIR LRO only if all the queues were configured for. */
2499 for (i = 0; i < queues_n; ++i) {
2500 if (!(*priv->rxqs)[queues[i]]->lro) {
2505 memset(&tir_attr, 0, sizeof(tir_attr));
2506 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2507 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2508 tir_attr.tunneled_offload_en = !!tunnel;
2509 /* If needed, translate hash_fields bitmap to PRM format. */
2511 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2512 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2513 hash_fields & IBV_RX_HASH_INNER ?
2514 &tir_attr.rx_hash_field_selector_inner :
2515 &tir_attr.rx_hash_field_selector_outer;
2517 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2518 &tir_attr.rx_hash_field_selector_outer;
2521 /* 1 bit: 0: IPv4, 1: IPv6. */
2522 rx_hash_field_select->l3_prot_type =
2523 !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
2524 /* 1 bit: 0: TCP, 1: UDP. */
2525 rx_hash_field_select->l4_prot_type =
2526 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
2527 /* Bitmask which sets which fields to use in RX Hash. */
2528 rx_hash_field_select->selected_fields =
2529 ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
2530 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
2531 (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
2532 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
2533 (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
2534 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
2535 (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
2536 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
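/*
 * Worked example (illustrative): for outer IPv4/UDP RSS, hash_fields
 * carries the IPv4 source/destination and UDP port bits, so the code above
 * yields l3_prot_type = 0 (IPv4), l4_prot_type = 1 (UDP) and
 * selected_fields with the ..._SRC_IP, ..._DST_IP, ..._L4_SPORT and
 * ..._L4_DPORT bits set, which is the PRM encoding the TIR expects.
 */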
2538 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
2539 tir_attr.transport_domain = priv->sh->td->id;
2540 else
2541 tir_attr.transport_domain = priv->sh->tdn;
2542 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key,
2543 MLX5_RSS_HASH_KEY_LEN);
2544 tir_attr.indirect_table = ind_tbl->rqt->id;
2545 if (dev->data->dev_conf.lpbk_mode)
2546 tir_attr.self_lb_block =
2547 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
2549 tir_attr.lro_timeout_period_usecs =
2550 priv->config.lro.timeout;
2551 tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
2552 tir_attr.lro_enable_mask =
2553 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2554 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
2556 tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
2557 if (!tir) {
2558 DRV_LOG(ERR, "port %u cannot create DevX TIR",
2559 dev->data->port_id);
2560 rte_errno = errno;
2561 goto error;
2562 }
2563 }
2564 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2565 if (!hrxq)
2566 goto error;
2567 hrxq->ind_table = ind_tbl;
2568 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2569 hrxq->qp = qp;
2570 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2571 hrxq->action =
2572 mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2573 if (!hrxq->action) {
2574 rte_errno = errno;
2575 goto error;
2576 }
2577 #endif
2578 } else {
2579 hrxq->tir = tir;
2580 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2581 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
2582 (hrxq->tir->obj);
2583 if (!hrxq->action) {
2584 rte_errno = errno;
2585 goto error;
2586 }
2587 #endif
2588 }
2589 hrxq->rss_key_len = rss_key_len;
2590 hrxq->hash_fields = hash_fields;
2591 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2592 rte_atomic32_inc(&hrxq->refcnt);
2593 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
2594 hrxq, next);
2595 return hrxq_idx;
2596 error:
2597 err = rte_errno; /* Save rte_errno before cleanup. */
2598 mlx5_ind_table_obj_release(dev, ind_tbl);
2599 if (qp)
2600 claim_zero(mlx5_glue->destroy_qp(qp));
2601 else if (tir)
2602 claim_zero(mlx5_devx_cmd_destroy(tir));
2603 rte_errno = err; /* Restore rte_errno. */
2604 return 0;
2605 }
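/*
 * Illustrative sketch (hypothetical caller, not from the original sources):
 * flow code typically reuses a cached hash Rx queue and only creates a new
 * one on a miss, falling back to the default Toeplitz key when the
 * application did not provide one:
 *
 *	uint32_t hrxq_idx = mlx5_hrxq_get(dev, rss_hash_default_key,
 *					  MLX5_RSS_HASH_KEY_LEN, hash_fields,
 *					  queues, queues_n);
 *
 *	if (!hrxq_idx)
 *		hrxq_idx = mlx5_hrxq_new(dev, rss_hash_default_key,
 *					 MLX5_RSS_HASH_KEY_LEN, hash_fields,
 *					 queues, queues_n, 0);
 *
 * Both calls return an index into the MLX5_IPOOL_HRXQ pool, 0 meaning
 * failure (or, for the get, no matching cached queue).
 */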
2608 * Get an Rx Hash queue.
2611 * Pointer to Ethernet device.
2613 * RSS configuration for the Rx hash queue.
2615 * Queues entering in hash queue. In case of empty hash_fields only the
2616 * first queue index will be taken for the indirection table.
2621 * A hash Rx queue index on success, 0 otherwise.
2624 mlx5_hrxq_get(struct rte_eth_dev *dev,
2625 const uint8_t *rss_key, uint32_t rss_key_len,
2626 uint64_t hash_fields,
2627 const uint16_t *queues, uint32_t queues_n)
2629 struct mlx5_priv *priv = dev->data->dev_private;
2630 struct mlx5_hrxq *hrxq;
2631 uint32_t idx;
2633 queues_n = hash_fields ? queues_n : 1;
2634 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2635 hrxq, next) {
2636 struct mlx5_ind_table_obj *ind_tbl;
2638 if (hrxq->rss_key_len != rss_key_len)
2639 continue;
2640 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2641 continue;
2642 if (hrxq->hash_fields != hash_fields)
2643 continue;
2644 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2645 if (!ind_tbl)
2646 continue;
2647 if (ind_tbl != hrxq->ind_table) {
2648 mlx5_ind_table_obj_release(dev, ind_tbl);
2649 continue;
2650 }
2651 rte_atomic32_inc(&hrxq->refcnt);
2652 return idx;
2653 }
2654 return 0;
2655 }
2658 * Release the hash Rx queue.
2661 * Pointer to Ethernet device.
2663 * Index to Hash Rx queue to release.
2666 * 1 while a reference on it exists, 0 when freed.
2669 mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2671 struct mlx5_priv *priv = dev->data->dev_private;
2672 struct mlx5_hrxq *hrxq;
2674 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2675 if (!hrxq)
2676 return 0;
2677 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2678 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2679 mlx5_glue->destroy_flow_action(hrxq->action);
2681 if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
2682 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2683 else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
2684 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
2685 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2686 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
2687 hrxq_idx, hrxq, next);
2688 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2689 return 0;
2690 }
2691 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
2692 return 1;
2693 }
2695 /**
2696 * Verify the hash Rx queue list is empty.
2697 *
2698 * @param dev
2699 * Pointer to Ethernet device.
2700 *
2701 * @return
2702 * The number of objects not released.
2703 */
2704 int
2705 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2707 struct mlx5_priv *priv = dev->data->dev_private;
2708 struct mlx5_hrxq *hrxq;
2709 uint32_t idx;
2710 int ret = 0;
2712 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2713 hrxq, next) {
2714 DRV_LOG(DEBUG,
2715 "port %u hash Rx queue %p still referenced",
2716 dev->data->port_id, (void *)hrxq);
2717 ++ret;
2718 }
2719 return ret;
2720 }
2723 * Create a drop Rx queue Verbs/DevX object.
2726 * Pointer to Ethernet device.
2729 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2731 static struct mlx5_rxq_obj *
2732 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
2734 struct mlx5_priv *priv = dev->data->dev_private;
2735 struct ibv_context *ctx = priv->sh->ctx;
2737 struct ibv_wq *wq = NULL;
2738 struct mlx5_rxq_obj *rxq;
2740 if (priv->drop_queue.rxq)
2741 return priv->drop_queue.rxq;
2742 cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2744 DEBUG("port %u cannot allocate CQ for drop queue",
2745 dev->data->port_id);
2749 wq = mlx5_glue->create_wq(ctx,
2750 &(struct ibv_wq_init_attr){
2751 .wq_type = IBV_WQT_RQ,
2758 DEBUG("port %u cannot allocate WQ for drop queue",
2759 dev->data->port_id);
2763 rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
2765 DEBUG("port %u cannot allocate drop Rx queue memory",
2766 dev->data->port_id);
2770 rxq->cq = cq;
2771 rxq->wq = wq;
2772 priv->drop_queue.rxq = rxq;
2773 return rxq;
2774 error:
2775 if (wq)
2776 claim_zero(mlx5_glue->destroy_wq(wq));
2777 if (cq)
2778 claim_zero(mlx5_glue->destroy_cq(cq));
2779 return NULL;
2780 }
2783 * Release a drop Rx queue Verbs/DevX object.
2784 *
2785 * @param dev
2786 * Pointer to Ethernet device.
2790 */
2791 static void
2792 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2794 struct mlx5_priv *priv = dev->data->dev_private;
2795 struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2797 if (rxq->wq)
2798 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2799 if (rxq->cq)
2800 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2801 mlx5_free(rxq);
2802 priv->drop_queue.rxq = NULL;
2803 }
2806 * Create a drop indirection table.
2809 * Pointer to Ethernet device.
2812 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2814 static struct mlx5_ind_table_obj *
2815 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2817 struct mlx5_priv *priv = dev->data->dev_private;
2818 struct mlx5_ind_table_obj *ind_tbl;
2819 struct mlx5_rxq_obj *rxq;
2820 struct mlx5_ind_table_obj tmpl;
2822 rxq = mlx5_rxq_obj_drop_new(dev);
2823 if (!rxq)
2824 return NULL;
2825 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2827 &(struct ibv_rwq_ind_table_init_attr){
2828 .log_ind_tbl_size = 0,
2829 .ind_tbl = &rxq->wq,
2832 if (!tmpl.ind_table) {
2833 DEBUG("port %u cannot allocate indirection table for drop"
2835 dev->data->port_id);
2839 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,
2845 ind_tbl->ind_table = tmpl.ind_table;
2846 return ind_tbl;
2847 error:
2848 mlx5_rxq_obj_drop_release(dev);
2849 return NULL;
2850 }
2853 * Release a drop indirection table.
2856 * Pointer to Ethernet device.
2859 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2861 struct mlx5_priv *priv = dev->data->dev_private;
2862 struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2864 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2865 mlx5_rxq_obj_drop_release(dev);
2866 mlx5_free(ind_tbl);
2867 priv->drop_queue.hrxq->ind_table = NULL;
2868 }
2871 * Create a drop Rx Hash queue.
2874 * Pointer to Ethernet device.
2877 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2880 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2882 struct mlx5_priv *priv = dev->data->dev_private;
2883 struct mlx5_ind_table_obj *ind_tbl = NULL;
2884 struct ibv_qp *qp = NULL;
2885 struct mlx5_hrxq *hrxq = NULL;
2887 if (priv->drop_queue.hrxq) {
2888 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2889 return priv->drop_queue.hrxq;
2890 }
2891 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2894 "port %u cannot allocate memory for drop queue",
2895 dev->data->port_id);
2899 priv->drop_queue.hrxq = hrxq;
2900 ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2901 if (!ind_tbl)
2902 goto error;
2903 hrxq->ind_table = ind_tbl;
2904 qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2905 &(struct ibv_qp_init_attr_ex){
2906 .qp_type = IBV_QPT_RAW_PACKET,
2908 IBV_QP_INIT_ATTR_PD |
2909 IBV_QP_INIT_ATTR_IND_TABLE |
2910 IBV_QP_INIT_ATTR_RX_HASH,
2911 .rx_hash_conf = (struct ibv_rx_hash_conf){
2913 IBV_RX_HASH_FUNC_TOEPLITZ,
2914 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2915 .rx_hash_key = rss_hash_default_key,
2916 .rx_hash_fields_mask = 0,
2918 .rwq_ind_tbl = ind_tbl->ind_table,
2922 DEBUG("port %u cannot allocate QP for drop queue",
2923 dev->data->port_id);
2928 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2929 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2930 if (!hrxq->action) {
2935 rte_atomic32_set(&hrxq->refcnt, 1);
2936 return hrxq;
2937 error:
2938 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2939 if (hrxq && hrxq->action)
2940 mlx5_glue->destroy_flow_action(hrxq->action);
2943 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2944 if (ind_tbl)
2945 mlx5_ind_table_obj_drop_release(dev);
2946 if (hrxq) {
2947 priv->drop_queue.hrxq = NULL;
2948 mlx5_free(hrxq);
2949 }
2950 return NULL;
2951 }
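/*
 * Illustrative sketch (hypothetical caller, not from the original sources):
 * flow rules with a drop fate still need a QP/TIR to reference, so the flow
 * layer takes the shared drop hash Rx queue and gives it back when the rule
 * is destroyed:
 *
 *	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
 *
 *	if (drop == NULL)
 *		return -rte_errno;
 *	(attach drop->action or drop->qp to the rule here)
 *	mlx5_hrxq_drop_release(dev);
 *
 * The object is cached in priv->drop_queue.hrxq and reference counted, so
 * repeated calls are cheap.
 */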
2954 * Release a drop hash Rx queue.
2957 * Pointer to Ethernet device.
2960 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2962 struct mlx5_priv *priv = dev->data->dev_private;
2963 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2965 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2966 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2967 mlx5_glue->destroy_flow_action(hrxq->action);
2969 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2970 mlx5_ind_table_obj_drop_release(dev);
2971 mlx5_free(hrxq);
2972 priv->drop_queue.hrxq = NULL;
2973 }
2974 }
2978 * Set the Rx queue timestamp conversion parameters.
2979 *
2980 * @param dev
2981 * Pointer to the Ethernet device structure.
2982 */
2983 void
2984 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2986 struct mlx5_priv *priv = dev->data->dev_private;
2987 struct mlx5_dev_ctx_shared *sh = priv->sh;
2988 struct mlx5_rxq_data *data;
2989 unsigned int i;
2991 for (i = 0; i != priv->rxqs_n; ++i) {
2992 if (!(*priv->rxqs)[i])
2993 continue;
2994 data = (*priv->rxqs)[i];
2995 data->sh = sh;
2996 data->rt_timestamp = priv->config.rt_timestamp;
2997 }
2998 }