/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_io.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"

/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};

/* Length of the default RSS hash key. */
static_assert(MLX5_RSS_HASH_KEY_LEN ==
	      (unsigned int)sizeof(rss_hash_default_key),
	      "wrong RSS default key size.");

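/*
 * Note: this 40-byte key is used whenever the application does not supply
 * an RSS key of its own, so it is also what applications should expect to
 * read back when querying the RSS configuration with a default setup.
 */
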
/**
 * Check whether Multi-Packet RQ can be enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.mprq.enabled &&
	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
		return 1;
	return -ENOTSUP;
}

/**
 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
	return rxq->strd_num_n > 0;
}

/**
 * Check whether Multi-Packet RQ is enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;
	uint16_t n = 0;
	uint16_t n_ibv = 0;

	if (mlx5_check_mprq_support(dev) < 0)
		return 0;
	/* All the configured queues should be enabled. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
			(rxq, struct mlx5_rxq_ctrl, rxq);

		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		n_ibv++;
		if (mlx5_rxq_mprq_enabled(rxq))
			++n;
	}
	/* Multi-Packet RQ can't be partially configured. */
	MLX5_ASSERT(n == 0 || n == n_ibv);
	return n == n_ibv;
}

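/*
 * Example: with four standard Rx queues configured, mlx5_mprq_enabled()
 * reports MPRQ as enabled only when all four were created with a striding
 * RQ; a mix of MPRQ and non-MPRQ standard queues trips the assertion above.
 */
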
/**
 * Allocate RX queue elements for Multi-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	unsigned int wqe_n = 1 << rxq->elts_n;
	unsigned int i;
	int err;

	/* Iterate on segments. */
	for (i = 0; i <= wqe_n; ++i) {
		struct mlx5_mprq_buf *buf;

		if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
			DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
			rte_errno = ENOMEM;
			goto error;
		}
		if (i < wqe_n)
			(*rxq->mprq_bufs)[i] = buf;
		else
			rxq->mprq_repl = buf;
	}
	DRV_LOG(DEBUG,
		"port %u Rx queue %u allocated and configured %u segments",
		rxq->port_id, rxq->idx, wqe_n);
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	wqe_n = i;
	for (i = 0; (i != wqe_n); ++i) {
		if ((*rxq->mprq_bufs)[i] != NULL)
			rte_mempool_put(rxq->mprq_mp,
					(*rxq->mprq_bufs)[i]);
		(*rxq->mprq_bufs)[i] = NULL;
	}
	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
		rxq->port_id, rxq->idx);
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

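/*
 * Note on rxq_alloc_elts_mprq(): the allocation loop intentionally runs
 * wqe_n + 1 times -- the first wqe_n buffers populate the ring, while the
 * extra one is parked in rxq->mprq_repl as a ready-made replacement for
 * the replenish path.
 */
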
/**
 * Allocate RX queue elements for Single-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
	unsigned int i;
	int err;

	/* Iterate on segments. */
	for (i = 0; (i != elts_n); ++i) {
		struct rte_mbuf *buf;

		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
		if (buf == NULL) {
			DRV_LOG(ERR, "port %u empty mbuf pool",
				PORT_ID(rxq_ctrl->priv));
			rte_errno = ENOMEM;
			goto error;
		}
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
		MLX5_ASSERT(!buf->next);
		/* Only the first segment keeps headroom. */
		if (i % sges_n)
			SET_DATA_OFF(buf, 0);
		PORT(buf) = rxq_ctrl->rxq.port_id;
		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
		PKT_LEN(buf) = DATA_LEN(buf);
		NB_SEGS(buf) = 1;
		(*rxq_ctrl->rxq.elts)[i] = buf;
	}
	/* If Rx vector is activated. */
	if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
		struct rte_pktmbuf_pool_private *priv =
			(struct rte_pktmbuf_pool_private *)
				rte_mempool_get_priv(rxq_ctrl->rxq.mp);
		int j;

		/* Initialize default rearm_data for vPMD. */
		mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
		rte_mbuf_refcnt_set(mbuf_init, 1);
		mbuf_init->nb_segs = 1;
		mbuf_init->port = rxq->port_id;
		if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
			mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
		/*
		 * prevent compiler reordering:
		 * rearm_data covers previous fields.
		 */
		rte_compiler_barrier();
		rxq->mbuf_initializer =
			*(rte_xmm_t *)&mbuf_init->rearm_data;
		/* Padding with a fake mbuf for vectorized Rx. */
		for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
			(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
	}
	DRV_LOG(DEBUG,
		"port %u Rx queue %u allocated and configured %u segments"
		" (max %u packets)",
		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
		elts_n / (1 << rxq_ctrl->rxq.sges_n));
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	elts_n = i;
	for (i = 0; (i != elts_n); ++i) {
		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
		(*rxq_ctrl->rxq.elts)[i] = NULL;
	}
	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Allocate RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
	       rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
}

/**
 * Free RX queue elements for Multi-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	uint16_t i;

	DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
		rxq->port_id, rxq->idx);
	if (rxq->mprq_bufs == NULL)
		return;
	MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
		if ((*rxq->mprq_bufs)[i] != NULL)
			mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
		(*rxq->mprq_bufs)[i] = NULL;
	}
	if (rxq->mprq_repl != NULL) {
		mlx5_mprq_buf_free(rxq->mprq_repl);
		rxq->mprq_repl = NULL;
	}
}

/**
 * Free RX queue elements for Single-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	const uint16_t q_n = (1 << rxq->elts_n);
	const uint16_t q_mask = q_n - 1;
	uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
	uint16_t i;

	DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
		PORT_ID(rxq_ctrl->priv), rxq->idx);
	if (rxq->elts == NULL)
		return;
	/*
	 * Some mbufs in the ring belong to the application.
	 * They cannot be freed.
	 */
	if (mlx5_rxq_check_vec_support(rxq) > 0) {
		for (i = 0; i < used; ++i)
			(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
		rxq->rq_pi = rxq->rq_ci;
	}
	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
		if ((*rxq->elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq->elts)[i]);
		(*rxq->elts)[i] = NULL;
	}
}

/**
 * Free RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
		rxq_free_elts_mprq(rxq_ctrl);
	else
		rxq_free_elts_sprq(rxq_ctrl);
}

/**
 * Returns the per-queue supported offloads.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
			     DEV_RX_OFFLOAD_TIMESTAMP |
			     DEV_RX_OFFLOAD_JUMBO_FRAME |
			     DEV_RX_OFFLOAD_RSS_HASH);

	if (config->hw_fcs_strip)
		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
	if (config->hw_csum)
		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
			     DEV_RX_OFFLOAD_UDP_CKSUM |
			     DEV_RX_OFFLOAD_TCP_CKSUM);
	if (config->hw_vlan_strip)
		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	if (MLX5_LRO_SUPPORTED(dev))
		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	return offloads;
}

/**
 * Returns the per-port supported offloads.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx5_get_rx_port_offloads(void)
{
	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;

	return offloads;
}

/**
 * Verify if the queue can be released.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   1 if the queue can be released
 *   0 if the queue can not be released, there are references to it.
 *   Negative errno and rte_errno is set if queue doesn't exist.
 */
static int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (!(*priv->rxqs)[idx]) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
	return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
}

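/*
 * Note on mlx5_rxq_releasable(): a refcnt of exactly 1 means the only
 * remaining reference is the one taken at queue creation, i.e. no other
 * user (flow, indirection table, interrupt path) should still hold the
 * queue.
 */
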
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
static void
rxq_sync_cq(struct mlx5_rxq_data *rxq)
{
	const uint16_t cqe_n = 1 << rxq->cqe_n;
	const uint16_t cqe_mask = cqe_n - 1;
	volatile struct mlx5_cqe *cqe;
	int ret, i;

	i = cqe_n;
	do {
		cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
		ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_HW_OWN)
			break;
		if (ret == MLX5_CQE_STATUS_ERR) {
			rxq->cq_ci++;
			continue;
		}
		MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
		if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
			rxq->cq_ci++;
			continue;
		}
		/* Compute the next non compressed CQE. */
		rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
	} while (--i);
	/* Move all CQEs to HW ownership, including possible MiniCQEs. */
	for (i = 0; i < cqe_n; i++) {
		cqe = &(*rxq->cqes)[i];
		cqe->op_own = MLX5_CQE_INVALIDATE;
	}
	/* Resync CQE and WQE (WQ in RESET state). */
	rte_cio_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	rte_cio_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(0);
}

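/*
 * rxq_sync_cq() works in two passes: it first consumes every CQE still
 * owned by software, stepping cq_ci over a whole compression session at
 * once by using the byte_cnt field of the compressed CQE, and then stamps
 * the entire CQ with MLX5_CQE_INVALIDATE so that hardware owns all entries
 * before the doorbells are resynchronized while the WQ sits in RESET.
 */
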
/**
 * Rx queue stop. Device queue goes to the RESET state,
 * all involved mbufs are freed from WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
		struct ibv_wq_attr mod = {
			.attr_mask = IBV_WQ_ATTR_STATE,
			.wq_state = IBV_WQS_RESET,
		};

		ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
	} else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
		struct mlx5_devx_modify_rq_attr rq_attr;

		memset(&rq_attr, 0, sizeof(rq_attr));
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
	}
	if (ret) {
		DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	/* Remove all processed CQEs. */
	rxq_sync_cq(rxq);
	/* Free all involved mbufs. */
	rxq_free_elts(rxq_ctrl);
	/* Set the actual queue state. */
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

/**
 * Rx queue stop. Device queue goes to the RESET state,
 * all involved mbufs are freed from WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	int ret;

	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_HAIRPIN) {
		DRV_LOG(ERR, "Hairpin queue can't be stopped");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;
	/*
	 * Vectorized Rx burst requires the CQ and RQ indices
	 * synchronized, that might be broken on RQ restart
	 * and cause Rx malfunction, so queue stopping is
	 * not supported if vectorized Rx burst is engaged.
	 * The routine pointer depends on the process
	 * type, should perform check there.
	 */
	if (pkt_burst == mlx5_rx_burst_vec) {
		DRV_LOG(ERR, "Rx queue stop is not supported "
			"for vectorized Rx");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_RX_STOP);
	} else {
		ret = mlx5_rx_queue_stop_primary(dev, idx);
	}
	return ret;
}

/**
 * Rx queue start. Device queue goes to the ready state,
 * all required mbufs are allocated and WQ is replenished.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Allocate needed buffers. */
	ret = rxq_alloc_elts(rxq_ctrl);
	if (ret) {
		DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
		rte_errno = errno;
		return ret;
	}
	rte_cio_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	rte_cio_wmb();
	/* Reset RQ consumer before moving queue to READY state. */
	*rxq->rq_db = rte_cpu_to_be_32(0);
	rte_cio_wmb();
	if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
		struct ibv_wq_attr mod = {
			.attr_mask = IBV_WQ_ATTR_STATE,
			.wq_state = IBV_WQS_RDY,
		};

		ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
	} else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
		struct mlx5_devx_modify_rq_attr rq_attr;

		memset(&rq_attr, 0, sizeof(rq_attr));
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
	}
	if (ret) {
		DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	/* Reinitialize RQ - set WQEs. */
	mlx5_rxq_initialize(rxq);
	rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
	/* Set actual queue state. */
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

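/*
 * The rte_cio_wmb() calls around the doorbell record updates above ensure
 * the CQ and RQ doorbell writes are visible to the device before the queue
 * is moved to the READY state, where hardware starts reading them.
 */
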
/**
 * Rx queue start. Device queue goes to the ready state,
 * all required mbufs are allocated and WQ is replenished.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
{
	int ret;

	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_HAIRPIN) {
		DRV_LOG(ERR, "Hairpin queue can't be started");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_RX_START);
	} else {
		ret = mlx5_rx_queue_start_primary(dev, idx);
	}
	return ret;
}

/**
 * Rx queue presetup checks.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!rte_is_power_of_2(*desc)) {
		*desc = 1 << log2above(*desc);
		DRV_LOG(WARNING,
			"port %u increased number of descriptors in Rx queue %u"
			" to the next power of two (%d)",
			dev->data->port_id, idx, *desc);
	}
	DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
		dev->data->port_id, idx, *desc);
	if (idx >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
			dev->data->port_id, idx, priv->rxqs_n);
		rte_errno = EOVERFLOW;
		return -rte_errno;
	}
	if (!mlx5_rxq_releasable(dev, idx)) {
		DRV_LOG(ERR, "port %u unable to release queue index %u",
			dev->data->port_id, idx);
		rte_errno = EBUSY;
		return -rte_errno;
	}
	mlx5_rxq_release(dev, idx);
	return 0;
}

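/*
 * Example: requesting 1000 descriptors is not a power of two, so the
 * pre-setup check above rounds it up to 1 << log2above(1000) = 1024 and
 * logs a warning before the queue is created.
 */
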
/**
 * DPDK callback to configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	int res;

	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
	if (res)
		return res;
	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
	if (!rxq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
		dev->data->port_id, idx);
	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
	return 0;
}

/**
 * DPDK callback to configure a hairpin Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param hairpin_conf
 *   Hairpin configuration parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			    uint16_t desc,
			    const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	int res;

	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
	if (res)
		return res;
	if (hairpin_conf->peer_count != 1 ||
	    hairpin_conf->peers[0].port != dev->data->port_id ||
	    hairpin_conf->peers[0].queue >= priv->txqs_n) {
		DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u:"
			" invalid hairpin configuration", dev->data->port_id,
			idx);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
	if (!rxq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
		dev->data->port_id, idx);
	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
	return 0;
}

/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 */
void
mlx5_rx_queue_release(void *dpdk_rxq)
{
	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_priv *priv;

	if (rxq == NULL)
		return;
	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	priv = rxq_ctrl->priv;
	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
		rte_panic("port %u Rx queue %u is still used by a flow and"
			  " cannot be removed\n",
			  PORT_ID(priv), rxq->idx);
	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
}

/**
 * Get an Rx queue Verbs/DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs/DevX object if it exists.
 */
static struct mlx5_rxq_obj *
mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (idx >= priv->rxqs_n)
		return NULL;
	if (!rxq_data)
		return NULL;
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (rxq_ctrl->obj)
		rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
	return rxq_ctrl->obj;
}

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
}

/**
 * Release Rx hairpin related resources.
 *
 * @param rxq_obj
 *   Hairpin Rx queue object.
 */
static void
rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
{
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };

	MLX5_ASSERT(rxq_obj);
	rq_attr.state = MLX5_RQC_STATE_RST;
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
	claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
}

/**
 * Release an Rx verbs/DevX queue object.
 *
 * @param rxq_obj
 *   Verbs/DevX Rx queue object.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	struct mlx5_priv *priv = rxq_obj->rxq_ctrl->priv;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq_obj->rxq_ctrl;

	MLX5_ASSERT(rxq_obj);
	if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
		switch (rxq_obj->type) {
		case MLX5_RXQ_OBJ_TYPE_IBV:
			MLX5_ASSERT(rxq_obj->wq);
			MLX5_ASSERT(rxq_obj->ibv_cq);
			rxq_free_elts(rxq_ctrl);
			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
			claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
			if (rxq_obj->ibv_channel)
				claim_zero(mlx5_glue->destroy_comp_channel
					   (rxq_obj->ibv_channel));
			break;
		case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
			MLX5_ASSERT(rxq_obj->rq);
			MLX5_ASSERT(rxq_obj->devx_cq);
			rxq_free_elts(rxq_ctrl);
			claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
			claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
			claim_zero(mlx5_release_dbr(&priv->dbrpgs,
						    rxq_ctrl->rq_dbr_umem_id,
						    rxq_ctrl->rq_dbr_offset));
			claim_zero(mlx5_release_dbr(&priv->dbrpgs,
						    rxq_ctrl->cq_dbr_umem_id,
						    rxq_ctrl->cq_dbr_offset));
			if (rxq_obj->devx_channel)
				mlx5_glue->devx_destroy_event_channel
						(rxq_obj->devx_channel);
			rxq_release_devx_rq_resources(rxq_ctrl);
			rxq_release_devx_cq_resources(rxq_ctrl);
			break;
		case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
			MLX5_ASSERT(rxq_obj->rq);
			rxq_obj_hairpin_release(rxq_obj);
			break;
		}
		LIST_REMOVE(rxq_obj, next);
		mlx5_free(rxq_obj);
		return 0;
	}
	return 1;
}

/**
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	unsigned int count = 0;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	if (!dev->data->dev_conf.intr_conf.rxq)
		return 0;
	mlx5_rx_intr_vec_disable(dev);
	intr_handle->intr_vec = mlx5_malloc(0,
				n * sizeof(intr_handle->intr_vec[0]),
				0, SOCKET_ID_ANY);
	if (intr_handle->intr_vec == NULL) {
		DRV_LOG(ERR,
			"port %u failed to allocate memory for interrupt"
			" vector, Rx interrupts will not be supported",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	intr_handle->type = RTE_INTR_HANDLE_EXT;
	for (i = 0; i != n; ++i) {
		/* This rxq obj must not be released in this function. */
		struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
		int rc;

		/* Skip queues that cannot request interrupts. */
		if (!rxq_obj || (!rxq_obj->ibv_channel &&
				 !rxq_obj->devx_channel)) {
			/* Use invalid intr_vec[] index to disable entry. */
			intr_handle->intr_vec[i] =
				RTE_INTR_VEC_RXTX_OFFSET +
				RTE_MAX_RXTX_INTR_VEC_ID;
			continue;
		}
		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
			DRV_LOG(ERR,
				"port %u too many Rx queues for interrupt"
				" vector size (%d), Rx interrupts cannot be"
				" enabled",
				dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
			mlx5_rx_intr_vec_disable(dev);
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
		if (rc < 0) {
			DRV_LOG(ERR,
				"port %u failed to make Rx interrupt file"
				" descriptor %d non-blocking for queue index"
				" %d",
				dev->data->port_id, rxq_obj->fd, i);
			mlx5_rx_intr_vec_disable(dev);
			rte_errno = errno;
			return -rte_errno;
		}
		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
		intr_handle->efds[count] = rxq_obj->fd;
		count++;
	}
	if (!count)
		mlx5_rx_intr_vec_disable(dev);
	else
		intr_handle->nb_efd = count;
	return 0;
}

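/*
 * intr_vec[] encoding used above: an interrupt-capable queue i maps to
 * epoll entry RTE_INTR_VEC_RXTX_OFFSET + count (with efds[count] holding
 * its channel fd), while queues without an event channel get the
 * out-of-range sentinel RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID
 * so they are skipped on disable.
 */
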
/**
 * Clean up Rx interrupts handler.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	unsigned int i;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	if (!dev->data->dev_conf.intr_conf.rxq)
		return;
	if (!intr_handle->intr_vec)
		goto free;
	for (i = 0; i != n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		struct mlx5_rxq_data *rxq_data;

		if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
		    RTE_MAX_RXTX_INTR_VEC_ID)
			continue;
		/*
		 * Need to access directly the queue to release the reference
		 * kept in mlx5_rx_intr_vec_enable().
		 */
		rxq_data = (*priv->rxqs)[i];
		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
		if (rxq_ctrl->obj)
			mlx5_rxq_obj_release(rxq_ctrl->obj);
	}
free:
	rte_intr_free_epoll_fd(intr_handle);
	if (intr_handle->intr_vec)
		mlx5_free(intr_handle->intr_vec);
	intr_handle->nb_efd = 0;
	intr_handle->intr_vec = NULL;
}

/**
 * MLX5 CQ notification.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 * @param sq_n_rxq
 *   Sequence number per receive queue.
 */
static void
mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
{
	int sq_n = 0;
	uint32_t doorbell_hi;
	uint64_t doorbell;
	void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;

	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
	doorbell = (uint64_t)doorbell_hi << 32;
	doorbell |= rxq->cqn;
	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
			 cq_db_reg, rxq->uar_lock_cq);
}

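/*
 * Doorbell layout written by mlx5_arm_cq(): the high 32 bits carry the arm
 * sequence number and the current consumer index (so hardware can ignore a
 * stale arm request), the low 32 bits carry the CQ number. The high word is
 * also mirrored into the arm doorbell record at cq_db[MLX5_CQ_ARM_DB].
 */
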
/**
 * DPDK callback for Rx queue interrupt enable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_ctrl *rxq_ctrl;

	rxq_data = (*priv->rxqs)[rx_queue_id];
	if (!rxq_data) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (rxq_ctrl->irq) {
		struct mlx5_rxq_obj *rxq_obj;

		rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
		if (!rxq_obj) {
			rte_errno = EINVAL;
			return -rte_errno;
		}
		mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
		mlx5_rxq_obj_release(rxq_obj);
	}
	return 0;
}

/**
 * DPDK callback for Rx queue interrupt disable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_rxq_obj *rxq_obj = NULL;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret = 0;

	rxq_data = (*priv->rxqs)[rx_queue_id];
	if (!rxq_data) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (!rxq_ctrl->irq)
		return 0;
	rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
	if (!rxq_obj) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
		ret = mlx5_glue->get_cq_event(rxq_obj->ibv_channel, &ev_cq,
					      &ev_ctx);
		if (ret < 0 || ev_cq != rxq_obj->ibv_cq)
			goto exit;
		mlx5_glue->ack_cq_events(rxq_obj->ibv_cq, 1);
	} else if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
#ifdef HAVE_IBV_DEVX_EVENT
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
				    + 128];
		} out;

		ret = mlx5_glue->devx_get_event
				(rxq_obj->devx_channel, &out.event_resp,
				 sizeof(out.buf));
		if (ret < 0 || out.event_resp.cookie !=
				(uint64_t)(uintptr_t)rxq_obj->devx_cq)
			goto exit;
#endif /* HAVE_IBV_DEVX_EVENT */
	}
	rxq_data->cq_arm_sn++;
	mlx5_rxq_obj_release(rxq_obj);
	return 0;
exit:
	/*
	 * For ret < 0 save the errno (may be EAGAIN which means the get_event
	 * function was called before receiving one).
	 */
	if (ret < 0)
		rte_errno = errno;
	else
		rte_errno = EINVAL;
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (rxq_obj)
		mlx5_rxq_obj_release(rxq_obj);
	if (ret != EAGAIN)
		DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
			dev->data->port_id, rx_queue_id);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Create a CQ Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param priv
 *   Pointer to device private data.
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqe_n
 *   Number of CQEs in CQ.
 * @param rxq_obj
 *   Pointer to Rx queue object data.
 *
 * @return
 *   The Verbs object initialised, NULL otherwise and rte_errno is set.
 */
static struct ibv_cq *
mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
		struct mlx5_rxq_data *rxq_data,
		unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
{
	struct {
		struct ibv_cq_init_attr_ex ibv;
		struct mlx5dv_cq_init_attr mlx5;
	} cq_attr;

	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
		.cqe = cqe_n,
		.channel = rxq_obj->ibv_channel,
		.comp_mask = 0,
	};
	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
		.comp_mask = 0,
	};
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
		cq_attr.mlx5.comp_mask |=
				MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		cq_attr.mlx5.cqe_comp_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
				MLX5DV_CQE_RES_FORMAT_HASH;
#else
		cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cq_attr.ibv.cqe *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"port %u Rx CQE compression is disabled for HW"
			" timestamp",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (priv->config.cqe_pad) {
		cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
		cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
	}
#endif
	return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
							      &cq_attr.ibv,
							      &cq_attr.mlx5));
}

/**
 * Create a WQ Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param priv
 *   Pointer to device private data.
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param wqe_n
 *   Number of WQEs in WQ.
 * @param rxq_obj
 *   Pointer to Rx queue object data.
 *
 * @return
 *   The Verbs object initialised, NULL otherwise and rte_errno is set.
 */
static struct ibv_wq *
mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
		struct mlx5_rxq_data *rxq_data, uint16_t idx,
		unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
{
	struct {
		struct ibv_wq_init_attr ibv;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		struct mlx5dv_wq_init_attr mlx5;
#endif
	} wq_attr;

	wq_attr.ibv = (struct ibv_wq_init_attr){
		.wq_context = NULL, /* Could be useful in the future. */
		.wq_type = IBV_WQT_RQ,
		/* Max number of outstanding WRs. */
		.max_wr = wqe_n >> rxq_data->sges_n,
		/* Max number of scatter/gather elements in a WR. */
		.max_sge = 1 << rxq_data->sges_n,
		.pd = priv->sh->pd,
		.cq = rxq_obj->ibv_cq,
		.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
		.create_flags = (rxq_data->vlan_strip ?
				 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
	};
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq_data->crc_present) {
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
	if (priv->config.hw_padding) {
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#endif
	}
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
		.comp_mask = 0,
	};
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		struct mlx5dv_striding_rq_init_attr *mprq_attr =
						&wq_attr.mlx5.striding_rq_attrs;

		wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
		*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
			.single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
			.single_wqe_log_num_of_strides = rxq_data->strd_num_n,
			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
		};
	}
	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
					      &wq_attr.mlx5);
#else
	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
#endif
	if (rxq_obj->wq) {
		/*
		 * Make sure number of WRs*SGEs match expectations since a queue
		 * cannot allocate more than "desc" buffers.
		 */
		if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
		    wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
			DRV_LOG(ERR,
				"port %u Rx queue %u requested %u*%u but got"
				" %u*%u WRs*SGEs",
				dev->data->port_id, idx,
				wqe_n >> rxq_data->sges_n,
				(1 << rxq_data->sges_n),
				wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
			rxq_obj->wq = NULL;
			rte_errno = EINVAL;
		}
	}
	return rxq_obj->wq;
}

/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id = rxq_ctrl->rq_dbr_umem_id;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}

/**
 * Create a RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param cqn
 *   CQ number to use with this RQ.
 *
 * @return
 *   The DevX object initialised, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* round up power of two.*/
	wq_size = wqe_n * wqe_size;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf)
		return NULL;
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem) {
		mlx5_free(buf);
		return NULL;
	}
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		rxq_release_devx_rq_resources(rxq_ctrl);
	return rq;
}

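/*
 * WQ sizing example for the non-MPRQ branch above: with elts_n = 10 and
 * sges_n = 0 there are wqe_n = 1024 WQEs; each WQE is a single
 * struct mlx5_wqe_data_seg (16 bytes), so log_wqe_size = 4 and the
 * registered umem spans wq_size = 1024 * 16 = 16 KiB.
 */
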
/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param cqe_n
 *   Number of CQEs in CQ.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param rxq_obj
 *   Pointer to Rx queue object data.
 *
 * @return
 *   The DevX object initialised, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_devx_cq_new(struct rte_eth_dev *dev, unsigned int cqe_n, uint16_t idx,
		 struct mlx5_rxq_obj *rxq_obj)
{
	struct mlx5_devx_obj *cq_obj = 0;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	uint32_t cq_size, log_cqe_n;
	uint16_t event_nums[1] = {0};
	uint32_t eqn = 0;
	void *buf = NULL;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		cq_attr.mini_cqe_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
				MLX5DV_CQE_RES_FORMAT_HASH;
#else
		cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"port %u Rx CQE compression is disabled for HW"
			" timestamp",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"port %u Rx CQE compression is disabled for LRO",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
#endif
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	/* Query the EQN for this core. */
	if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
		DRV_LOG(ERR, "Failed to query EQN for CQ.");
		goto error;
	}
	cq_attr.eqn = eqn;
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = rxq_ctrl->cq_dbr_umem_id;
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_obj->devx_channel) {
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR, "Fail to subscribe CQ to event channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
 */
static struct mlx5_rxq_obj *
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = NULL;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(!rxq_ctrl->obj);
	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			   rxq_ctrl->socket);
	if (!tmpl) {
		DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
			dev->data->port_id, rxq_data->idx);
		rte_errno = ENOMEM;
		return NULL;
	}
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "total data size %u power of 2 is "
				"too large for hairpin",
				priv->config.log_hp_size);
			mlx5_free(tmpl);
			rte_errno = ERANGE;
			return NULL;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"port %u Rx hairpin queue %u can't create rq object",
			dev->data->port_id, idx);
		mlx5_free(tmpl);
		rte_errno = errno;
		return NULL;
	}
	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
		idx, (void *)&tmpl);
	rte_atomic32_inc(&tmpl->refcnt);
	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return tmpl;
}

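/*
 * Hairpin sizing above is logarithmic: a log_hairpin_data_sz of, say, 15
 * (32 KiB of buffer space) leaves 2^(15 - MLX5_HAIRPIN_QUEUE_STRIDE)
 * packet slots, i.e. the packet count is the buffer size divided by the
 * queue stride.
 */
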
/**
 * Create the Rx queue Verbs/DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param type
 *   Type of Rx queue object to create.
 *
 * @return
 *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
 */
struct mlx5_rxq_obj *
mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
		 enum mlx5_rxq_obj_type type)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct ibv_wq_attr mod;
	unsigned int cqe_n;
	unsigned int wqe_n = 1 << rxq_data->elts_n;
	struct mlx5_rxq_obj *tmpl = NULL;
	struct mlx5_devx_dbr_page *cq_dbr_page = NULL;
	struct mlx5_devx_dbr_page *rq_dbr_page = NULL;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_rwq rwq;
	int ret = 0;
	struct mlx5dv_obj obj;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(!rxq_ctrl->obj);
	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			   rxq_ctrl->socket);
	if (!tmpl) {
		DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
			dev->data->port_id, rxq_data->idx);
		rte_errno = ENOMEM;
		goto error;
	}
	tmpl->type = type;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
			tmpl->ibv_channel =
				mlx5_glue->create_comp_channel(priv->sh->ctx);
			if (!tmpl->ibv_channel) {
				DRV_LOG(ERR, "port %u: comp channel creation "
					"failure", dev->data->port_id);
				rte_errno = ENOMEM;
				goto error;
			}
			tmpl->fd = ((struct ibv_comp_channel *)
					(tmpl->ibv_channel))->fd;
		} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
			int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

			tmpl->devx_channel =
				mlx5_glue->devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
			if (!tmpl->devx_channel) {
				rte_errno = errno;
				DRV_LOG(ERR,
					"Failed to create event channel %d.",
					rte_errno);
				goto error;
			}
			tmpl->fd =
				mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
		}
	}
	if (mlx5_rxq_mprq_enabled(rxq_data))
		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
	else
		cqe_n = wqe_n - 1;
	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
	DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
		dev->data->port_id, priv->sh->device_attr.max_sge);
	if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
		priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
		priv->verbs_alloc_ctx.obj = rxq_ctrl;
		/* Create CQ using Verbs API. */
		tmpl->ibv_cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n,
					       tmpl);
		if (!tmpl->ibv_cq) {
			DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
				dev->data->port_id, idx);
			rte_errno = ENOMEM;
			goto error;
		}
		obj.cq.in = tmpl->ibv_cq;
		obj.cq.out = &cq_info;
		ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
		if (ret) {
			rte_errno = ret;
			goto error;
		}
		if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
			DRV_LOG(ERR,
				"port %u wrong MLX5_CQE_SIZE environment "
				"variable value: it should be set to %u",
				dev->data->port_id, RTE_CACHE_LINE_SIZE);
			rte_errno = EINVAL;
			goto error;
		}
		/* Fill the rings. */
		rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
		rxq_data->cq_db = cq_info.dbrec;
		rxq_data->cqes =
			(volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
		rxq_data->cq_uar = cq_info.cq_uar;
		rxq_data->cqn = cq_info.cqn;
		/* Create WQ (RQ) using Verbs API. */
		tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
					   tmpl);
		if (!tmpl->wq) {
			DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
				dev->data->port_id, idx);
			rte_errno = ENOMEM;
			goto error;
		}
		/* Change queue state to ready. */
		mod = (struct ibv_wq_attr){
			.attr_mask = IBV_WQ_ATTR_STATE,
			.wq_state = IBV_WQS_RDY,
		};
		ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
		if (ret) {
			DRV_LOG(ERR,
				"port %u Rx queue %u WQ state to IBV_WQS_RDY"
				" failed", dev->data->port_id, idx);
			rte_errno = ret;
			goto error;
		}
		obj.rwq.in = tmpl->wq;
		obj.rwq.out = &rwq;
		ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
		if (ret) {
			rte_errno = ret;
			goto error;
		}
		rxq_data->wqes = rwq.buf;
		rxq_data->rq_db = rwq.dbrec;
		priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
		int64_t dbr_offset;

		/* Allocate CQ door-bell. */
		dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs,
					  &cq_dbr_page);
		if (dbr_offset < 0) {
			DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
			goto error;
		}
		rxq_ctrl->cq_dbr_offset = dbr_offset;
		rxq_ctrl->cq_dbr_umem_id =
			mlx5_os_get_umem_id(cq_dbr_page->umem);
		rxq_data->cq_db =
			(uint32_t *)((uintptr_t)cq_dbr_page->dbrs +
				     (uintptr_t)rxq_ctrl->cq_dbr_offset);
		rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
		/* Create CQ using DevX API. */
		tmpl->devx_cq = mlx5_devx_cq_new(dev, cqe_n, idx, tmpl);
		if (!tmpl->devx_cq) {
			DRV_LOG(ERR, "Failed to create CQ.");
			goto error;
		}
		/* Allocate RQ door-bell. */
		dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs,
					  &rq_dbr_page);
		if (dbr_offset < 0) {
			DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
			goto error;
		}
		rxq_ctrl->rq_dbr_offset = dbr_offset;
		rxq_ctrl->rq_dbr_umem_id =
			mlx5_os_get_umem_id(rq_dbr_page->umem);
		rxq_data->rq_db =
			(uint32_t *)((uintptr_t)rq_dbr_page->dbrs +
				     (uintptr_t)rxq_ctrl->rq_dbr_offset);
		/* Create RQ using DevX API. */
		tmpl->rq = mlx5_devx_rq_new(dev, idx, tmpl->devx_cq->id);
		if (!tmpl->rq) {
			DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
				dev->data->port_id, idx);
			rte_errno = ENOMEM;
			goto error;
		}
		/* Change queue state to ready. */
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
		if (ret)
			goto error;
	}
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
		idx, (void *)&tmpl);
	rte_atomic32_inc(&tmpl->refcnt);
	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return tmpl;
error:
	if (tmpl) {
		ret = rte_errno; /* Save rte_errno before cleanup. */
		if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
			if (tmpl->wq)
				claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
			if (tmpl->ibv_cq)
				claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
			if (tmpl->ibv_channel)
				claim_zero(mlx5_glue->destroy_comp_channel
							(tmpl->ibv_channel));
			priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
		} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
			if (tmpl->rq)
				claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
			if (tmpl->devx_cq)
				claim_zero(mlx5_devx_cmd_destroy
							(tmpl->devx_cq));
			if (tmpl->devx_channel)
				mlx5_glue->devx_destroy_event_channel
							(tmpl->devx_channel);
			if (rq_dbr_page)
				claim_zero(mlx5_release_dbr
						(&priv->dbrpgs,
						 rxq_ctrl->rq_dbr_umem_id,
						 rxq_ctrl->rq_dbr_offset));
			if (cq_dbr_page)
				claim_zero(mlx5_release_dbr
						(&priv->dbrpgs,
						 rxq_ctrl->cq_dbr_umem_id,
						 rxq_ctrl->cq_dbr_offset));
		}
		mlx5_free(tmpl);
		rte_errno = ret; /* Restore rte_errno. */
	}
	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
		rxq_release_devx_rq_resources(rxq_ctrl);
		rxq_release_devx_cq_resources(rxq_ctrl);
	}
	return NULL;
}

/**
 * Verify the Rx queue objects list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_rxq_obj *rxq_obj;

	LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
		DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
			dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
		++ret;
	}
	return ret;
}

/**
 * Callback function to initialize mbufs for Multi-Packet RQ.
 */
static inline void
mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
		   void *_m, unsigned int i __rte_unused)
{
	struct mlx5_mprq_buf *buf = _m;
	struct rte_mbuf_ext_shared_info *shinfo;
	unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
	unsigned int j;

	memset(_m, 0, sizeof(*buf));
	buf->mp = mp;
	rte_atomic16_set(&buf->refcnt, 1);
	for (j = 0; j != strd_n; ++j) {
		shinfo = &buf->shinfos[j];
		shinfo->free_cb = mlx5_mprq_buf_free_cb;
		shinfo->fcb_opaque = buf;
	}
}

/**
 * Free mempool of Multi-Packet RQ.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_mprq_free_mp(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_mempool *mp = priv->mprq_mp;
	unsigned int i;

	if (mp == NULL)
		return 0;
	DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
		dev->data->port_id, mp->name);
	/*
	 * If a buffer in the pool has been externally attached to a mbuf and
	 * it is still in use by the application, destroying the Rx queue can
	 * spoil the packet. It is unlikely to happen, but if the application
	 * dynamically creates and destroys queues while holding Rx packets,
	 * this can happen.
	 *
	 * TODO: It is unavoidable for now because the mempool for Multi-Packet
	 * RQ isn't provided by application but managed by PMD.
	 */
	if (!rte_mempool_full(mp)) {
		DRV_LOG(ERR,
			"port %u mempool for Multi-Packet RQ is still in use",
			dev->data->port_id);
		rte_errno = EBUSY;
		return -rte_errno;
	}
	rte_mempool_free(mp);
	/* Unset mempool for each Rx queue. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

		if (rxq == NULL)
			continue;
		rxq->mprq_mp = NULL;
	}
	priv->mprq_mp = NULL;
	return 0;
}

/**
 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
 * mempool. If already allocated, reuse it if there are enough elements.
 * Otherwise, resize it.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_mempool *mp = priv->mprq_mp;
	char name[RTE_MEMPOOL_NAMESIZE];
	unsigned int desc = 0;
	unsigned int buf_len;
	unsigned int obj_num;
	unsigned int obj_size;
	unsigned int strd_num_n = 0;
	unsigned int strd_sz_n = 0;
	unsigned int i;
	unsigned int n_ibv = 0;

	if (!mlx5_mprq_enabled(dev))
		return 0;
	/* Count the total number of descriptors configured. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
			(rxq, struct mlx5_rxq_ctrl, rxq);

		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		n_ibv++;
		desc += 1 << rxq->elts_n;
		/* Get the max number of strides. */
		if (strd_num_n < rxq->strd_num_n)
			strd_num_n = rxq->strd_num_n;
		/* Get the max size of a stride. */
		if (strd_sz_n < rxq->strd_sz_n)
			strd_sz_n = rxq->strd_sz_n;
	}
	MLX5_ASSERT(strd_num_n && strd_sz_n);
	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
		sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
	/*
	 * Received packets can be either memcpy'd or externally referenced.
	 * In case the packet is attached to an mbuf as an external buffer, as
	 * it isn't possible to predict how the buffers will be queued by the
	 * application, there's no option to exactly pre-allocate needed
	 * buffers in advance but to speculatively prepare enough buffers.
	 *
	 * In the data path, if this Mempool is depleted, PMD will try to
	 * memcpy received packets to buffers provided by the application
	 * (rxq->mp) until this Mempool gets available again.
	 */
	desc *= 4;
	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
	/*
	 * rte_mempool_create_empty() has sanity check to refuse large cache
	 * size compared to the number of elements.
	 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
	 * constant number 2 instead.
	 */
	obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
	/* Check if a mempool is already allocated and if it can be reused. */
	if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
		DRV_LOG(DEBUG, "port %u mempool %s is being reused",
			dev->data->port_id, mp->name);
		/* Reuse. */
		goto exit;
	} else if (mp != NULL) {
		DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
			dev->data->port_id, mp->name);
		/*
		 * If failed to free, which means it may be still in use, no
		 * way but to keep using the existing one. On buffer underrun,
		 * packets will be memcpy'd instead of external buffer
		 * attachment.
		 */
		if (mlx5_mprq_free_mp(dev)) {
			if (mp->elt_size >= obj_size)
				goto exit;
			else
				return -rte_errno;
		}
	}
	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
				0, NULL, NULL, mlx5_mprq_buf_init,
				(void *)(uintptr_t)(1 << strd_num_n),
				dev->device->numa_node, 0);
	if (mp == NULL) {
		DRV_LOG(ERR,
			"port %u failed to allocate a mempool for"
			" Multi-Packet RQ, count=%u, size=%u",
			dev->data->port_id, obj_num, obj_size);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->mprq_mp = mp;
exit:
	/* Set mempool for each Rx queue. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
			(rxq, struct mlx5_rxq_ctrl, rxq);

		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		rxq->mprq_mp = mp;
	}
	DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
		dev->data->port_id);
	return 0;
}

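/*
 * Sizing example for the pool above: with 2^6 strides of 2^11 bytes each,
 * buf_len is 64 * 2048 = 128 KiB per mlx5_mprq_buf, plus one
 * rte_mbuf_ext_shared_info per stride and mbuf headroom; desc is scaled by
 * 4 to leave slack for buffers the application may still be holding.
 */
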
#define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
					sizeof(struct rte_vlan_hdr) * 2 + \
					sizeof(struct rte_ipv6_hdr)))
#define MAX_TCP_OPTION_SIZE 40u
#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
				 sizeof(struct rte_tcp_hdr) + \
				 MAX_TCP_OPTION_SIZE))

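/*
 * Worked out: MLX5_MAX_TCP_HDR_OFFSET is 14 (Ethernet) + 2 * 4 (two VLAN
 * tags) + 40 (IPv6) = 62 bytes, and MLX5_MAX_LRO_HEADER_FIX adds a 20-byte
 * TCP header plus up to 40 bytes of TCP options, giving 122 bytes.
 */
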
/**
 * Adjust the maximum LRO message size.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   RX queue index.
 * @param max_lro_size
 *   The maximum size for LRO packet.
 */
static void
mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
			     uint32_t max_lro_size)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.hca_attr.lro_max_msg_sz_mode ==
	    MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
	    MLX5_MAX_TCP_HDR_OFFSET)
		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
	max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
	MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
	max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
	if (priv->max_lro_msg_size)
		priv->max_lro_msg_size =
			RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
	else
		priv->max_lro_msg_size = max_lro_size;
	DRV_LOG(DEBUG,
		"port %u Rx Queue %u max LRO message size adjusted to %u bytes",
		dev->data->port_id, idx,
		priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
}

2252 * Create a DPDK Rx queue.
2255 * Pointer to Ethernet device.
2259 * Number of descriptors to configure in queue.
2261 * NUMA socket on which memory must be allocated.
2264 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
2266 struct mlx5_rxq_ctrl *
2267 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
2268 unsigned int socket, const struct rte_eth_rxconf *conf,
2269 struct rte_mempool *mp)
2271 struct mlx5_priv *priv = dev->data->dev_private;
2272 struct mlx5_rxq_ctrl *tmpl;
2273 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
2274 unsigned int mprq_stride_nums;
2275 unsigned int mprq_stride_size;
2276 unsigned int mprq_stride_cap;
2277 struct mlx5_dev_config *config = &priv->config;
2279 * Always allocate extra slots, even if eventually
2280 * the vector Rx will not be used.
2283 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
2284 uint64_t offloads = conf->offloads |
2285 dev->data->dev_conf.rxmode.offloads;
2286 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
2287 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
2288 unsigned int max_rx_pkt_len = lro_on_queue ?
2289 dev->data->dev_conf.rxmode.max_lro_pkt_size :
2290 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2291 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
2292 RTE_PKTMBUF_HEADROOM;
2293 unsigned int max_lro_size = 0;
2294 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
2296 if (non_scatter_min_mbuf_size > mb_len && !(offloads &
2297 DEV_RX_OFFLOAD_SCATTER)) {
2298 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
2299 " configured and there is not enough mbuf space (%u) to contain"
2300 " the maximum Rx packet length (%u) with headroom (%u)",
2301 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
2302 RTE_PKTMBUF_HEADROOM);
2306 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
2307 desc_n * sizeof(struct rte_mbuf *), 0, socket);
2312 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
2313 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
2314 MLX5_MR_BTREE_CACHE_N, socket)) {
2315 /* rte_errno is already set. */
2318 tmpl->socket = socket;
2319 if (dev->data->dev_conf.intr_conf.rxq)
2321 mprq_stride_nums = config->mprq.stride_num_n ?
2322 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
2323 mprq_stride_size = non_scatter_min_mbuf_size <=
2324 (1U << config->mprq.max_stride_size_n) ?
2325 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
2326 mprq_stride_cap = (config->mprq.stride_num_n ?
2327 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
2328 (config->mprq.stride_size_n ?
2329 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
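/*
 * Illustrative sketch: assuming the defaults are 2^6 strides per WQE and
 * 2^11-byte strides, mprq_stride_cap = 64 * 2048 = 131072 bytes, the
 * largest packet a single MPRQ WQE can absorb; a user-provided
 * stride_num_n or stride_size_n overrides the corresponding factor.
 */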
2331 * This Rx queue can be configured as a Multi-Packet RQ if all of the
2332 * following conditions are met:
2333 * - MPRQ is enabled.
2334 * - The number of descs is more than the number of strides.
2335 * - max_rx_pkt_len plus overhead is less than the max size
2336 * of a stride or mprq_stride_size is specified by a user.
2337 * Make sure that there are enough strides to hold
2338 * the maximum packet size in case mprq_stride_size is set.
2339 * Otherwise, enable Rx scatter if necessary.
2341 if (mprq_en && desc > (1U << mprq_stride_nums) &&
2342 (non_scatter_min_mbuf_size <=
2343 (1U << config->mprq.max_stride_size_n) ||
2344 (config->mprq.stride_size_n &&
2345 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
2346 /* TODO: Rx scatter isn't supported yet. */
2347 tmpl->rxq.sges_n = 0;
2348 /* Trim the number of descs needed. */
2349 desc >>= mprq_stride_nums;
2350 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
2351 config->mprq.stride_num_n : mprq_stride_nums;
2352 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
2353 config->mprq.stride_size_n : mprq_stride_size;
2354 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
2355 tmpl->rxq.strd_scatter_en =
2356 !!(offloads & DEV_RX_OFFLOAD_SCATTER);
2357 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
2358 config->mprq.max_memcpy_len);
2359 max_lro_size = RTE_MIN(max_rx_pkt_len,
2360 (1u << tmpl->rxq.strd_num_n) *
2361 (1u << tmpl->rxq.strd_sz_n));
2363 "port %u Rx queue %u: Multi-Packet RQ is enabled"
2364 " strd_num_n = %u, strd_sz_n = %u",
2365 dev->data->port_id, idx,
2366 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
2367 } else if (max_rx_pkt_len <= first_mb_free_size) {
2368 tmpl->rxq.sges_n = 0;
2369 max_lro_size = max_rx_pkt_len;
2370 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
2371 unsigned int size = non_scatter_min_mbuf_size;
2372 unsigned int sges_n;
2374 if (lro_on_queue && first_mb_free_size <
2375 MLX5_MAX_LRO_HEADER_FIX) {
2376 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
2377 " to include the max header size(%u) for LRO",
2378 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
2379 rte_errno = ENOTSUP;
2383 * Determine the number of SGEs needed for a full packet
2384 * and round it to the next power of two.
2386 sges_n = log2above((size / mb_len) + !!(size % mb_len));
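/*
 * Example (illustrative): for size = 9216 and mb_len = 2176,
 * 9216 / 2176 = 4 with a remainder, so 5 segments are required and
 * log2above(5) = 3, configuring the queue for 2^3 = 8 SGEs.
 */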
2387 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
2389 "port %u too many SGEs (%u) needed to handle"
2390 " requested maximum packet size %u, the maximum"
2391 " supported are %u", dev->data->port_id,
2392 1 << sges_n, max_rx_pkt_len,
2393 1u << MLX5_MAX_LOG_RQ_SEGS);
2394 rte_errno = ENOTSUP;
2397 tmpl->rxq.sges_n = sges_n;
2398 max_lro_size = max_rx_pkt_len;
2400 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
2402 "port %u MPRQ is requested but cannot be enabled\n"
2403 " (requested: pkt_sz = %u, desc_num = %u,"
2404 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
2405 " supported: min_rxqs_num = %u,"
2406 " min_stride_sz = %u, max_stride_sz = %u).",
2407 dev->data->port_id, non_scatter_min_mbuf_size,
2409 config->mprq.stride_size_n ?
2410 (1U << config->mprq.stride_size_n) :
2411 (1U << mprq_stride_size),
2412 config->mprq.stride_num_n ?
2413 (1U << config->mprq.stride_num_n) :
2414 (1U << mprq_stride_nums),
2415 config->mprq.min_rxqs_num,
2416 (1U << config->mprq.min_stride_size_n),
2417 (1U << config->mprq.max_stride_size_n));
2418 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
2419 dev->data->port_id, 1 << tmpl->rxq.sges_n);
2420 if (desc % (1 << tmpl->rxq.sges_n)) {
2422 "port %u number of Rx queue descriptors (%u) is not a"
2423 " multiple of SGEs per packet (%u)",
2426 1 << tmpl->rxq.sges_n);
2430 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
2431 /* Toggle RX checksum offload if hardware supports it. */
2432 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
2433 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
2434 /* Configure VLAN stripping. */
2435 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
2436 /* By default, FCS (CRC) is stripped by hardware. */
2437 tmpl->rxq.crc_present = 0;
2438 tmpl->rxq.lro = lro_on_queue;
2439 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
2440 if (config->hw_fcs_strip) {
2442 * RQs used for LRO-enabled TIRs should not be
2443 * configured to scatter the FCS.
2447 "port %u CRC stripping has been "
2448 "disabled but will still be performed "
2449 "by hardware, because LRO is enabled",
2450 dev->data->port_id);
2452 tmpl->rxq.crc_present = 1;
2455 "port %u CRC stripping has been disabled but will"
2456 " still be performed by hardware, make sure MLNX_OFED"
2457 " and firmware are up to date",
2458 dev->data->port_id);
2462 "port %u CRC stripping is %s, %u bytes will be subtracted from"
2463 " incoming frames to hide it",
2465 tmpl->rxq.crc_present ? "disabled" : "enabled",
2466 tmpl->rxq.crc_present << 2);
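/*
 * Note: crc_present is 0 or 1, so crc_present << 2 is either 0 or 4,
 * the length in bytes of the Ethernet FCS that the hardware keeps in
 * the received data when CRC stripping is disabled.
 */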
2468 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
2469 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
2470 tmpl->rxq.port_id = dev->data->port_id;
2473 tmpl->rxq.elts_n = log2above(desc);
2474 tmpl->rxq.rq_repl_thresh =
2475 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
2477 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
2479 tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
2481 tmpl->rxq.idx = idx;
2482 rte_atomic32_inc(&tmpl->refcnt);
2483 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
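/*
 * Usage sketch (illustrative, not part of this file): the Rx queue setup
 * callback is expected to create the control structure roughly as follows.
 *
 *	struct mlx5_rxq_ctrl *ctrl =
 *		mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
 *	if (ctrl == NULL)
 *		return -rte_errno;
 *	(*priv->rxqs)[idx] = &ctrl->rxq;
 */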
2491 * Create a DPDK Rx hairpin queue.
2494 * Pointer to Ethernet device.
2498 * Number of descriptors to configure in queue.
2499 * @param hairpin_conf
2500 * The hairpin binding configuration.
2503 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
2505 struct mlx5_rxq_ctrl *
2506 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
2507 const struct rte_eth_hairpin_conf *hairpin_conf)
2509 struct mlx5_priv *priv = dev->data->dev_private;
2510 struct mlx5_rxq_ctrl *tmpl;
2512 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
2518 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
2519 tmpl->socket = SOCKET_ID_ANY;
2520 tmpl->rxq.rss_hash = 0;
2521 tmpl->rxq.port_id = dev->data->port_id;
2523 tmpl->rxq.mp = NULL;
2524 tmpl->rxq.elts_n = log2above(desc);
2525 tmpl->rxq.elts = NULL;
2526 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
2527 tmpl->hairpin_conf = *hairpin_conf;
2528 tmpl->rxq.idx = idx;
2529 rte_atomic32_inc(&tmpl->refcnt);
2530 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
2538 * Pointer to Ethernet device.
2543 * A pointer to the queue if it exists, NULL otherwise.
2545 struct mlx5_rxq_ctrl *
2546 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
2548 struct mlx5_priv *priv = dev->data->dev_private;
2549 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2551 if ((*priv->rxqs)[idx]) {
2552 rxq_ctrl = container_of((*priv->rxqs)[idx],
2553 struct mlx5_rxq_ctrl,
2555 mlx5_rxq_obj_get(dev, idx);
2556 rte_atomic32_inc(&rxq_ctrl->refcnt);
2562 * Release an Rx queue.
2565 * Pointer to Ethernet device.
2570 * 1 while a reference on it exists, 0 when freed.
2573 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2575 struct mlx5_priv *priv = dev->data->dev_private;
2576 struct mlx5_rxq_ctrl *rxq_ctrl;
2578 if (!(*priv->rxqs)[idx])
2580 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
2581 MLX5_ASSERT(rxq_ctrl->priv);
2582 if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
2583 rxq_ctrl->obj = NULL;
2584 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
2585 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
2586 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2587 LIST_REMOVE(rxq_ctrl, next);
2588 mlx5_free(rxq_ctrl);
2589 (*priv->rxqs)[idx] = NULL;
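/*
 * Usage sketch (illustrative): every reference taken with mlx5_rxq_get()
 * must be paired with mlx5_rxq_release(); the queue is freed only when
 * the last reference is dropped.
 *
 *	struct mlx5_rxq_ctrl *ctrl = mlx5_rxq_get(dev, idx);
 *	if (ctrl != NULL) {
 *		... use ctrl ...
 *		mlx5_rxq_release(dev, idx);
 *	}
 */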
2596 * Verify that the Rx queue list is empty.
2599 * Pointer to Ethernet device.
2602 * The number of objects not released.
2605 mlx5_rxq_verify(struct rte_eth_dev *dev)
2607 struct mlx5_priv *priv = dev->data->dev_private;
2608 struct mlx5_rxq_ctrl *rxq_ctrl;
2611 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2612 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2613 dev->data->port_id, rxq_ctrl->rxq.idx);
2620 * Get an Rx queue type.
2623 * Pointer to Ethernet device.
2628 * The Rx queue type.
2631 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
2633 struct mlx5_priv *priv = dev->data->dev_private;
2634 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2636 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
2637 rxq_ctrl = container_of((*priv->rxqs)[idx],
2638 struct mlx5_rxq_ctrl,
2640 return rxq_ctrl->type;
2642 return MLX5_RXQ_TYPE_UNDEFINED;
2646 * Create an indirection table.
2649 * Pointer to Ethernet device.
2651 * Queues to include in the indirection table.
2653 * Number of queues in the array.
2656 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2658 static struct mlx5_ind_table_obj *
2659 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2660 uint32_t queues_n, enum mlx5_ind_tbl_type type)
2662 struct mlx5_priv *priv = dev->data->dev_private;
2663 struct mlx5_ind_table_obj *ind_tbl;
2664 unsigned int i = 0, j = 0, k = 0;
2666 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
2667 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
2672 ind_tbl->type = type;
2673 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2674 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
2675 log2above(queues_n) :
2676 log2above(priv->config.ind_table_max_size);
2677 struct ibv_wq *wq[1 << wq_n];
2679 for (i = 0; i != queues_n; ++i) {
2680 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2684 wq[i] = rxq->obj->wq;
2685 ind_tbl->queues[i] = queues[i];
2687 ind_tbl->queues_n = queues_n;
2688 /* Finalise indirection table: fill remaining entries by wrapping. */
2689 k = i; /* Retain value of i for use in error case. */
2690 for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
2691 wq[k] = wq[j];
2692 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
2694 &(struct ibv_rwq_ind_table_init_attr){
2695 .log_ind_tbl_size = wq_n,
2699 if (!ind_tbl->ind_table) {
2703 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2704 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
2705 const unsigned int rqt_n =
2706 1 << (rte_is_power_of_2(queues_n) ?
2707 log2above(queues_n) :
2708 log2above(priv->config.ind_table_max_size));
2710 rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
2711 rqt_n * sizeof(uint32_t), 0,
2714 DRV_LOG(ERR, "port %u cannot allocate RQT resources",
2715 dev->data->port_id);
2719 rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
2720 rqt_attr->rqt_actual_size = rqt_n;
2721 for (i = 0; i != queues_n; ++i) {
2722 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2726 rqt_attr->rq_list[i] = rxq->obj->rq->id;
2727 ind_tbl->queues[i] = queues[i];
2729 k = i; /* Retain value of i for use in error case. */
2730 for (j = 0; k != rqt_n; ++k, ++j)
2731 rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
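/*
 * Illustrative example: with queues = {0, 1, 2, 3, 4, 5} (not a power of
 * two) and an assumed ind_table_max_size of 512, rqt_n is 512 and entries
 * 6..511 wrap over the first six RQ ids, keeping the RSS spread uniform
 * across the whole table.
 */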
2732 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
2734 mlx5_free(rqt_attr);
2735 if (!ind_tbl->rqt) {
2736 DRV_LOG(ERR, "port %u cannot create DevX RQT",
2737 dev->data->port_id);
2741 ind_tbl->queues_n = queues_n;
2743 rte_atomic32_inc(&ind_tbl->refcnt);
2744 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2747 for (j = 0; j < i; j++)
2748 mlx5_rxq_release(dev, ind_tbl->queues[j]);
2750 DEBUG("port %u cannot create indirection table", dev->data->port_id);
2755 * Get an indirection table.
2758 * Pointer to Ethernet device.
2760 * Queues to include in the indirection table.
2762 * Number of queues in the array.
2765 * An indirection table if found.
2767 static struct mlx5_ind_table_obj *
2768 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2771 struct mlx5_priv *priv = dev->data->dev_private;
2772 struct mlx5_ind_table_obj *ind_tbl;
2774 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2775 if ((ind_tbl->queues_n == queues_n) &&
2776 (memcmp(ind_tbl->queues, queues,
2777 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2784 rte_atomic32_inc(&ind_tbl->refcnt);
2785 for (i = 0; i != ind_tbl->queues_n; ++i)
2786 mlx5_rxq_get(dev, ind_tbl->queues[i]);
2792 * Release an indirection table.
2795 * Pointer to Ethernet device.
2797 * Indirection table to release.
2800 * 1 while a reference on it exists, 0 when freed.
2803 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2804 struct mlx5_ind_table_obj *ind_tbl)
2808 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
2809 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
2810 claim_zero(mlx5_glue->destroy_rwq_ind_table
2811 (ind_tbl->ind_table));
2812 else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
2813 claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
2815 for (i = 0; i != ind_tbl->queues_n; ++i)
2816 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
2817 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
2818 LIST_REMOVE(ind_tbl, next);
2826 * Verify that the indirection table list is empty.
2829 * Pointer to Ethernet device.
2832 * The number of objects not released.
2835 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2837 struct mlx5_priv *priv = dev->data->dev_private;
2838 struct mlx5_ind_table_obj *ind_tbl;
2841 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2843 "port %u indirection table obj %p still referenced",
2844 dev->data->port_id, (void *)ind_tbl);
2851 * Create an Rx Hash queue.
2854 * Pointer to Ethernet device.
2856 * RSS key for the Rx hash queue.
2857 * @param rss_key_len
2859 * @param hash_fields
2860 * Verbs protocol hash field to make the RSS on.
2862 * Queues to include in the hash queue. If hash_fields is empty, only
2863 * the first queue index is used for the indirection table.
2870 * The index of the initialised Verbs/DevX object, 0 otherwise and rte_errno is set.
2873 mlx5_hrxq_new(struct rte_eth_dev *dev,
2874 const uint8_t *rss_key, uint32_t rss_key_len,
2875 uint64_t hash_fields,
2876 const uint16_t *queues, uint32_t queues_n,
2877 int tunnel __rte_unused)
2879 struct mlx5_priv *priv = dev->data->dev_private;
2880 struct mlx5_hrxq *hrxq = NULL;
2881 uint32_t hrxq_idx = 0;
2882 struct ibv_qp *qp = NULL;
2883 struct mlx5_ind_table_obj *ind_tbl;
2885 struct mlx5_devx_obj *tir = NULL;
2886 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
2887 struct mlx5_rxq_ctrl *rxq_ctrl =
2888 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
2890 queues_n = hash_fields ? queues_n : 1;
2891 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2893 enum mlx5_ind_tbl_type type;
2895 type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
2896 MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
2897 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
2903 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2904 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2905 struct mlx5dv_qp_init_attr qp_init_attr;
2907 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2909 qp_init_attr.comp_mask =
2910 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2911 qp_init_attr.create_flags =
2912 MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2914 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2915 if (dev->data->dev_conf.lpbk_mode) {
2917 * Allow packets sent from the NIC to loop back
2918 * without source MAC check.
2920 qp_init_attr.comp_mask |=
2921 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2922 qp_init_attr.create_flags |=
2923 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2926 qp = mlx5_glue->dv_create_qp
2928 &(struct ibv_qp_init_attr_ex){
2929 .qp_type = IBV_QPT_RAW_PACKET,
2931 IBV_QP_INIT_ATTR_PD |
2932 IBV_QP_INIT_ATTR_IND_TABLE |
2933 IBV_QP_INIT_ATTR_RX_HASH,
2934 .rx_hash_conf = (struct ibv_rx_hash_conf){
2936 IBV_RX_HASH_FUNC_TOEPLITZ,
2937 .rx_hash_key_len = rss_key_len,
2939 (void *)(uintptr_t)rss_key,
2940 .rx_hash_fields_mask = hash_fields,
2942 .rwq_ind_tbl = ind_tbl->ind_table,
2947 qp = mlx5_glue->create_qp_ex
2949 &(struct ibv_qp_init_attr_ex){
2950 .qp_type = IBV_QPT_RAW_PACKET,
2952 IBV_QP_INIT_ATTR_PD |
2953 IBV_QP_INIT_ATTR_IND_TABLE |
2954 IBV_QP_INIT_ATTR_RX_HASH,
2955 .rx_hash_conf = (struct ibv_rx_hash_conf){
2957 IBV_RX_HASH_FUNC_TOEPLITZ,
2958 .rx_hash_key_len = rss_key_len,
2960 (void *)(uintptr_t)rss_key,
2961 .rx_hash_fields_mask = hash_fields,
2963 .rwq_ind_tbl = ind_tbl->ind_table,
2971 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2972 struct mlx5_devx_tir_attr tir_attr;
2976 /* Enable TIR LRO only if all the queues were configured for it. */
2977 for (i = 0; i < queues_n; ++i) {
2978 if (!(*priv->rxqs)[queues[i]]->lro) {
2983 memset(&tir_attr, 0, sizeof(tir_attr));
2984 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2985 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2986 tir_attr.tunneled_offload_en = !!tunnel;
2987 /* If needed, translate hash_fields bitmap to PRM format. */
2989 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2990 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2991 hash_fields & IBV_RX_HASH_INNER ?
2992 &tir_attr.rx_hash_field_selector_inner :
2993 &tir_attr.rx_hash_field_selector_outer;
2995 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2996 &tir_attr.rx_hash_field_selector_outer;
2999 /* 1 bit: 0: IPv4, 1: IPv6. */
3000 rx_hash_field_select->l3_prot_type =
3001 !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
3002 /* 1 bit: 0: TCP, 1: UDP. */
3003 rx_hash_field_select->l4_prot_type =
3004 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
3005 /* Bitmask which sets which fields to use in RX Hash. */
3006 rx_hash_field_select->selected_fields =
3007 ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
3008 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
3009 (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
3010 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
3011 (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
3012 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
3013 (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
3014 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
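/*
 * Example (illustrative): a request to hash on IPv4 source and destination
 * addresses yields l3_prot_type = 0 (IPv4), l4_prot_type = 0 and
 * selected_fields with only the SRC_IP and DST_IP bits set, the PRM
 * encoding of the same selection.
 */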
3016 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
3017 tir_attr.transport_domain = priv->sh->td->id;
3019 tir_attr.transport_domain = priv->sh->tdn;
3020 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key,
3021 MLX5_RSS_HASH_KEY_LEN);
3022 tir_attr.indirect_table = ind_tbl->rqt->id;
3023 if (dev->data->dev_conf.lpbk_mode)
3024 tir_attr.self_lb_block =
3025 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
3027 tir_attr.lro_timeout_period_usecs =
3028 priv->config.lro.timeout;
3029 tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
3030 tir_attr.lro_enable_mask =
3031 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
3032 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
3034 tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
3036 DRV_LOG(ERR, "port %u cannot create DevX TIR",
3037 dev->data->port_id);
3042 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
3045 hrxq->ind_table = ind_tbl;
3046 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
3048 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3050 mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
3051 if (!hrxq->action) {
3056 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
3058 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3059 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
3061 if (!hrxq->action) {
3067 hrxq->rss_key_len = rss_key_len;
3068 hrxq->hash_fields = hash_fields;
3069 memcpy(hrxq->rss_key, rss_key, rss_key_len);
3070 rte_atomic32_inc(&hrxq->refcnt);
3071 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
3075 err = rte_errno; /* Save rte_errno before cleanup. */
3076 mlx5_ind_table_obj_release(dev, ind_tbl);
3078 claim_zero(mlx5_glue->destroy_qp(qp));
3080 claim_zero(mlx5_devx_cmd_destroy(tir));
3082 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
3083 rte_errno = err; /* Restore rte_errno. */
3088 * Get an Rx Hash queue.
3091 * Pointer to Ethernet device.
3093 * RSS configuration for the Rx hash queue.
3095 * Queues to include in the hash queue. If hash_fields is empty, only
3096 * the first queue index is used for the indirection table.
3101 * A hash Rx queue index on success.
3104 mlx5_hrxq_get(struct rte_eth_dev *dev,
3105 const uint8_t *rss_key, uint32_t rss_key_len,
3106 uint64_t hash_fields,
3107 const uint16_t *queues, uint32_t queues_n)
3109 struct mlx5_priv *priv = dev->data->dev_private;
3110 struct mlx5_hrxq *hrxq;
3113 queues_n = hash_fields ? queues_n : 1;
3114 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
3116 struct mlx5_ind_table_obj *ind_tbl;
3118 if (hrxq->rss_key_len != rss_key_len)
3120 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
3122 if (hrxq->hash_fields != hash_fields)
3124 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
3127 if (ind_tbl != hrxq->ind_table) {
3128 mlx5_ind_table_obj_release(dev, ind_tbl);
3131 rte_atomic32_inc(&hrxq->refcnt);
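/*
 * Usage sketch (illustrative): callers in the flow engine typically try
 * to reuse an existing hash Rx queue and create one on a miss.
 *
 *	uint32_t idx = mlx5_hrxq_get(dev, key, MLX5_RSS_HASH_KEY_LEN,
 *				     fields, queues, n);
 *	if (idx == 0)
 *		idx = mlx5_hrxq_new(dev, key, MLX5_RSS_HASH_KEY_LEN,
 *				    fields, queues, n, 0);
 */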
3138 * Release the hash Rx queue.
3141 * Pointer to Ethernet device.
3143 * Index to Hash Rx queue to release.
3146 * 1 while a reference on it exists, 0 when freed.
3149 mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
3151 struct mlx5_priv *priv = dev->data->dev_private;
3152 struct mlx5_hrxq *hrxq;
3154 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
3157 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
3158 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3159 mlx5_glue->destroy_flow_action(hrxq->action);
3161 if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
3162 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
3163 else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
3164 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
3165 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
3166 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
3167 hrxq_idx, hrxq, next);
3168 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
3171 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
3176 * Verify that the hash Rx queue list is empty.
3179 * Pointer to Ethernet device.
3182 * The number of objects not released.
3185 mlx5_hrxq_verify(struct rte_eth_dev *dev)
3187 struct mlx5_priv *priv = dev->data->dev_private;
3188 struct mlx5_hrxq *hrxq;
3192 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
3195 "port %u hash Rx queue %p still referenced",
3196 dev->data->port_id, (void *)hrxq);
3203 * Create a drop Rx queue Verbs/DevX object.
3206 * Pointer to Ethernet device.
3209 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
3211 static struct mlx5_rxq_obj *
3212 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
3214 struct mlx5_priv *priv = dev->data->dev_private;
3215 struct ibv_context *ctx = priv->sh->ctx;
3217 struct ibv_wq *wq = NULL;
3218 struct mlx5_rxq_obj *rxq;
3220 if (priv->drop_queue.rxq)
3221 return priv->drop_queue.rxq;
3222 cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
3224 DEBUG("port %u cannot allocate CQ for drop queue",
3225 dev->data->port_id);
3229 wq = mlx5_glue->create_wq(ctx,
3230 &(struct ibv_wq_init_attr){
3231 .wq_type = IBV_WQT_RQ,
3238 DEBUG("port %u cannot allocate WQ for drop queue",
3239 dev->data->port_id);
3243 rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
3245 DEBUG("port %u cannot allocate drop Rx queue memory",
3246 dev->data->port_id);
3252 priv->drop_queue.rxq = rxq;
3256 claim_zero(mlx5_glue->destroy_wq(wq));
3258 claim_zero(mlx5_glue->destroy_cq(cq));
3263 * Release a drop Rx queue Verbs/DevX object.
3266 * Pointer to Ethernet device.
3272 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
3274 struct mlx5_priv *priv = dev->data->dev_private;
3275 struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
3278 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
3280 claim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));
3282 priv->drop_queue.rxq = NULL;
3286 * Create a drop indirection table.
3289 * Pointer to Ethernet device.
3292 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
3294 static struct mlx5_ind_table_obj *
3295 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
3297 struct mlx5_priv *priv = dev->data->dev_private;
3298 struct mlx5_ind_table_obj *ind_tbl;
3299 struct mlx5_rxq_obj *rxq;
3300 struct mlx5_ind_table_obj tmpl;
3302 rxq = mlx5_rxq_obj_drop_new(dev);
3305 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
3307 &(struct ibv_rwq_ind_table_init_attr){
3308 .log_ind_tbl_size = 0,
3309 .ind_tbl = (struct ibv_wq **)&rxq->wq,
3312 if (!tmpl.ind_table) {
3313 DEBUG("port %u cannot allocate indirection table for drop"
3315 dev->data->port_id);
3319 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,
3325 ind_tbl->ind_table = tmpl.ind_table;
3328 mlx5_rxq_obj_drop_release(dev);
3333 * Release a drop indirection table.
3336 * Pointer to Ethernet device.
3339 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
3341 struct mlx5_priv *priv = dev->data->dev_private;
3342 struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
3344 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
3345 mlx5_rxq_obj_drop_release(dev);
3347 priv->drop_queue.hrxq->ind_table = NULL;
3351 * Create a drop Rx Hash queue.
3354 * Pointer to Ethernet device.
3357 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
3360 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
3362 struct mlx5_priv *priv = dev->data->dev_private;
3363 struct mlx5_ind_table_obj *ind_tbl = NULL;
3364 struct ibv_qp *qp = NULL;
3365 struct mlx5_hrxq *hrxq = NULL;
3367 if (priv->drop_queue.hrxq) {
3368 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
3369 return priv->drop_queue.hrxq;
3371 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
3374 "port %u cannot allocate memory for drop queue",
3375 dev->data->port_id);
3379 priv->drop_queue.hrxq = hrxq;
3380 ind_tbl = mlx5_ind_table_obj_drop_new(dev);
3383 hrxq->ind_table = ind_tbl;
3384 qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
3385 &(struct ibv_qp_init_attr_ex){
3386 .qp_type = IBV_QPT_RAW_PACKET,
3388 IBV_QP_INIT_ATTR_PD |
3389 IBV_QP_INIT_ATTR_IND_TABLE |
3390 IBV_QP_INIT_ATTR_RX_HASH,
3391 .rx_hash_conf = (struct ibv_rx_hash_conf){
3393 IBV_RX_HASH_FUNC_TOEPLITZ,
3394 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
3395 .rx_hash_key = rss_hash_default_key,
3396 .rx_hash_fields_mask = 0,
3398 .rwq_ind_tbl = ind_tbl->ind_table,
3402 DEBUG("port %u cannot allocate QP for drop queue",
3403 dev->data->port_id);
3408 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3409 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
3410 if (!hrxq->action) {
3415 rte_atomic32_set(&hrxq->refcnt, 1);
3418 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3419 if (hrxq && hrxq->action)
3420 mlx5_glue->destroy_flow_action(hrxq->action);
3423 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
3425 mlx5_ind_table_obj_drop_release(dev);
3427 priv->drop_queue.hrxq = NULL;
3434 * Release a drop hash Rx queue.
3437 * Pointer to Ethernet device.
3440 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
3442 struct mlx5_priv *priv = dev->data->dev_private;
3443 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
3445 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
3446 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
3447 mlx5_glue->destroy_flow_action(hrxq->action);
3449 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
3450 mlx5_ind_table_obj_drop_release(dev);
3452 priv->drop_queue.hrxq = NULL;
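/*
 * Usage sketch (illustrative): flows with a drop action share a single
 * drop hash Rx queue per port.
 *
 *	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
 *	if (drop == NULL)
 *		return -rte_errno;
 *	... attach drop->action (or drop->qp) to the flow rule ...
 *	mlx5_hrxq_drop_release(dev);
 */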
3458 * Set the Rx queue timestamp conversion parameters.
3461 * Pointer to the Ethernet device structure.
3464 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
3466 struct mlx5_priv *priv = dev->data->dev_private;
3467 struct mlx5_dev_ctx_shared *sh = priv->sh;
3468 struct mlx5_rxq_data *data;
3471 for (i = 0; i != priv->rxqs_n; ++i) {
3472 if (!(*priv->rxqs)[i])
3474 data = (*priv->rxqs)[i];
3476 data->rt_timestamp = priv->config.rt_timestamp;