1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 #include <rte_malloc.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
20 #include <rte_eal_paging.h>
22 #include <mlx5_glue.h>
23 #include <mlx5_malloc.h>
25 #include "mlx5_defs.h"
27 #include "mlx5_rxtx.h"
28 #include "mlx5_utils.h"
29 #include "mlx5_autoconf.h"
32 /* Default RSS hash key also used for ConnectX-3. */
33 uint8_t rss_hash_default_key[] = {
34 0x2c, 0xc6, 0x81, 0xd1,
35 0x5b, 0xdb, 0xf4, 0xf7,
36 0xfc, 0xa2, 0x83, 0x19,
37 0xdb, 0x1a, 0x3e, 0x94,
38 0x6b, 0x9e, 0x38, 0xd9,
39 0x2c, 0x9c, 0x03, 0xd1,
40 0xad, 0x99, 0x44, 0xa7,
41 0xd9, 0x56, 0x3d, 0x59,
42 0x06, 0x3c, 0x25, 0xf3,
43 0xfc, 0x1f, 0xdc, 0x2a,
46 /* Length of the default RSS hash key. */
47 static_assert(MLX5_RSS_HASH_KEY_LEN ==
48 (unsigned int)sizeof(rss_hash_default_key),
49 "wrong RSS default key size.");
52 * Check whether Multi-Packet RQ can be enabled for the device.
55 * Pointer to Ethernet device.
58 * 1 if supported, negative errno value if not.
61 mlx5_check_mprq_support(struct rte_eth_dev *dev)
63 struct mlx5_priv *priv = dev->data->dev_private;
65 if (priv->config.mprq.enabled &&
66 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
72 * Check whether Multi-Packet RQ is enabled for the Rx queue.
75 * Pointer to receive queue structure.
78 * 0 if disabled, otherwise enabled.
81 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
83 return rxq->strd_num_n > 0;
87 * Check whether Multi-Packet RQ is enabled for the device.
90 * Pointer to Ethernet device.
93 * 0 if disabled, otherwise enabled.
96 mlx5_mprq_enabled(struct rte_eth_dev *dev)
98 struct mlx5_priv *priv = dev->data->dev_private;
103 if (mlx5_check_mprq_support(dev) < 0)
105 /* All the configured queues should be enabled. */
106 for (i = 0; i < priv->rxqs_n; ++i) {
107 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
108 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
109 (rxq, struct mlx5_rxq_ctrl, rxq);
111 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
114 if (mlx5_rxq_mprq_enabled(rxq))
117 /* Multi-Packet RQ can't be partially configured. */
118 MLX5_ASSERT(n == 0 || n == n_ibv);
123 * Calculate the number of CQEs in CQ for the Rx queue.
126 * Pointer to receive queue structure.
129 * Number of CQEs in CQ.
132 mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
135 unsigned int wqe_n = 1 << rxq_data->elts_n;
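/*
 * With MPRQ every stride may carry its own packet and thus consume a separate
 * CQE, so the CQ is sized for all strides of all WQEs; otherwise one CQE per
 * WQE is sufficient.
 */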
137 if (mlx5_rxq_mprq_enabled(rxq_data))
138 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
145 * Allocate RX queue elements for Multi-Packet RQ.
148 * Pointer to RX queue structure.
151 * 0 on success, a negative errno value otherwise and rte_errno is set.
154 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
156 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
157 unsigned int wqe_n = 1 << rxq->elts_n;
161 /* Iterate on segments. */
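/*
 * Note the loop runs one extra iteration: the last buffer obtained from the
 * mempool is not placed in the ring but kept aside as the replacement buffer
 * (mprq_repl).
 */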
162 for (i = 0; i <= wqe_n; ++i) {
163 struct mlx5_mprq_buf *buf;
165 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
166 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
171 (*rxq->mprq_bufs)[i] = buf;
173 rxq->mprq_repl = buf;
176 "port %u MPRQ queue %u allocated and configured %u segments",
177 rxq->port_id, rxq->idx, wqe_n);
180 err = rte_errno; /* Save rte_errno before cleanup. */
182 for (i = 0; (i != wqe_n); ++i) {
183 if ((*rxq->mprq_bufs)[i] != NULL)
184 rte_mempool_put(rxq->mprq_mp,
185 (*rxq->mprq_bufs)[i]);
186 (*rxq->mprq_bufs)[i] = NULL;
188 DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
189 rxq->port_id, rxq->idx);
190 rte_errno = err; /* Restore rte_errno. */
195 * Allocate RX queue elements for Single-Packet RQ.
198 * Pointer to RX queue structure.
201 * 0 on success, errno value on failure.
204 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
206 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
207 unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
208 (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
209 (1 << rxq_ctrl->rxq.elts_n);
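/*
 * With MPRQ one mbuf is allocated per stride so that the vectorized burst
 * can be used as well; otherwise one mbuf per WQE is allocated.
 */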
213 /* Iterate on segments. */
214 for (i = 0; (i != elts_n); ++i) {
215 struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
216 struct rte_mbuf *buf;
218 buf = rte_pktmbuf_alloc(seg->mp);
220 DRV_LOG(ERR, "port %u empty mbuf pool",
221 PORT_ID(rxq_ctrl->priv));
225 /* Headroom is reserved by rte_pktmbuf_alloc(). */
226 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
227 /* Buffer is supposed to be empty. */
228 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
229 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
230 MLX5_ASSERT(!buf->next);
231 SET_DATA_OFF(buf, seg->offset);
232 PORT(buf) = rxq_ctrl->rxq.port_id;
233 DATA_LEN(buf) = seg->length;
234 PKT_LEN(buf) = seg->length;
236 (*rxq_ctrl->rxq.elts)[i] = buf;
238 /* If Rx vector is activated. */
239 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
240 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
241 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
242 struct rte_pktmbuf_pool_private *priv =
243 (struct rte_pktmbuf_pool_private *)
244 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
247 /* Initialize default rearm_data for vPMD. */
248 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
249 rte_mbuf_refcnt_set(mbuf_init, 1);
250 mbuf_init->nb_segs = 1;
251 mbuf_init->port = rxq->port_id;
252 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
253 mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
255 * prevent compiler reordering:
256 * rearm_data covers previous fields.
258 rte_compiler_barrier();
259 rxq->mbuf_initializer =
260 *(rte_xmm_t *)&mbuf_init->rearm_data;
261 /* Padding with a fake mbuf for vectorized Rx. */
262 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
263 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
266 "port %u SPRQ queue %u allocated and configured %u segments"
268 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
269 elts_n / (1 << rxq_ctrl->rxq.sges_n));
272 err = rte_errno; /* Save rte_errno before cleanup. */
274 for (i = 0; (i != elts_n); ++i) {
275 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
276 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
277 (*rxq_ctrl->rxq.elts)[i] = NULL;
279 DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
280 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
281 rte_errno = err; /* Restore rte_errno. */
286 * Allocate RX queue elements.
289 * Pointer to RX queue structure.
292 * 0 on success, errno value on failure.
295 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
300 * For MPRQ we need to allocate both MPRQ buffers
301 * for WQEs and simple mbufs for vector processing.
303 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
304 ret = rxq_alloc_elts_mprq(rxq_ctrl);
305 return (ret || rxq_alloc_elts_sprq(rxq_ctrl));
309 * Free RX queue elements for Multi-Packet RQ.
312 * Pointer to RX queue structure.
315 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
317 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
320 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
321 rxq->port_id, rxq->idx, (1u << rxq->elts_n));
322 if (rxq->mprq_bufs == NULL)
324 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
325 if ((*rxq->mprq_bufs)[i] != NULL)
326 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
327 (*rxq->mprq_bufs)[i] = NULL;
329 if (rxq->mprq_repl != NULL) {
330 mlx5_mprq_buf_free(rxq->mprq_repl);
331 rxq->mprq_repl = NULL;
336 * Free RX queue elements for Single-Packet RQ.
339 * Pointer to RX queue structure.
342 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
344 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
345 const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
346 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
348 const uint16_t q_mask = q_n - 1;
349 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
352 DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
353 PORT_ID(rxq_ctrl->priv), rxq->idx, q_n);
354 if (rxq->elts == NULL)
357 * Some mbufs in the ring belong to the application;
358 * they cannot be freed.
360 if (mlx5_rxq_check_vec_support(rxq) > 0) {
361 for (i = 0; i < used; ++i)
362 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
363 rxq->rq_pi = rxq->rq_ci;
365 for (i = 0; i != q_n; ++i) {
366 if ((*rxq->elts)[i] != NULL)
367 rte_pktmbuf_free_seg((*rxq->elts)[i]);
368 (*rxq->elts)[i] = NULL;
373 * Free RX queue elements.
376 * Pointer to RX queue structure.
379 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
382 * For MPRQ we need to allocate both MPRQ buffers
383 * for WQEs and simple mbufs for vector processing.
385 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
386 rxq_free_elts_mprq(rxq_ctrl);
387 rxq_free_elts_sprq(rxq_ctrl);
391 * Returns the per-queue supported offloads.
394 * Pointer to Ethernet device.
397 * Supported Rx offloads.
400 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
402 struct mlx5_priv *priv = dev->data->dev_private;
403 struct mlx5_dev_config *config = &priv->config;
404 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
405 RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
406 DEV_RX_OFFLOAD_TIMESTAMP |
407 DEV_RX_OFFLOAD_JUMBO_FRAME |
408 DEV_RX_OFFLOAD_RSS_HASH);
410 if (config->hw_fcs_strip)
411 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
414 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
415 DEV_RX_OFFLOAD_UDP_CKSUM |
416 DEV_RX_OFFLOAD_TCP_CKSUM);
417 if (config->hw_vlan_strip)
418 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
419 if (MLX5_LRO_SUPPORTED(dev))
420 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
426 * Returns the per-port supported offloads.
429 * Supported Rx offloads.
432 mlx5_get_rx_port_offloads(void)
434 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
440 * Verify if the queue can be released.
443 * Pointer to Ethernet device.
448 * 1 if the queue can be released
449 * 0 if the queue cannot be released because there are references to it.
450 * Negative errno and rte_errno is set if queue doesn't exist.
453 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
455 struct mlx5_priv *priv = dev->data->dev_private;
456 struct mlx5_rxq_ctrl *rxq_ctrl;
458 if (!(*priv->rxqs)[idx]) {
462 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
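/* Only the reference taken at queue creation time remains. */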
463 return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
467 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
469 rxq_sync_cq(struct mlx5_rxq_data *rxq)
471 const uint16_t cqe_n = 1 << rxq->cqe_n;
472 const uint16_t cqe_mask = cqe_n - 1;
473 volatile struct mlx5_cqe *cqe;
478 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
479 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
480 if (ret == MLX5_CQE_STATUS_HW_OWN)
482 if (ret == MLX5_CQE_STATUS_ERR) {
486 MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
487 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
491 /* Compute the next non compressed CQE. */
492 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
495 /* Move all CQEs to HW ownership, including possible MiniCQEs. */
496 for (i = 0; i < cqe_n; i++) {
497 cqe = &(*rxq->cqes)[i];
498 cqe->op_own = MLX5_CQE_INVALIDATE;
500 /* Resync CQE and WQE (WQ in RESET state). */
502 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
504 *rxq->rq_db = rte_cpu_to_be_32(0);
509 * Rx queue stop. Device queue goes to the RESET state,
510 * all involved mbufs are freed from WQ.
513 * Pointer to Ethernet device structure.
518 * 0 on success, a negative errno value otherwise and rte_errno is set.
521 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
523 struct mlx5_priv *priv = dev->data->dev_private;
524 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
525 struct mlx5_rxq_ctrl *rxq_ctrl =
526 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
529 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
530 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
532 DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
537 /* Remove all processed CQEs. */
539 /* Free all involved mbufs. */
540 rxq_free_elts(rxq_ctrl);
541 /* Set the actual queue state. */
542 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
547 * Rx queue stop. Device queue goes to the RESET state,
548 * all involved mbufs are freed from WQ.
551 * Pointer to Ethernet device structure.
556 * 0 on success, a negative errno value otherwise and rte_errno is set.
559 mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
561 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
564 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
565 DRV_LOG(ERR, "Hairpin queue can't be stopped");
569 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
572 * Vectorized Rx burst requires the CQ and RQ indices to be
573 * synchronized, which might be broken on RQ restart
574 * and cause Rx malfunction, so queue stopping is
575 * not supported if vectorized Rx burst is engaged.
576 * The burst routine pointer depends on the process
577 * type, so the check is performed here.
579 if (pkt_burst == mlx5_rx_burst_vec) {
580 DRV_LOG(ERR, "Rx queue stop is not supported "
581 "for vectorized Rx");
585 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
586 ret = mlx5_mp_os_req_queue_control(dev, idx,
587 MLX5_MP_REQ_QUEUE_RX_STOP);
589 ret = mlx5_rx_queue_stop_primary(dev, idx);
595 * Rx queue start. Device queue goes to the ready state,
596 * all required mbufs are allocated and WQ is replenished.
599 * Pointer to Ethernet device structure.
604 * 0 on success, a negative errno value otherwise and rte_errno is set.
607 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
609 struct mlx5_priv *priv = dev->data->dev_private;
610 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
611 struct mlx5_rxq_ctrl *rxq_ctrl =
612 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
615 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
616 /* Allocate needed buffers. */
617 ret = rxq_alloc_elts(rxq_ctrl);
619 DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
624 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
626 /* Reset RQ consumer before moving queue to READY state. */
627 *rxq->rq_db = rte_cpu_to_be_32(0);
629 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
631 DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
636 /* Reinitialize RQ - set WQEs. */
637 mlx5_rxq_initialize(rxq);
638 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
639 /* Set actual queue state. */
640 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
645 * Rx queue start. Device queue goes to the ready state,
646 * all required mbufs are allocated and WQ is replenished.
649 * Pointer to Ethernet device structure.
654 * 0 on success, a negative errno value otherwise and rte_errno is set.
657 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
661 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
662 DRV_LOG(ERR, "Hairpin queue can't be started");
666 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
668 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
669 ret = mlx5_mp_os_req_queue_control(dev, idx,
670 MLX5_MP_REQ_QUEUE_RX_START);
672 ret = mlx5_rx_queue_start_primary(dev, idx);
678 * Rx queue presetup checks.
681 * Pointer to Ethernet device structure.
685 * Number of descriptors to configure in queue.
688 * 0 on success, a negative errno value otherwise and rte_errno is set.
691 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
693 struct mlx5_priv *priv = dev->data->dev_private;
695 if (!rte_is_power_of_2(*desc)) {
696 *desc = 1 << log2above(*desc);
698 "port %u increased number of descriptors in Rx queue %u"
699 " to the next power of two (%d)",
700 dev->data->port_id, idx, *desc);
702 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
703 dev->data->port_id, idx, *desc);
704 if (idx >= priv->rxqs_n) {
705 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
706 dev->data->port_id, idx, priv->rxqs_n);
707 rte_errno = EOVERFLOW;
710 if (!mlx5_rxq_releasable(dev, idx)) {
711 DRV_LOG(ERR, "port %u unable to release queue index %u",
712 dev->data->port_id, idx);
716 mlx5_rxq_release(dev, idx);
723 * Pointer to Ethernet device structure.
727 * Number of descriptors to configure in queue.
729 * NUMA socket on which memory must be allocated.
731 * Thresholds parameters.
733 * Memory pool for buffer allocations.
736 * 0 on success, a negative errno value otherwise and rte_errno is set.
739 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
740 unsigned int socket, const struct rte_eth_rxconf *conf,
741 struct rte_mempool *mp)
743 struct mlx5_priv *priv = dev->data->dev_private;
744 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
745 struct mlx5_rxq_ctrl *rxq_ctrl =
746 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
747 struct rte_eth_rxseg_split *rx_seg =
748 (struct rte_eth_rxseg_split *)conf->rx_seg;
749 struct rte_eth_rxseg_split rx_single = {.mp = mp};
750 uint16_t n_seg = conf->rx_nseg;
755 * The parameters should be checked on the rte_eth_dev layer.
756 * If mp is specified it means the compatible configuration
757 * without buffer split feature tuning is requested.
763 uint64_t offloads = conf->offloads |
764 dev->data->dev_conf.rxmode.offloads;
766 /* The offloads should be checked on rte_eth_dev layer. */
767 MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
768 if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
769 DRV_LOG(ERR, "port %u queue index %u split "
770 "offload not configured",
771 dev->data->port_id, idx);
775 MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
777 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
780 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
782 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
783 dev->data->port_id, idx);
787 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
788 dev->data->port_id, idx);
789 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
796 * Pointer to Ethernet device structure.
800 * Number of descriptors to configure in queue.
801 * @param hairpin_conf
802 * Hairpin configuration parameters.
805 * 0 on success, a negative errno value otherwise and rte_errno is set.
808 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
810 const struct rte_eth_hairpin_conf *hairpin_conf)
812 struct mlx5_priv *priv = dev->data->dev_private;
813 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
814 struct mlx5_rxq_ctrl *rxq_ctrl =
815 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
818 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
821 if (hairpin_conf->peer_count != 1 ||
822 hairpin_conf->peers[0].port != dev->data->port_id ||
823 hairpin_conf->peers[0].queue >= priv->txqs_n) {
824 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
825 " invalid hairpind configuration", dev->data->port_id,
830 rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
832 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
833 dev->data->port_id, idx);
837 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
838 dev->data->port_id, idx);
839 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
844 * DPDK callback to release a RX queue.
847 * Generic RX queue pointer.
850 mlx5_rx_queue_release(void *dpdk_rxq)
852 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
853 struct mlx5_rxq_ctrl *rxq_ctrl;
854 struct mlx5_priv *priv;
858 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
859 priv = rxq_ctrl->priv;
860 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
861 rte_panic("port %u Rx queue %u is still used by a flow and"
862 " cannot be removed\n",
863 PORT_ID(priv), rxq->idx);
864 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
868 * Allocate queue vector and fill epoll fd list for Rx interrupts.
871 * Pointer to Ethernet device.
874 * 0 on success, a negative errno value otherwise and rte_errno is set.
877 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
879 struct mlx5_priv *priv = dev->data->dev_private;
881 unsigned int rxqs_n = priv->rxqs_n;
882 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
883 unsigned int count = 0;
884 struct rte_intr_handle *intr_handle = dev->intr_handle;
886 if (!dev->data->dev_conf.intr_conf.rxq)
888 mlx5_rx_intr_vec_disable(dev);
889 intr_handle->intr_vec = mlx5_malloc(0,
890 n * sizeof(intr_handle->intr_vec[0]),
892 if (intr_handle->intr_vec == NULL) {
894 "port %u failed to allocate memory for interrupt"
895 " vector, Rx interrupts will not be supported",
900 intr_handle->type = RTE_INTR_HANDLE_EXT;
901 for (i = 0; i != n; ++i) {
902 /* This rxq obj must not be released in this function. */
903 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
904 struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
907 /* Skip queues that cannot request interrupts. */
908 if (!rxq_obj || (!rxq_obj->ibv_channel &&
909 !rxq_obj->devx_channel)) {
910 /* Use invalid intr_vec[] index to disable entry. */
911 intr_handle->intr_vec[i] =
912 RTE_INTR_VEC_RXTX_OFFSET +
913 RTE_MAX_RXTX_INTR_VEC_ID;
914 /* Decrease the rxq_ctrl's refcnt */
916 mlx5_rxq_release(dev, i);
919 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
921 "port %u too many Rx queues for interrupt"
922 " vector size (%d), Rx interrupts cannot be"
924 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
925 mlx5_rx_intr_vec_disable(dev);
929 rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
933 "port %u failed to make Rx interrupt file"
934 " descriptor %d non-blocking for queue index"
936 dev->data->port_id, rxq_obj->fd, i);
937 mlx5_rx_intr_vec_disable(dev);
940 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
941 intr_handle->efds[count] = rxq_obj->fd;
945 mlx5_rx_intr_vec_disable(dev);
947 intr_handle->nb_efd = count;
952 * Clean up Rx interrupts handler.
955 * Pointer to Ethernet device.
958 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
960 struct mlx5_priv *priv = dev->data->dev_private;
961 struct rte_intr_handle *intr_handle = dev->intr_handle;
963 unsigned int rxqs_n = priv->rxqs_n;
964 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
966 if (!dev->data->dev_conf.intr_conf.rxq)
968 if (!intr_handle->intr_vec)
970 for (i = 0; i != n; ++i) {
971 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
972 RTE_MAX_RXTX_INTR_VEC_ID)
975 * Need to access the queue directly to release the reference
976 * kept in mlx5_rx_intr_vec_enable().
978 mlx5_rxq_release(dev, i);
981 rte_intr_free_epoll_fd(intr_handle);
982 if (intr_handle->intr_vec)
983 mlx5_free(intr_handle->intr_vec);
984 intr_handle->nb_efd = 0;
985 intr_handle->intr_vec = NULL;
989 * MLX5 CQ notification.
992 * Pointer to receive queue structure.
994 * Sequence number per receive queue.
997 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
1000 uint32_t doorbell_hi;
1002 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
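/*
 * Compose the arm doorbell: the arm sequence number and the CQ consumer index
 * form the high word, the CQ number the low word, then write it to the UAR
 * doorbell register.
 */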
1004 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
1005 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
1006 doorbell = (uint64_t)doorbell_hi << 32;
1007 doorbell |= rxq->cqn;
1008 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
1009 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
1010 cq_db_reg, rxq->uar_lock_cq);
1014 * DPDK callback for Rx queue interrupt enable.
1017 * Pointer to Ethernet device structure.
1018 * @param rx_queue_id
1022 * 0 on success, a negative errno value otherwise and rte_errno is set.
1025 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1027 struct mlx5_rxq_ctrl *rxq_ctrl;
1029 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1032 if (rxq_ctrl->irq) {
1033 if (!rxq_ctrl->obj) {
1034 mlx5_rxq_release(dev, rx_queue_id);
1037 mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
1039 mlx5_rxq_release(dev, rx_queue_id);
1047 * DPDK callback for Rx queue interrupt disable.
1050 * Pointer to Ethernet device structure.
1051 * @param rx_queue_id
1055 * 0 on success, a negative errno value otherwise and rte_errno is set.
1058 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1060 struct mlx5_priv *priv = dev->data->dev_private;
1061 struct mlx5_rxq_ctrl *rxq_ctrl;
1064 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1071 if (rxq_ctrl->irq) {
1072 ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
1075 rxq_ctrl->rxq.cq_arm_sn++;
1077 mlx5_rxq_release(dev, rx_queue_id);
1081 * The ret variable may be EAGAIN, which means the get_event function was
1082 * called before any event was received.
1088 ret = rte_errno; /* Save rte_errno before cleanup. */
1089 mlx5_rxq_release(dev, rx_queue_id);
1091 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
1092 dev->data->port_id, rx_queue_id);
1093 rte_errno = ret; /* Restore rte_errno. */
1098 * Verify the Rx queue objects list is empty
1101 * Pointer to Ethernet device.
1104 * The number of objects not released.
1107 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1109 struct mlx5_priv *priv = dev->data->dev_private;
1111 struct mlx5_rxq_obj *rxq_obj;
1113 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1114 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1115 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1122 * Callback function to initialize mbufs for Multi-Packet RQ.
1125 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1126 void *_m, unsigned int i __rte_unused)
1128 struct mlx5_mprq_buf *buf = _m;
1129 struct rte_mbuf_ext_shared_info *shinfo;
1130 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1133 memset(_m, 0, sizeof(*buf));
1135 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
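/*
 * Attach a shared info descriptor to every stride so that each stride can be
 * handed to the application as an externally attached mbuf buffer.
 */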
1136 for (j = 0; j != strd_n; ++j) {
1137 shinfo = &buf->shinfos[j];
1138 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1139 shinfo->fcb_opaque = buf;
1144 * Free mempool of Multi-Packet RQ.
1147 * Pointer to Ethernet device.
1150 * 0 on success, negative errno value on failure.
1153 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1155 struct mlx5_priv *priv = dev->data->dev_private;
1156 struct rte_mempool *mp = priv->mprq_mp;
1161 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1162 dev->data->port_id, mp->name);
1164 * If a buffer in the pool has been externally attached to a mbuf and it
1165 * is still in use by application, destroying the Rx queue can spoil
1166 * the packet. It is unlikely to happen but if the application dynamically
1167 * creates and destroys queues while holding Rx packets, this can happen.
1169 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1170 * RQ isn't provided by application but managed by PMD.
1172 if (!rte_mempool_full(mp)) {
1174 "port %u mempool for Multi-Packet RQ is still in use",
1175 dev->data->port_id);
1179 rte_mempool_free(mp);
1180 /* Unset mempool for each Rx queue. */
1181 for (i = 0; i != priv->rxqs_n; ++i) {
1182 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1186 rxq->mprq_mp = NULL;
1188 priv->mprq_mp = NULL;
1193 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1194 * mempool. If already allocated, reuse it if there are enough elements.
1195 * Otherwise, resize it.
1198 * Pointer to Ethernet device.
1201 * 0 on success, negative errno value on failure.
1204 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1206 struct mlx5_priv *priv = dev->data->dev_private;
1207 struct rte_mempool *mp = priv->mprq_mp;
1208 char name[RTE_MEMPOOL_NAMESIZE];
1209 unsigned int desc = 0;
1210 unsigned int buf_len;
1211 unsigned int obj_num;
1212 unsigned int obj_size;
1213 unsigned int strd_num_n = 0;
1214 unsigned int strd_sz_n = 0;
1216 unsigned int n_ibv = 0;
1218 if (!mlx5_mprq_enabled(dev))
1220 /* Count the total number of descriptors configured. */
1221 for (i = 0; i != priv->rxqs_n; ++i) {
1222 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1223 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1224 (rxq, struct mlx5_rxq_ctrl, rxq);
1226 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1229 desc += 1 << rxq->elts_n;
1230 /* Get the max number of strides. */
1231 if (strd_num_n < rxq->strd_num_n)
1232 strd_num_n = rxq->strd_num_n;
1233 /* Get the max size of a stride. */
1234 if (strd_sz_n < rxq->strd_sz_n)
1235 strd_sz_n = rxq->strd_sz_n;
1237 MLX5_ASSERT(strd_num_n && strd_sz_n);
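/*
 * Each mempool object holds the mlx5_mprq_buf control structure, the stride
 * memory itself, one shared info entry per stride and headroom.
 */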
1238 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1239 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1240 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
1242 * Received packets can be either memcpy'd or externally referenced. In
1243 * case the packet is attached to an mbuf as an external buffer, it is
1244 * not possible to predict how the buffers will be queued by the
1245 * application, so there is no way to exactly pre-allocate the needed
1246 * buffers in advance; the only option is to speculatively prepare enough.
1248 * In the data path, if this mempool is depleted, the PMD will try to memcpy
1249 * received packets to buffers provided by the application (rxq->mp) until
1250 * this mempool becomes available again.
1253 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1255 * rte_mempool_create_empty() has a sanity check to refuse a large cache
1256 * size compared to the number of elements.
1257 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so use the
1258 * constant number 2 instead.
1260 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1261 /* Check if a mempool is already allocated and whether it can be reused. */
1262 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1263 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1264 dev->data->port_id, mp->name);
1267 } else if (mp != NULL) {
1268 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1269 dev->data->port_id, mp->name);
1271 * If the free fails, which means the mempool may still be in use, there is
1272 * no way but to keep using the existing one. On buffer underrun,
1273 * packets will be memcpy'd instead of external buffer
1276 if (mlx5_mprq_free_mp(dev)) {
1277 if (mp->elt_size >= obj_size)
1283 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1284 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1285 0, NULL, NULL, mlx5_mprq_buf_init,
1286 (void *)(uintptr_t)(1 << strd_num_n),
1287 dev->device->numa_node, 0);
1290 "port %u failed to allocate a mempool for"
1291 " Multi-Packet RQ, count=%u, size=%u",
1292 dev->data->port_id, obj_num, obj_size);
1298 /* Set mempool for each Rx queue. */
1299 for (i = 0; i != priv->rxqs_n; ++i) {
1300 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1301 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1302 (rxq, struct mlx5_rxq_ctrl, rxq);
1304 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1308 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1309 dev->data->port_id);
1313 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1314 sizeof(struct rte_vlan_hdr) * 2 + \
1315 sizeof(struct rte_ipv6_hdr)))
1316 #define MAX_TCP_OPTION_SIZE 40u
1317 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1318 sizeof(struct rte_tcp_hdr) + \
1319 MAX_TCP_OPTION_SIZE))
1322 * Adjust the maximum LRO message size.
1325 * Pointer to Ethernet device.
1328 * @param max_lro_size
1329 * The maximum size for LRO packet.
1332 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1333 uint32_t max_lro_size)
1335 struct mlx5_priv *priv = dev->data->dev_private;
1337 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1338 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1339 MLX5_MAX_TCP_HDR_OFFSET)
1340 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1341 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1342 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
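/*
 * The limit is kept in units of MLX5_LRO_SEG_CHUNK_SIZE and scaled back to
 * bytes when reported below.
 */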
1343 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
1344 if (priv->max_lro_msg_size)
1345 priv->max_lro_msg_size =
1346 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1348 priv->max_lro_msg_size = max_lro_size;
1350 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1351 dev->data->port_id, idx,
1352 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1356 * Create a DPDK Rx queue.
1359 * Pointer to Ethernet device.
1363 * Number of descriptors to configure in queue.
1365 * NUMA socket on which memory must be allocated.
1368 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1370 struct mlx5_rxq_ctrl *
1371 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1372 unsigned int socket, const struct rte_eth_rxconf *conf,
1373 const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
1375 struct mlx5_priv *priv = dev->data->dev_private;
1376 struct mlx5_rxq_ctrl *tmpl;
1377 unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
1378 struct mlx5_dev_config *config = &priv->config;
1379 uint64_t offloads = conf->offloads |
1380 dev->data->dev_conf.rxmode.offloads;
1381 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1382 unsigned int max_rx_pkt_len = lro_on_queue ?
1383 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1384 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1385 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1386 RTE_PKTMBUF_HEADROOM;
1387 unsigned int max_lro_size = 0;
1388 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
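/*
 * MPRQ is only considered for a plain single-segment configuration (no buffer
 * split requested). Stride number and size are taken from the configuration
 * when specified, otherwise from the defaults; mprq_stride_cap is the
 * resulting byte capacity of a single MPRQ WQE.
 */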
1389 const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
1390 !rx_seg[0].offset && !rx_seg[0].length;
1391 unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
1392 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1393 unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
1394 (1U << config->mprq.max_stride_size_n) ?
1395 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1396 unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
1397 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1398 (config->mprq.stride_size_n ?
1399 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
1401 * Always allocate extra slots, even if eventually
1402 * the vector Rx will not be used.
1404 uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1405 const struct rte_eth_rxseg_split *qs_seg = rx_seg;
1406 unsigned int tail_len;
1408 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1409 desc_n * sizeof(struct rte_mbuf *), 0, socket);
1414 MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
1416 * Build the array of actual buffer offsets and lengths.
1417 * Pad with the buffers from the last memory pool if
1418 * needed to handle max size packets, replace zero length
1419 * with the buffer length from the pool.
1421 tail_len = max_rx_pkt_len;
1423 struct mlx5_eth_rxseg *hw_seg =
1424 &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
1425 uint32_t buf_len, offset, seg_len;
1428 * For the buffers beyond the provided descriptions the offset is zero,
1429 * the first buffer contains the headroom.
1431 buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
1432 offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
1433 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
1435 * For the buffers beyond the provided descriptions the length is
1436 * the pool buffer length; zero lengths are replaced with
1437 * the pool buffer length as well.
1439 seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
1443 /* Check is done in long int, no overflows. */
1444 if (buf_len < seg_len + offset) {
1445 DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
1446 "%u/%u can't be satisfied",
1447 dev->data->port_id, idx,
1448 qs_seg->length, qs_seg->offset);
1452 if (seg_len > tail_len)
1453 seg_len = buf_len - offset;
1454 if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
1456 "port %u too many SGEs (%u) needed to handle"
1457 " requested maximum packet size %u, the maximum"
1458 " supported are %u", dev->data->port_id,
1459 tmpl->rxq.rxseg_n, max_rx_pkt_len,
1461 rte_errno = ENOTSUP;
1464 /* Build the actual scattering element in the queue object. */
1465 hw_seg->mp = qs_seg->mp;
1466 MLX5_ASSERT(offset <= UINT16_MAX);
1467 MLX5_ASSERT(seg_len <= UINT16_MAX);
1468 hw_seg->offset = (uint16_t)offset;
1469 hw_seg->length = (uint16_t)seg_len;
1471 * Advance the segment descriptor; the padding is based
1472 * on the attributes of the last descriptor.
1474 if (tmpl->rxq.rxseg_n < n_seg)
1476 tail_len -= RTE_MIN(tail_len, seg_len);
1477 } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
1478 MLX5_ASSERT(tmpl->rxq.rxseg_n &&
1479 tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
1480 if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
1481 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1482 " configured and no enough mbuf space(%u) to contain "
1483 "the maximum RX packet length(%u) with head-room(%u)",
1484 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1485 RTE_PKTMBUF_HEADROOM);
1489 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1490 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1491 MLX5_MR_BTREE_CACHE_N, socket)) {
1492 /* rte_errno is already set. */
1495 tmpl->socket = socket;
1496 if (dev->data->dev_conf.intr_conf.rxq)
1499 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1500 * following conditions are met:
1501 * - MPRQ is enabled.
1502 * - The number of descs is more than the number of strides.
1503 * - max_rx_pkt_len plus overhead is less than the max size
1504 * of a stride or mprq_stride_size is specified by a user.
1505 * Need to make sure that there are enough strides to encap
1506 * the maximum packet size in case mprq_stride_size is set.
1507 * Otherwise, enable Rx scatter if necessary.
1509 if (mprq_en && desc > (1U << mprq_stride_nums) &&
1510 (non_scatter_min_mbuf_size <=
1511 (1U << config->mprq.max_stride_size_n) ||
1512 (config->mprq.stride_size_n &&
1513 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1514 /* TODO: Rx scatter isn't supported yet. */
1515 tmpl->rxq.sges_n = 0;
1516 /* Trim the number of descs needed. */
1517 desc >>= mprq_stride_nums;
1518 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1519 config->mprq.stride_num_n : mprq_stride_nums;
1520 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1521 config->mprq.stride_size_n : mprq_stride_size;
1522 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1523 tmpl->rxq.strd_scatter_en =
1524 !!(offloads & DEV_RX_OFFLOAD_SCATTER);
1525 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1526 config->mprq.max_memcpy_len);
1527 max_lro_size = RTE_MIN(max_rx_pkt_len,
1528 (1u << tmpl->rxq.strd_num_n) *
1529 (1u << tmpl->rxq.strd_sz_n));
1531 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1532 " strd_num_n = %u, strd_sz_n = %u",
1533 dev->data->port_id, idx,
1534 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1535 } else if (tmpl->rxq.rxseg_n == 1) {
1536 MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
1537 tmpl->rxq.sges_n = 0;
1538 max_lro_size = max_rx_pkt_len;
1539 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1540 unsigned int sges_n;
1542 if (lro_on_queue && first_mb_free_size <
1543 MLX5_MAX_LRO_HEADER_FIX) {
1544 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1545 " to include the max header size(%u) for LRO",
1546 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1547 rte_errno = ENOTSUP;
1551 * Determine the number of SGEs needed for a full packet
1552 * and round it to the next power of two.
1554 sges_n = log2above(tmpl->rxq.rxseg_n);
1555 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1557 "port %u too many SGEs (%u) needed to handle"
1558 " requested maximum packet size %u, the maximum"
1559 " supported are %u", dev->data->port_id,
1560 1 << sges_n, max_rx_pkt_len,
1561 1u << MLX5_MAX_LOG_RQ_SEGS);
1562 rte_errno = ENOTSUP;
1565 tmpl->rxq.sges_n = sges_n;
1566 max_lro_size = max_rx_pkt_len;
1568 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1570 "port %u MPRQ is requested but cannot be enabled\n"
1571 " (requested: pkt_sz = %u, desc_num = %u,"
1572 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1573 " supported: min_rxqs_num = %u,"
1574 " min_stride_sz = %u, max_stride_sz = %u).",
1575 dev->data->port_id, non_scatter_min_mbuf_size,
1577 config->mprq.stride_size_n ?
1578 (1U << config->mprq.stride_size_n) :
1579 (1U << mprq_stride_size),
1580 config->mprq.stride_num_n ?
1581 (1U << config->mprq.stride_num_n) :
1582 (1U << mprq_stride_nums),
1583 config->mprq.min_rxqs_num,
1584 (1U << config->mprq.min_stride_size_n),
1585 (1U << config->mprq.max_stride_size_n));
1586 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1587 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1588 if (desc % (1 << tmpl->rxq.sges_n)) {
1590 "port %u number of Rx queue descriptors (%u) is not a"
1591 " multiple of SGEs per packet (%u)",
1594 1 << tmpl->rxq.sges_n);
1598 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1599 /* Toggle RX checksum offload if hardware supports it. */
1600 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1601 /* Configure Rx timestamp. */
1602 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1603 tmpl->rxq.timestamp_rx_flag = 0;
1604 if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
1605 &tmpl->rxq.timestamp_offset,
1606 &tmpl->rxq.timestamp_rx_flag) != 0) {
1607 DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
1610 /* Configure VLAN stripping. */
1611 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1612 /* By default, FCS (CRC) is stripped by hardware. */
1613 tmpl->rxq.crc_present = 0;
1614 tmpl->rxq.lro = lro_on_queue;
1615 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1616 if (config->hw_fcs_strip) {
1618 * RQs used for LRO-enabled TIRs should not be
1619 * configured to scatter the FCS.
1623 "port %u CRC stripping has been "
1624 "disabled but will still be performed "
1625 "by hardware, because LRO is enabled",
1626 dev->data->port_id);
1628 tmpl->rxq.crc_present = 1;
1631 "port %u CRC stripping has been disabled but will"
1632 " still be performed by hardware, make sure MLNX_OFED"
1633 " and firmware are up to date",
1634 dev->data->port_id);
1638 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1639 " incoming frames to hide it",
1641 tmpl->rxq.crc_present ? "disabled" : "enabled",
1642 tmpl->rxq.crc_present << 2);
1644 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1645 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1646 tmpl->rxq.port_id = dev->data->port_id;
1648 tmpl->rxq.mp = rx_seg[0].mp;
1649 tmpl->rxq.elts_n = log2above(desc);
1650 tmpl->rxq.rq_repl_thresh =
1651 MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
1653 (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
1654 tmpl->rxq.mprq_bufs =
1655 (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
1657 tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
1659 tmpl->rxq.idx = idx;
1660 __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1661 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1669 * Create a DPDK Rx hairpin queue.
1672 * Pointer to Ethernet device.
1676 * Number of descriptors to configure in queue.
1677 * @param hairpin_conf
1678 * The hairpin binding configuration.
1681 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1683 struct mlx5_rxq_ctrl *
1684 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1685 const struct rte_eth_hairpin_conf *hairpin_conf)
1687 struct mlx5_priv *priv = dev->data->dev_private;
1688 struct mlx5_rxq_ctrl *tmpl;
1690 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1696 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1697 tmpl->socket = SOCKET_ID_ANY;
1698 tmpl->rxq.rss_hash = 0;
1699 tmpl->rxq.port_id = dev->data->port_id;
1701 tmpl->rxq.mp = NULL;
1702 tmpl->rxq.elts_n = log2above(desc);
1703 tmpl->rxq.elts = NULL;
1704 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
1705 tmpl->hairpin_conf = *hairpin_conf;
1706 tmpl->rxq.idx = idx;
1707 __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1708 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1716 * Pointer to Ethernet device.
1721 * A pointer to the queue if it exists, NULL otherwise.
1723 struct mlx5_rxq_ctrl *
1724 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1726 struct mlx5_priv *priv = dev->data->dev_private;
1727 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1728 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1731 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1732 __atomic_add_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
1738 * Release a Rx queue.
1741 * Pointer to Ethernet device.
1746 * 1 while a reference on it exists, 0 when freed.
1749 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1751 struct mlx5_priv *priv = dev->data->dev_private;
1752 struct mlx5_rxq_ctrl *rxq_ctrl;
1754 if (!(*priv->rxqs)[idx])
1756 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1757 if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1759 if (rxq_ctrl->obj) {
1760 priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
1761 LIST_REMOVE(rxq_ctrl->obj, next);
1762 mlx5_free(rxq_ctrl->obj);
1763 rxq_ctrl->obj = NULL;
1765 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
1766 rxq_free_elts(rxq_ctrl);
1767 if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1768 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
1769 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1770 LIST_REMOVE(rxq_ctrl, next);
1771 mlx5_free(rxq_ctrl);
1772 (*priv->rxqs)[idx] = NULL;
1778 * Verify the Rx Queue list is empty
1781 * Pointer to Ethernet device.
1784 * The number of objects not released.
1787 mlx5_rxq_verify(struct rte_eth_dev *dev)
1789 struct mlx5_priv *priv = dev->data->dev_private;
1790 struct mlx5_rxq_ctrl *rxq_ctrl;
1793 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1794 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1795 dev->data->port_id, rxq_ctrl->rxq.idx);
1802 * Get a Rx queue type.
1805 * Pointer to Ethernet device.
1810 * The Rx queue type.
1813 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
1815 struct mlx5_priv *priv = dev->data->dev_private;
1816 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1818 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1819 rxq_ctrl = container_of((*priv->rxqs)[idx],
1820 struct mlx5_rxq_ctrl,
1822 return rxq_ctrl->type;
1824 return MLX5_RXQ_TYPE_UNDEFINED;
1828 * Match queues listed in arguments to queues contained in indirection table
1832 * Pointer to indirection table to match.
1834 * Queues to match to queues in the indirection table.
1836 * Number of queues in the array.
1839 * 1 if all queues in the indirection table match, 0 otherwise.
1842 mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
1843 const uint16_t *queues, uint32_t queues_n)
1845 return (ind_tbl->queues_n == queues_n) &&
1846 (!memcmp(ind_tbl->queues, queues,
1847 ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
1851 * Get an indirection table.
1854 * Pointer to Ethernet device.
1856 * Queues entering in the indirection table.
1858 * Number of queues in the array.
1861 * An indirection table if found.
1863 struct mlx5_ind_table_obj *
1864 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1867 struct mlx5_priv *priv = dev->data->dev_private;
1868 struct mlx5_ind_table_obj *ind_tbl;
1870 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1871 if ((ind_tbl->queues_n == queues_n) &&
1872 (memcmp(ind_tbl->queues, queues,
1873 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1880 rte_atomic32_inc(&ind_tbl->refcnt);
1881 for (i = 0; i != ind_tbl->queues_n; ++i)
1882 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1888 * Release an indirection table.
1891 * Pointer to Ethernet device.
1893 * Indirection table to release.
1896 * 1 while a reference on it exists, 0 when freed.
1899 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
1900 struct mlx5_ind_table_obj *ind_tbl)
1902 struct mlx5_priv *priv = dev->data->dev_private;
1905 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1906 priv->obj_ops.ind_table_destroy(ind_tbl);
1907 for (i = 0; i != ind_tbl->queues_n; ++i)
1908 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1909 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1910 LIST_REMOVE(ind_tbl, next);
1918 * Verify the indirection table object list is empty
1921 * Pointer to Ethernet device.
1924 * The number of objects not released.
1927 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
1929 struct mlx5_priv *priv = dev->data->dev_private;
1930 struct mlx5_ind_table_obj *ind_tbl;
1933 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1935 "port %u indirection table obj %p still referenced",
1936 dev->data->port_id, (void *)ind_tbl);
1943 * Create an indirection table.
1946 * Pointer to Ethernet device.
1948 * Queues entering in the indirection table.
1950 * Number of queues in the array.
1953 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
1955 static struct mlx5_ind_table_obj *
1956 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
1959 struct mlx5_priv *priv = dev->data->dev_private;
1960 struct mlx5_ind_table_obj *ind_tbl;
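/*
 * Log2 of the indirection table size: the number of queues when it is a
 * power of two, otherwise the maximum supported table size.
 */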
1961 const unsigned int n = rte_is_power_of_2(queues_n) ?
1962 log2above(queues_n) :
1963 log2above(priv->config.ind_table_max_size);
1967 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
1968 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
1973 ind_tbl->queues_n = queues_n;
1974 for (i = 0; i != queues_n; ++i) {
1975 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1978 ind_tbl->queues[i] = queues[i];
1980 ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
1983 rte_atomic32_inc(&ind_tbl->refcnt);
1984 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1988 for (j = 0; j < i; j++)
1989 mlx5_rxq_release(dev, ind_tbl->queues[j]);
1992 DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
1997 * Get an Rx Hash queue.
2000 * Pointer to Ethernet device.
2002 * RSS configuration for the Rx hash queue.
2004 * Queues entering the hash queue. In case of empty hash_fields only the
2005 * first queue index will be taken for the indirection table.
2010 * A hash Rx queue index on success.
2013 mlx5_hrxq_get(struct rte_eth_dev *dev,
2014 const uint8_t *rss_key, uint32_t rss_key_len,
2015 uint64_t hash_fields,
2016 const uint16_t *queues, uint32_t queues_n)
2018 struct mlx5_priv *priv = dev->data->dev_private;
2019 struct mlx5_hrxq *hrxq;
2022 queues_n = hash_fields ? queues_n : 1;
2023 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2025 struct mlx5_ind_table_obj *ind_tbl;
2029 if (hrxq->rss_key_len != rss_key_len)
2031 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2033 if (hrxq->hash_fields != hash_fields)
2035 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2038 if (ind_tbl != hrxq->ind_table) {
2039 mlx5_ind_table_obj_release(dev, ind_tbl);
2042 rte_atomic32_inc(&hrxq->refcnt);
2049 * Modify an Rx Hash queue configuration.
2052 * Pointer to Ethernet device.
2054 * Index to Hash Rx queue to modify.
2056 * RSS key for the Rx hash queue.
2057 * @param rss_key_len
2059 * @param hash_fields
2060 * Verbs protocol hash field to make the RSS on.
2062 * Queues entering the hash queue. In case of empty hash_fields only the
2063 * first queue index will be taken for the indirection table.
2068 * 0 on success, a negative errno value otherwise and rte_errno is set.
2071 mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
2072 const uint8_t *rss_key, uint32_t rss_key_len,
2073 uint64_t hash_fields,
2074 const uint16_t *queues, uint32_t queues_n)
2077 struct mlx5_ind_table_obj *ind_tbl = NULL;
2078 struct mlx5_priv *priv = dev->data->dev_private;
2079 struct mlx5_hrxq *hrxq =
2080 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2088 if (hrxq->rss_key_len != rss_key_len) {
2089 /* rss_key_len is a fixed size of 40 bytes and is not supposed to change. */
2093 queues_n = hash_fields ? queues_n : 1;
2094 if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
2095 queues, queues_n)) {
2096 ind_tbl = hrxq->ind_table;
2098 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2100 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
2106 MLX5_ASSERT(priv->obj_ops.hrxq_modify);
2107 ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
2108 hash_fields, ind_tbl);
2113 if (ind_tbl != hrxq->ind_table) {
2114 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2115 hrxq->ind_table = ind_tbl;
2117 hrxq->hash_fields = hash_fields;
2118 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2122 if (ind_tbl != hrxq->ind_table)
2123 mlx5_ind_table_obj_release(dev, ind_tbl);
2129 * Release the hash Rx queue.
2132 * Pointer to Ethernet device.
2134 * Index to Hash Rx queue to release.
2137 * 1 while a reference on it exists, 0 when freed.
2140 mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2142 struct mlx5_priv *priv = dev->data->dev_private;
2143 struct mlx5_hrxq *hrxq;
2145 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2148 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2149 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2150 mlx5_glue->destroy_flow_action(hrxq->action);
2152 priv->obj_ops.hrxq_destroy(hrxq);
2153 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2154 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
2155 hrxq_idx, hrxq, next);
2156 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2159 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
2164 * Create an Rx Hash queue.
2167 * Pointer to Ethernet device.
2169 * RSS key for the Rx hash queue.
2170 * @param rss_key_len
2172 * @param hash_fields
2173 * Verbs protocol hash field to make the RSS on.
2175 * Queues entering the hash queue. In case of empty hash_fields only the
2176 * first queue index will be taken for the indirection table.
2182 * If true new object of Rx Hash queue will be used in shared action.
2185 * The index of the initialized DevX object on success, 0 otherwise and rte_errno is set.
2188 mlx5_hrxq_new(struct rte_eth_dev *dev,
2189 const uint8_t *rss_key, uint32_t rss_key_len,
2190 uint64_t hash_fields,
2191 const uint16_t *queues, uint32_t queues_n,
2192 int tunnel, bool shared)
2194 struct mlx5_priv *priv = dev->data->dev_private;
2195 struct mlx5_hrxq *hrxq = NULL;
2196 uint32_t hrxq_idx = 0;
2197 struct mlx5_ind_table_obj *ind_tbl;
2200 queues_n = hash_fields ? queues_n : 1;
2201 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2203 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
2208 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2211 hrxq->shared = !!shared;
2212 hrxq->ind_table = ind_tbl;
2213 hrxq->rss_key_len = rss_key_len;
2214 hrxq->hash_fields = hash_fields;
2215 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2216 ret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel);
2221 rte_atomic32_inc(&hrxq->refcnt);
2222 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
2226 ret = rte_errno; /* Save rte_errno before cleanup. */
2227 mlx5_ind_table_obj_release(dev, ind_tbl);
2229 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2230 rte_errno = ret; /* Restore rte_errno. */
2235 * Create a drop Rx Hash queue.
2238 * Pointer to Ethernet device.
2241 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2244 mlx5_drop_action_create(struct rte_eth_dev *dev)
2246 struct mlx5_priv *priv = dev->data->dev_private;
2247 struct mlx5_hrxq *hrxq = NULL;
2250 if (priv->drop_queue.hrxq) {
2251 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2252 return priv->drop_queue.hrxq;
2254 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2257 "Port %u cannot allocate memory for drop queue.",
2258 dev->data->port_id);
2262 priv->drop_queue.hrxq = hrxq;
2263 hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
2265 if (!hrxq->ind_table) {
2269 ret = priv->obj_ops.drop_action_create(dev);
2272 rte_atomic32_set(&hrxq->refcnt, 1);
2276 if (hrxq->ind_table)
2277 mlx5_free(hrxq->ind_table);
2278 priv->drop_queue.hrxq = NULL;
2285 * Release a drop hash Rx queue.
2288 * Pointer to Ethernet device.
2291 mlx5_drop_action_destroy(struct rte_eth_dev *dev)
2293 struct mlx5_priv *priv = dev->data->dev_private;
2294 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2296 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2297 priv->obj_ops.drop_action_destroy(dev);
2298 mlx5_free(priv->drop_queue.rxq);
2299 mlx5_free(hrxq->ind_table);
2301 priv->drop_queue.rxq = NULL;
2302 priv->drop_queue.hrxq = NULL;
2307 * Verify the hash Rx queue list is empty
2310 * Pointer to Ethernet device.
2313 * The number of objects not released.
2316 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2318 struct mlx5_priv *priv = dev->data->dev_private;
2319 struct mlx5_hrxq *hrxq;
2323 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2326 "port %u hash Rx queue %p still referenced",
2327 dev->data->port_id, (void *)hrxq);
2334 * Set the Rx queue timestamp conversion parameters
2337 * Pointer to the Ethernet device structure.
2340 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2342 struct mlx5_priv *priv = dev->data->dev_private;
2343 struct mlx5_dev_ctx_shared *sh = priv->sh;
2344 struct mlx5_rxq_data *data;
2347 for (i = 0; i != priv->rxqs_n; ++i) {
2348 if (!(*priv->rxqs)[i])
2350 data = (*priv->rxqs)[i];
2352 data->rt_timestamp = priv->config.rt_timestamp;