1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
20 #include <rte_eal_paging.h>
22 #include <mlx5_glue.h>
23 #include <mlx5_malloc.h>
24 #include <mlx5_common_mr.h>
26 #include "mlx5_defs.h"
30 #include "mlx5_utils.h"
31 #include "mlx5_autoconf.h"
34 /* Default RSS hash key also used for ConnectX-3. */
35 uint8_t rss_hash_default_key[] = {
36 0x2c, 0xc6, 0x81, 0xd1,
37 0x5b, 0xdb, 0xf4, 0xf7,
38 0xfc, 0xa2, 0x83, 0x19,
39 0xdb, 0x1a, 0x3e, 0x94,
40 0x6b, 0x9e, 0x38, 0xd9,
41 0x2c, 0x9c, 0x03, 0xd1,
42 0xad, 0x99, 0x44, 0xa7,
43 0xd9, 0x56, 0x3d, 0x59,
44 0x06, 0x3c, 0x25, 0xf3,
45 0xfc, 0x1f, 0xdc, 0x2a,
48 /* Length of the default RSS hash key. */
49 static_assert(MLX5_RSS_HASH_KEY_LEN ==
50 (unsigned int)sizeof(rss_hash_default_key),
51 "wrong RSS default key size.");
54 * Calculate the number of CQEs in CQ for the Rx queue.
57 * Pointer to receive queue structure.
60 * Number of CQEs in CQ.
63 mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
66 unsigned int wqe_n = 1 << rxq_data->elts_n;
68 if (mlx5_rxq_mprq_enabled(rxq_data))
69 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
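/*
 * Note: with Multi-Packet RQ a single WQE spans up to 2^strd_num_n strides
 * and every received packet still consumes one CQE, so the CQ depth is
 * scaled by the per-WQE stride count.
 */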
76 * Allocate RX queue elements for Multi-Packet RQ.
79 * Pointer to RX queue structure.
82 * 0 on success, a negative errno value otherwise and rte_errno is set.
85 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
87 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
88 unsigned int wqe_n = 1 << rxq->elts_n;
92 /* Iterate on segments. */
93 for (i = 0; i <= wqe_n; ++i) {
94 struct mlx5_mprq_buf *buf;
96 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
97 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
102 (*rxq->mprq_bufs)[i] = buf;
104 rxq->mprq_repl = buf;
107 "port %u MPRQ queue %u allocated and configured %u segments",
108 rxq->port_id, rxq->idx, wqe_n);
111 err = rte_errno; /* Save rte_errno before cleanup. */
113 for (i = 0; (i != wqe_n); ++i) {
114 if ((*rxq->mprq_bufs)[i] != NULL)
115 rte_mempool_put(rxq->mprq_mp,
116 (*rxq->mprq_bufs)[i]);
117 (*rxq->mprq_bufs)[i] = NULL;
119 DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
120 rxq->port_id, rxq->idx);
121 rte_errno = err; /* Restore rte_errno. */
126 * Allocate RX queue elements for Single-Packet RQ.
129 * Pointer to RX queue structure.
132 * 0 on success, negative errno value on failure.
135 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
137 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
138 unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
139 (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
140 (1 << rxq_ctrl->rxq.elts_n);
144 /* Iterate on segments. */
145 for (i = 0; (i != elts_n); ++i) {
146 struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
147 struct rte_mbuf *buf;
149 buf = rte_pktmbuf_alloc(seg->mp);
151 if (rxq_ctrl->share_group == 0)
152 DRV_LOG(ERR, "port %u queue %u empty mbuf pool",
153 RXQ_PORT_ID(rxq_ctrl),
156 DRV_LOG(ERR, "share group %u queue %u empty mbuf pool",
157 rxq_ctrl->share_group,
158 rxq_ctrl->share_qid);
162 /* Headroom is reserved by rte_pktmbuf_alloc(). */
163 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
164 /* Buffer is supposed to be empty. */
165 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
166 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
167 MLX5_ASSERT(!buf->next);
168 SET_DATA_OFF(buf, seg->offset);
169 PORT(buf) = rxq_ctrl->rxq.port_id;
170 DATA_LEN(buf) = seg->length;
171 PKT_LEN(buf) = seg->length;
173 (*rxq_ctrl->rxq.elts)[i] = buf;
175 /* If Rx vector is activated. */
176 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
177 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
178 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
179 struct rte_pktmbuf_pool_private *priv =
180 (struct rte_pktmbuf_pool_private *)
181 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
184 /* Initialize default rearm_data for vPMD. */
185 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
186 rte_mbuf_refcnt_set(mbuf_init, 1);
187 mbuf_init->nb_segs = 1;
188 mbuf_init->port = rxq->port_id;
189 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
190 mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
192 * prevent compiler reordering:
193 * rearm_data covers previous fields.
195 rte_compiler_barrier();
196 rxq->mbuf_initializer =
197 *(rte_xmm_t *)&mbuf_init->rearm_data;
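/*
 * The 16 bytes starting at rearm_data (data_off, refcnt, nb_segs, port
 * and ol_flags) form a template that the vectorized Rx burst copies into
 * every received mbuf with a single vector store.
 */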
198 /* Padding with a fake mbuf for vectorized Rx. */
199 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
200 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
202 if (rxq_ctrl->share_group == 0)
204 "port %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
205 RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx, elts_n,
206 elts_n / (1 << rxq_ctrl->rxq.sges_n));
209 "share group %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
210 rxq_ctrl->share_group, rxq_ctrl->share_qid, elts_n,
211 elts_n / (1 << rxq_ctrl->rxq.sges_n));
214 err = rte_errno; /* Save rte_errno before cleanup. */
216 for (i = 0; (i != elts_n); ++i) {
217 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
218 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
219 (*rxq_ctrl->rxq.elts)[i] = NULL;
221 if (rxq_ctrl->share_group == 0)
222 DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
223 RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx);
225 DRV_LOG(DEBUG, "share group %u SPRQ queue %u failed, freed everything",
226 rxq_ctrl->share_group, rxq_ctrl->share_qid);
227 rte_errno = err; /* Restore rte_errno. */
232 * Allocate RX queue elements.
235 * Pointer to RX queue structure.
238 * 0 on success, negative errno value on failure.
241 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
246 * For MPRQ we need to allocate both MPRQ buffers
247 * for WQEs and simple mbufs for vector processing.
249 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
250 ret = rxq_alloc_elts_mprq(rxq_ctrl);
252 ret = rxq_alloc_elts_sprq(rxq_ctrl);
257 * Free RX queue elements for Multi-Packet RQ.
260 * Pointer to RX queue structure.
263 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
265 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
268 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
269 rxq->port_id, rxq->idx, (1u << rxq->elts_n));
270 if (rxq->mprq_bufs == NULL)
272 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
273 if ((*rxq->mprq_bufs)[i] != NULL)
274 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
275 (*rxq->mprq_bufs)[i] = NULL;
277 if (rxq->mprq_repl != NULL) {
278 mlx5_mprq_buf_free(rxq->mprq_repl);
279 rxq->mprq_repl = NULL;
284 * Free RX queue elements for Single-Packet RQ.
287 * Pointer to RX queue structure.
290 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
292 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
293 const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
294 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
296 const uint16_t q_mask = q_n - 1;
297 uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
298 rxq->elts_ci : rxq->rq_ci;
299 uint16_t used = q_n - (elts_ci - rxq->rq_pi);
302 if (rxq_ctrl->share_group == 0)
303 DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
304 RXQ_PORT_ID(rxq_ctrl), rxq->idx, q_n);
306 DRV_LOG(DEBUG, "share group %u Rx queue %u freeing %d WRs",
307 rxq_ctrl->share_group, rxq_ctrl->share_qid, q_n);
308 if (rxq->elts == NULL)
311 * Some mbufs in the ring belong to the application.
312 * They cannot be freed.
314 if (mlx5_rxq_check_vec_support(rxq) > 0) {
315 for (i = 0; i < used; ++i)
316 (*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
317 rxq->rq_pi = elts_ci;
319 for (i = 0; i != q_n; ++i) {
320 if ((*rxq->elts)[i] != NULL)
321 rte_pktmbuf_free_seg((*rxq->elts)[i]);
322 (*rxq->elts)[i] = NULL;
327 * Free RX queue elements.
330 * Pointer to RX queue structure.
333 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
336 * For MPRQ we need to allocate both MPRQ buffers
337 * for WQEs and simple mbufs for vector processing.
339 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
340 rxq_free_elts_mprq(rxq_ctrl);
341 rxq_free_elts_sprq(rxq_ctrl);
345 * Returns the per-queue supported offloads.
348 * Pointer to Ethernet device.
351 * Supported Rx offloads.
354 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
356 struct mlx5_priv *priv = dev->data->dev_private;
357 struct mlx5_dev_config *config = &priv->config;
358 uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
359 RTE_ETH_RX_OFFLOAD_TIMESTAMP |
360 RTE_ETH_RX_OFFLOAD_RSS_HASH);
362 if (!config->mprq.enabled)
363 offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
364 if (config->hw_fcs_strip)
365 offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
367 offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
368 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
369 RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
370 if (config->hw_vlan_strip)
371 offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
372 if (MLX5_LRO_SUPPORTED(dev))
373 offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
379 * Returns the per-port supported offloads.
382 * Supported Rx offloads.
385 mlx5_get_rx_port_offloads(void)
387 uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
393 * Verify if the queue can be released.
396 * Pointer to Ethernet device.
401 * 1 if the queue can be released
402 * 0 if the queue cannot be released because there are references to it.
403 * Negative errno and rte_errno is set if queue doesn't exist.
406 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
408 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
414 return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
417 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
419 rxq_sync_cq(struct mlx5_rxq_data *rxq)
421 const uint16_t cqe_n = 1 << rxq->cqe_n;
422 const uint16_t cqe_mask = cqe_n - 1;
423 volatile struct mlx5_cqe *cqe;
428 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
429 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
430 if (ret == MLX5_CQE_STATUS_HW_OWN)
432 if (ret == MLX5_CQE_STATUS_ERR) {
436 MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
437 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
441 /* Compute the next non compressed CQE. */
442 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
445 /* Move all CQEs to HW ownership, including possible MiniCQEs. */
446 for (i = 0; i < cqe_n; i++) {
447 cqe = &(*rxq->cqes)[i];
448 cqe->op_own = MLX5_CQE_INVALIDATE;
450 /* Resync CQE and WQE (WQ in RESET state). */
452 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
454 *rxq->rq_db = rte_cpu_to_be_32(0);
459 * Rx queue stop. Device queue goes to the RESET state,
460 * all involved mbufs are freed from WQ.
463 * Pointer to Ethernet device structure.
468 * 0 on success, a negative errno value otherwise and rte_errno is set.
471 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
473 struct mlx5_priv *priv = dev->data->dev_private;
474 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
475 struct mlx5_rxq_ctrl *rxq_ctrl =
476 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
479 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
480 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
482 DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
487 /* Remove all processed CQEs. */
489 /* Free all involved mbufs. */
490 rxq_free_elts(rxq_ctrl);
491 /* Set the actual queue state. */
492 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
497 * Rx queue stop. Device queue goes to the RESET state,
498 * all involved mbufs are freed from WQ.
501 * Pointer to Ethernet device structure.
506 * 0 on success, a negative errno value otherwise and rte_errno is set.
509 mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
511 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
514 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
515 DRV_LOG(ERR, "Hairpin queue can't be stopped");
519 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
522 * Vectorized Rx burst requires the CQ and RQ indices to stay
523 * synchronized, which might be broken on RQ restart
524 * and cause Rx malfunction, so queue stopping is
525 * not supported if the vectorized Rx burst is engaged.
526 * The routine pointer depends on the process
527 * type, so the check must be performed here.
529 if (pkt_burst == mlx5_rx_burst_vec) {
530 DRV_LOG(ERR, "Rx queue stop is not supported "
531 "for vectorized Rx");
535 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
536 ret = mlx5_mp_os_req_queue_control(dev, idx,
537 MLX5_MP_REQ_QUEUE_RX_STOP);
539 ret = mlx5_rx_queue_stop_primary(dev, idx);
545 * Rx queue start. Device queue goes to the ready state,
546 * all required mbufs are allocated and WQ is replenished.
549 * Pointer to Ethernet device structure.
554 * 0 on success, a negative errno value otherwise and rte_errno is set.
557 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
559 struct mlx5_priv *priv = dev->data->dev_private;
560 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
561 struct mlx5_rxq_ctrl *rxq_ctrl =
562 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
565 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
566 /* Allocate needed buffers. */
567 ret = rxq_alloc_elts(rxq_ctrl);
569 DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
574 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
576 /* Reset RQ consumer before moving queue to READY state. */
577 *rxq->rq_db = rte_cpu_to_be_32(0);
579 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
581 DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
586 /* Reinitialize RQ - set WQEs. */
587 mlx5_rxq_initialize(rxq);
588 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
589 /* Set actual queue state. */
590 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
595 * Rx queue start. Device queue goes to the ready state,
596 * all required mbufs are allocated and WQ is replenished.
599 * Pointer to Ethernet device structure.
604 * 0 on success, a negative errno value otherwise and rte_errno is set.
607 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
611 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
612 DRV_LOG(ERR, "Hairpin queue can't be started");
616 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
618 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
619 ret = mlx5_mp_os_req_queue_control(dev, idx,
620 MLX5_MP_REQ_QUEUE_RX_START);
622 ret = mlx5_rx_queue_start_primary(dev, idx);
628 * Rx queue presetup checks.
631 * Pointer to Ethernet device structure.
635 * Number of descriptors to configure in queue.
638 * 0 on success, a negative errno value otherwise and rte_errno is set.
641 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
643 struct mlx5_priv *priv = dev->data->dev_private;
645 if (!rte_is_power_of_2(*desc)) {
646 *desc = 1 << log2above(*desc);
648 "port %u increased number of descriptors in Rx queue %u"
649 " to the next power of two (%d)",
650 dev->data->port_id, idx, *desc);
652 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
653 dev->data->port_id, idx, *desc);
654 if (idx >= priv->rxqs_n) {
655 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
656 dev->data->port_id, idx, priv->rxqs_n);
657 rte_errno = EOVERFLOW;
660 if (!mlx5_rxq_releasable(dev, idx)) {
661 DRV_LOG(ERR, "port %u unable to release queue index %u",
662 dev->data->port_id, idx);
666 mlx5_rxq_release(dev, idx);
673 * Pointer to Ethernet device structure.
677 * Number of descriptors to configure in queue.
679 * NUMA socket on which memory must be allocated.
681 * Thresholds parameters.
683 * Memory pool for buffer allocations.
686 * 0 on success, a negative errno value otherwise and rte_errno is set.
689 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
690 unsigned int socket, const struct rte_eth_rxconf *conf,
691 struct rte_mempool *mp)
693 struct mlx5_priv *priv = dev->data->dev_private;
694 struct mlx5_rxq_priv *rxq;
695 struct mlx5_rxq_ctrl *rxq_ctrl;
696 struct rte_eth_rxseg_split *rx_seg =
697 (struct rte_eth_rxseg_split *)conf->rx_seg;
698 struct rte_eth_rxseg_split rx_single = {.mp = mp};
699 uint16_t n_seg = conf->rx_nseg;
704 * The parameters should already be checked on the rte_eth_dev layer.
705 * If mp is specified it means the compatible legacy configuration
706 * without buffer split feature tuning.
712 uint64_t offloads = conf->offloads |
713 dev->data->dev_conf.rxmode.offloads;
715 /* The offloads should be checked on rte_eth_dev layer. */
716 MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
717 if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
718 DRV_LOG(ERR, "port %u queue index %u split "
719 "offload not configured",
720 dev->data->port_id, idx);
724 MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
726 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
729 rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
732 DRV_LOG(ERR, "port %u unable to allocate rx queue index %u private data",
733 dev->data->port_id, idx);
739 (*priv->rxq_privs)[idx] = rxq;
740 rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg, n_seg);
742 DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
743 dev->data->port_id, idx);
745 (*priv->rxq_privs)[idx] = NULL;
749 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
750 dev->data->port_id, idx);
751 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
758 * Pointer to Ethernet device structure.
762 * Number of descriptors to configure in queue.
763 * @param hairpin_conf
764 * Hairpin configuration parameters.
767 * 0 on success, a negative errno value otherwise and rte_errno is set.
770 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
772 const struct rte_eth_hairpin_conf *hairpin_conf)
774 struct mlx5_priv *priv = dev->data->dev_private;
775 struct mlx5_rxq_priv *rxq;
776 struct mlx5_rxq_ctrl *rxq_ctrl;
779 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
782 if (hairpin_conf->peer_count != 1) {
784 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue index %u"
785 " peer count is %u", dev->data->port_id,
786 idx, hairpin_conf->peer_count);
789 if (hairpin_conf->peers[0].port == dev->data->port_id) {
790 if (hairpin_conf->peers[0].queue >= priv->txqs_n) {
792 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
793 " index %u, Tx %u is larger than %u",
794 dev->data->port_id, idx,
795 hairpin_conf->peers[0].queue, priv->txqs_n);
799 if (hairpin_conf->manual_bind == 0 ||
800 hairpin_conf->tx_explicit == 0) {
802 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
803 " index %u peer port %u with attributes %u %u",
804 dev->data->port_id, idx,
805 hairpin_conf->peers[0].port,
806 hairpin_conf->manual_bind,
807 hairpin_conf->tx_explicit);
811 rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
814 DRV_LOG(ERR, "port %u unable to allocate hairpin rx queue index %u private data",
815 dev->data->port_id, idx);
821 (*priv->rxq_privs)[idx] = rxq;
822 rxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf);
824 DRV_LOG(ERR, "port %u unable to allocate hairpin queue index %u",
825 dev->data->port_id, idx);
827 (*priv->rxq_privs)[idx] = NULL;
831 DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
832 dev->data->port_id, idx);
833 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
838 * DPDK callback to release a RX queue.
841 * Pointer to Ethernet device structure.
843 * Receive queue index.
846 mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
848 struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];
852 if (!mlx5_rxq_releasable(dev, qid))
853 rte_panic("port %u Rx queue %u is still used by a flow and"
854 " cannot be removed\n", dev->data->port_id, qid);
855 mlx5_rxq_release(dev, qid);
859 * Allocate queue vector and fill epoll fd list for Rx interrupts.
862 * Pointer to Ethernet device.
865 * 0 on success, a negative errno value otherwise and rte_errno is set.
868 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
870 struct mlx5_priv *priv = dev->data->dev_private;
872 unsigned int rxqs_n = priv->rxqs_n;
873 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
874 unsigned int count = 0;
875 struct rte_intr_handle *intr_handle = dev->intr_handle;
877 if (!dev->data->dev_conf.intr_conf.rxq)
879 mlx5_rx_intr_vec_disable(dev);
880 if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) {
882 "port %u failed to allocate memory for interrupt"
883 " vector, Rx interrupts will not be supported",
889 if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_EXT))
892 for (i = 0; i != n; ++i) {
893 /* This rxq obj must not be released in this function. */
894 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
895 struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;
898 /* Skip queues that cannot request interrupts. */
899 if (!rxq_obj || (!rxq_obj->ibv_channel &&
900 !rxq_obj->devx_channel)) {
901 /* Use invalid intr_vec[] index to disable entry. */
902 if (rte_intr_vec_list_index_set(intr_handle, i,
903 RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
907 mlx5_rxq_ref(dev, i);
908 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
910 "port %u too many Rx queues for interrupt"
911 " vector size (%d), Rx interrupts cannot be"
913 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
914 mlx5_rx_intr_vec_disable(dev);
918 rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
922 "port %u failed to make Rx interrupt file"
923 " descriptor %d non-blocking for queue index"
925 dev->data->port_id, rxq_obj->fd, i);
926 mlx5_rx_intr_vec_disable(dev);
930 if (rte_intr_vec_list_index_set(intr_handle, i,
931 RTE_INTR_VEC_RXTX_OFFSET + count))
933 if (rte_intr_efds_index_set(intr_handle, count,
939 mlx5_rx_intr_vec_disable(dev);
940 else if (rte_intr_nb_efd_set(intr_handle, count))
946 * Clean up Rx interrupts handler.
949 * Pointer to Ethernet device.
952 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
954 struct mlx5_priv *priv = dev->data->dev_private;
955 struct rte_intr_handle *intr_handle = dev->intr_handle;
957 unsigned int rxqs_n = priv->rxqs_n;
958 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
960 if (!dev->data->dev_conf.intr_conf.rxq)
962 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0)
964 for (i = 0; i != n; ++i) {
965 if (rte_intr_vec_list_index_get(intr_handle, i) ==
966 RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID)
969 * Need to access the queue directly to release the reference
970 * kept in mlx5_rx_intr_vec_enable().
972 mlx5_rxq_deref(dev, i);
975 rte_intr_free_epoll_fd(intr_handle);
977 rte_intr_vec_list_free(intr_handle);
979 rte_intr_nb_efd_set(intr_handle, 0);
983 * MLX5 CQ notification.
986 * Pointer to receive queue structure.
988 * Sequence number per receive queue.
991 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
994 uint32_t doorbell_hi;
996 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
998 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
999 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
1000 doorbell = (uint64_t)doorbell_hi << 32;
1001 doorbell |= rxq->cqn;
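/*
 * The 64-bit arm doorbell packs the arm sequence number and the current
 * CQ consumer index into the high word and the CQ number into the low
 * word; the doorbell record is updated before ringing the UAR register.
 */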
1002 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
1003 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
1004 cq_db_reg, rxq->uar_lock_cq);
1008 * DPDK callback for Rx queue interrupt enable.
1011 * Pointer to Ethernet device structure.
1012 * @param rx_queue_id
1016 * 0 on success, a negative errno value otherwise and rte_errno is set.
1019 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1021 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
1024 if (rxq->ctrl->irq) {
1025 if (!rxq->ctrl->obj)
1027 mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);
1036 * DPDK callback for Rx queue interrupt disable.
1039 * Pointer to Ethernet device structure.
1040 * @param rx_queue_id
1044 * 0 on success, a negative errno value otherwise and rte_errno is set.
1047 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1049 struct mlx5_priv *priv = dev->data->dev_private;
1050 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
1057 if (!rxq->ctrl->obj)
1059 if (rxq->ctrl->irq) {
1060 ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);
1063 rxq->ctrl->rxq.cq_arm_sn++;
1068 * The ret variable may be EAGAIN, which means the get_event function was
1069 * called before any event arrived.
1075 if (rte_errno != EAGAIN)
1076 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
1077 dev->data->port_id, rx_queue_id);
1082 * Verify that the Rx queue objects list is empty.
1085 * Pointer to Ethernet device.
1088 * The number of objects not released.
1091 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1093 struct mlx5_priv *priv = dev->data->dev_private;
1095 struct mlx5_rxq_obj *rxq_obj;
1097 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1098 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1099 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1106 * Callback function to initialize mbufs for Multi-Packet RQ.
1109 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1110 void *_m, unsigned int i __rte_unused)
1112 struct mlx5_mprq_buf *buf = _m;
1113 struct rte_mbuf_ext_shared_info *shinfo;
1114 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1117 memset(_m, 0, sizeof(*buf));
1119 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1120 for (j = 0; j != strd_n; ++j) {
1121 shinfo = &buf->shinfos[j];
1122 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1123 shinfo->fcb_opaque = buf;
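/*
 * Each stride gets its own shared info descriptor so it can be handed
 * to the application as an external mbuf buffer and released back to
 * the parent buffer through mlx5_mprq_buf_free_cb().
 */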
1128 * Free mempool of Multi-Packet RQ.
1131 * Pointer to Ethernet device.
1134 * 0 on success, negative errno value on failure.
1137 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1139 struct mlx5_priv *priv = dev->data->dev_private;
1140 struct rte_mempool *mp = priv->mprq_mp;
1145 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1146 dev->data->port_id, mp->name);
1148 * If a buffer in the pool has been externally attached to an mbuf and is
1149 * still in use by the application, destroying the Rx queue can corrupt
1150 * the packet. It is unlikely to happen, but it can if the application
1151 * dynamically creates and destroys queues while holding Rx packets.
1153 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1154 * RQ isn't provided by application but managed by PMD.
1156 if (!rte_mempool_full(mp)) {
1158 "port %u mempool for Multi-Packet RQ is still in use",
1159 dev->data->port_id);
1163 rte_mempool_free(mp);
1164 /* Unset mempool for each Rx queue. */
1165 for (i = 0; i != priv->rxqs_n; ++i) {
1166 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1170 rxq->mprq_mp = NULL;
1172 priv->mprq_mp = NULL;
1177 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1178 * mempool. If already allocated, reuse it if there are enough elements.
1179 * Otherwise, resize it.
1182 * Pointer to Ethernet device.
1185 * 0 on success, negative errno value on failure.
1188 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1190 struct mlx5_priv *priv = dev->data->dev_private;
1191 struct rte_mempool *mp = priv->mprq_mp;
1192 char name[RTE_MEMPOOL_NAMESIZE];
1193 unsigned int desc = 0;
1194 unsigned int buf_len;
1195 unsigned int obj_num;
1196 unsigned int obj_size;
1197 unsigned int strd_num_n = 0;
1198 unsigned int strd_sz_n = 0;
1200 unsigned int n_ibv = 0;
1203 if (!mlx5_mprq_enabled(dev))
1205 /* Count the total number of descriptors configured. */
1206 for (i = 0; i != priv->rxqs_n; ++i) {
1207 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1208 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1209 (rxq, struct mlx5_rxq_ctrl, rxq);
1211 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1214 desc += 1 << rxq->elts_n;
1215 /* Get the max number of strides. */
1216 if (strd_num_n < rxq->strd_num_n)
1217 strd_num_n = rxq->strd_num_n;
1218 /* Get the max size of a stride. */
1219 if (strd_sz_n < rxq->strd_sz_n)
1220 strd_sz_n = rxq->strd_sz_n;
1222 MLX5_ASSERT(strd_num_n && strd_sz_n);
1223 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1224 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1225 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
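/*
 * Each mempool object thus holds the mlx5_mprq_buf header, the stride
 * data area (strides * stride size), one rte_mbuf_ext_shared_info per
 * stride, plus RTE_PKTMBUF_HEADROOM of extra room.
 */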
1227 * Received packets can be either memcpy'd or externally referenced. In
1228 * case the packet is attached to an mbuf as an external buffer, it isn't
1229 * possible to predict how the buffers will be queued by the application,
1230 * so there is no way to pre-allocate the exact number of buffers
1231 * in advance; enough buffers must be prepared speculatively.
1233 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1234 * received packets to buffers provided by application (rxq->mp) until
1235 * this Mempool gets available again.
1238 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1240 * rte_mempool_create_empty() has a sanity check that refuses a large cache
1241 * size compared to the number of elements.
1242 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
1243 * constant number 2 instead.
1245 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1246 /* Check whether a mempool is already allocated and if it can be reused. */
1247 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1248 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1249 dev->data->port_id, mp->name);
1252 } else if (mp != NULL) {
1253 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1254 dev->data->port_id, mp->name);
1256 * If freeing fails, which means it may still be in use, there is no
1257 * choice but to keep using the existing one. On buffer underrun,
1258 * packets will be memcpy'd instead of external buffer
1261 if (mlx5_mprq_free_mp(dev)) {
1262 if (mp->elt_size >= obj_size)
1268 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1269 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1270 0, NULL, NULL, mlx5_mprq_buf_init,
1271 (void *)((uintptr_t)1 << strd_num_n),
1272 dev->device->numa_node, 0);
1275 "port %u failed to allocate a mempool for"
1276 " Multi-Packet RQ, count=%u, size=%u",
1277 dev->data->port_id, obj_num, obj_size);
1281 ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
1282 priv->sh->cdev->pd, mp, &priv->mp_id);
1283 if (ret < 0 && rte_errno != EEXIST) {
1285 DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
1286 dev->data->port_id);
1287 rte_mempool_free(mp);
1293 /* Set mempool for each Rx queue. */
1294 for (i = 0; i != priv->rxqs_n; ++i) {
1295 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1296 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1297 (rxq, struct mlx5_rxq_ctrl, rxq);
1299 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1303 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1304 dev->data->port_id);
1308 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1309 sizeof(struct rte_vlan_hdr) * 2 + \
1310 sizeof(struct rte_ipv6_hdr)))
1311 #define MAX_TCP_OPTION_SIZE 40u
1312 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1313 sizeof(struct rte_tcp_hdr) + \
1314 MAX_TCP_OPTION_SIZE))
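/*
 * With typical header sizes this amounts to roughly 14 (Ethernet) +
 * 2 * 4 (VLAN) + 40 (IPv6) = 62 bytes of L2/L3 headers, plus 20 bytes of
 * TCP header and up to 40 bytes of TCP options, i.e. about 122 bytes that
 * must fit into the first segment when LRO is enabled.
 */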
1317 * Adjust the maximum LRO message size.
1320 * Pointer to Ethernet device.
1323 * @param max_lro_size
1324 * The maximum size for LRO packet.
1327 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1328 uint32_t max_lro_size)
1330 struct mlx5_priv *priv = dev->data->dev_private;
1332 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1333 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1334 MLX5_MAX_TCP_HDR_OFFSET)
1335 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1336 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1337 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1338 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
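/*
 * The size is kept in units of MLX5_LRO_SEG_CHUNK_SIZE (presumably the
 * granularity expected when programming the TIR), and the smallest value
 * across all Rx queues is retained below so every LRO context fits.
 */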
1339 if (priv->max_lro_msg_size)
1340 priv->max_lro_msg_size =
1341 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1343 priv->max_lro_msg_size = max_lro_size;
1345 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1346 dev->data->port_id, idx,
1347 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1351 * Create a DPDK Rx queue.
1354 * Pointer to Ethernet device.
1356 * RX queue private data.
1358 * Number of descriptors to configure in queue.
1360 * NUMA socket on which memory must be allocated.
1363 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1365 struct mlx5_rxq_ctrl *
1366 mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
1368 unsigned int socket, const struct rte_eth_rxconf *conf,
1369 const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
1371 uint16_t idx = rxq->idx;
1372 struct mlx5_priv *priv = dev->data->dev_private;
1373 struct mlx5_rxq_ctrl *tmpl;
1374 unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
1375 struct mlx5_dev_config *config = &priv->config;
1376 uint64_t offloads = conf->offloads |
1377 dev->data->dev_conf.rxmode.offloads;
1378 unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
1379 unsigned int max_rx_pktlen = lro_on_queue ?
1380 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1381 dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
1383 unsigned int non_scatter_min_mbuf_size = max_rx_pktlen +
1384 RTE_PKTMBUF_HEADROOM;
1385 unsigned int max_lro_size = 0;
1386 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1387 const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
1388 !rx_seg[0].offset && !rx_seg[0].length;
1389 unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
1390 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1391 unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
1392 (1U << config->mprq.max_stride_size_n) ?
1393 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1394 unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
1395 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1396 (config->mprq.stride_size_n ?
1397 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
1399 * Always allocate extra slots, even if eventually
1400 * the vector Rx will not be used.
1402 uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
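/*
 * The extra MLX5_VPMD_DESCS_PER_LOOP entries are later filled with the
 * fake mbuf so the SIMD Rx loop may read past the ring tail without
 * extra bounds checks.
 */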
1403 const struct rte_eth_rxseg_split *qs_seg = rx_seg;
1404 unsigned int tail_len;
1406 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1407 sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
1409 (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
1415 LIST_INIT(&tmpl->owners);
1417 LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
1418 MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
1420 * Build the array of actual buffer offsets and lengths.
1421 * Pad with the buffers from the last memory pool if
1422 * needed to handle max size packets, replace zero length
1423 * with the buffer length from the pool.
1425 tail_len = max_rx_pktlen;
1427 struct mlx5_eth_rxseg *hw_seg =
1428 &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
1429 uint32_t buf_len, offset, seg_len;
1432 * For the buffers beyond the described segments the offset is zero,
1433 * only the first buffer contains the headroom.
1435 buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
1436 offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
1437 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
1439 * For the buffers beyond the described segments the length is
1440 * the pool buffer length; zero lengths are replaced with
1441 * the pool buffer length as well.
1443 seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
1447 /* Check is done in long int, no overflows. */
1448 if (buf_len < seg_len + offset) {
1449 DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
1450 "%u/%u can't be satisfied",
1451 dev->data->port_id, idx,
1452 qs_seg->length, qs_seg->offset);
1456 if (seg_len > tail_len)
1457 seg_len = buf_len - offset;
1458 if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
1460 "port %u too many SGEs (%u) needed to handle"
1461 " requested maximum packet size %u, the maximum"
1462 " supported are %u", dev->data->port_id,
1463 tmpl->rxq.rxseg_n, max_rx_pktlen,
1465 rte_errno = ENOTSUP;
1468 /* Build the actual scattering element in the queue object. */
1469 hw_seg->mp = qs_seg->mp;
1470 MLX5_ASSERT(offset <= UINT16_MAX);
1471 MLX5_ASSERT(seg_len <= UINT16_MAX);
1472 hw_seg->offset = (uint16_t)offset;
1473 hw_seg->length = (uint16_t)seg_len;
1475 * Advance the segment descriptor, the padding is based
1476 * on the attributes of the last descriptor.
1478 if (tmpl->rxq.rxseg_n < n_seg)
1480 tail_len -= RTE_MIN(tail_len, seg_len);
1481 } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
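/*
 * Segments keep being appended (reusing the last described pool) until
 * the maximum packet length is fully covered and the segment count is a
 * power of two, since the number of SGEs per WQE is stored as a log2.
 */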
1482 MLX5_ASSERT(tmpl->rxq.rxseg_n &&
1483 tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
1484 if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
1485 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1486 " configured and not enough mbuf space(%u) to contain "
1487 "the maximum RX packet length(%u) with head-room(%u)",
1488 dev->data->port_id, idx, mb_len, max_rx_pktlen,
1489 RTE_PKTMBUF_HEADROOM);
1493 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1494 if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
1495 &priv->sh->cdev->mr_scache.dev_gen, socket)) {
1496 /* rte_errno is already set. */
1499 tmpl->socket = socket;
1500 if (dev->data->dev_conf.intr_conf.rxq)
1503 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1504 * following conditions are met:
1505 * - MPRQ is enabled.
1506 * - The number of descs is more than the number of strides.
1507 * - max_rx_pktlen plus overhead is less than the max size
1508 * of a stride or mprq_stride_size is specified by a user.
1509 * Need to make sure that there are enough strides to encap
1510 * the maximum packet size in case mprq_stride_size is set.
1511 * Otherwise, enable Rx scatter if necessary.
1513 if (mprq_en && desc > (1U << mprq_stride_nums) &&
1514 (non_scatter_min_mbuf_size <=
1515 (1U << config->mprq.max_stride_size_n) ||
1516 (config->mprq.stride_size_n &&
1517 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1518 /* TODO: Rx scatter isn't supported yet. */
1519 tmpl->rxq.sges_n = 0;
1520 /* Trim the number of descs needed. */
1521 desc >>= mprq_stride_nums;
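/*
 * One MPRQ WQE serves up to 2^mprq_stride_nums packets, so the requested
 * descriptor count is scaled down accordingly.
 */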
1522 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1523 config->mprq.stride_num_n : mprq_stride_nums;
1524 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1525 config->mprq.stride_size_n : mprq_stride_size;
1526 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1527 tmpl->rxq.strd_scatter_en =
1528 !!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
1529 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1530 config->mprq.max_memcpy_len);
1531 max_lro_size = RTE_MIN(max_rx_pktlen,
1532 (1u << tmpl->rxq.strd_num_n) *
1533 (1u << tmpl->rxq.strd_sz_n));
1535 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1536 " strd_num_n = %u, strd_sz_n = %u",
1537 dev->data->port_id, idx,
1538 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1539 } else if (tmpl->rxq.rxseg_n == 1) {
1540 MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
1541 tmpl->rxq.sges_n = 0;
1542 max_lro_size = max_rx_pktlen;
1543 } else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
1544 unsigned int sges_n;
1546 if (lro_on_queue && first_mb_free_size <
1547 MLX5_MAX_LRO_HEADER_FIX) {
1548 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1549 " to include the max header size(%u) for LRO",
1550 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1551 rte_errno = ENOTSUP;
1555 * Determine the number of SGEs needed for a full packet
1556 * and round it to the next power of two.
1558 sges_n = log2above(tmpl->rxq.rxseg_n);
1559 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1561 "port %u too many SGEs (%u) needed to handle"
1562 " requested maximum packet size %u, the maximum"
1563 " supported are %u", dev->data->port_id,
1564 1 << sges_n, max_rx_pktlen,
1565 1u << MLX5_MAX_LOG_RQ_SEGS);
1566 rte_errno = ENOTSUP;
1569 tmpl->rxq.sges_n = sges_n;
1570 max_lro_size = max_rx_pktlen;
1572 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1574 "port %u MPRQ is requested but cannot be enabled\n"
1575 " (requested: pkt_sz = %u, desc_num = %u,"
1576 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1577 " supported: min_rxqs_num = %u,"
1578 " min_stride_sz = %u, max_stride_sz = %u).",
1579 dev->data->port_id, non_scatter_min_mbuf_size,
1581 config->mprq.stride_size_n ?
1582 (1U << config->mprq.stride_size_n) :
1583 (1U << mprq_stride_size),
1584 config->mprq.stride_num_n ?
1585 (1U << config->mprq.stride_num_n) :
1586 (1U << mprq_stride_nums),
1587 config->mprq.min_rxqs_num,
1588 (1U << config->mprq.min_stride_size_n),
1589 (1U << config->mprq.max_stride_size_n));
1590 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1591 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1592 if (desc % (1 << tmpl->rxq.sges_n)) {
1594 "port %u number of Rx queue descriptors (%u) is not a"
1595 " multiple of SGEs per packet (%u)",
1598 1 << tmpl->rxq.sges_n);
1602 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1603 /* Toggle RX checksum offload if hardware supports it. */
1604 tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
1605 /* Configure Rx timestamp. */
1606 tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
1607 tmpl->rxq.timestamp_rx_flag = 0;
1608 if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
1609 &tmpl->rxq.timestamp_offset,
1610 &tmpl->rxq.timestamp_rx_flag) != 0) {
1611 DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
1614 /* Configure VLAN stripping. */
1615 tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
1616 /* By default, FCS (CRC) is stripped by hardware. */
1617 tmpl->rxq.crc_present = 0;
1618 tmpl->rxq.lro = lro_on_queue;
1619 if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
1620 if (config->hw_fcs_strip) {
1622 * RQs used for LRO-enabled TIRs should not be
1623 * configured to scatter the FCS.
1627 "port %u CRC stripping has been "
1628 "disabled but will still be performed "
1629 "by hardware, because LRO is enabled",
1630 dev->data->port_id);
1632 tmpl->rxq.crc_present = 1;
1635 "port %u CRC stripping has been disabled but will"
1636 " still be performed by hardware, make sure MLNX_OFED"
1637 " and firmware are up to date",
1638 dev->data->port_id);
1642 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1643 " incoming frames to hide it",
1645 tmpl->rxq.crc_present ? "disabled" : "enabled",
1646 tmpl->rxq.crc_present << 2);
1648 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1649 (!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
1650 tmpl->rxq.port_id = dev->data->port_id;
1651 tmpl->sh = priv->sh;
1652 tmpl->rxq.mp = rx_seg[0].mp;
1653 tmpl->rxq.elts_n = log2above(desc);
1654 tmpl->rxq.rq_repl_thresh =
1655 MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
1657 (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
1658 tmpl->rxq.mprq_bufs =
1659 (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
1661 tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
1663 tmpl->rxq.idx = idx;
1664 mlx5_rxq_ref(dev, idx);
1665 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1668 mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
1674 * Create a DPDK Rx hairpin queue.
1677 * Pointer to Ethernet device.
1681 * Number of descriptors to configure in queue.
1682 * @param hairpin_conf
1683 * The hairpin binding configuration.
1686 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1688 struct mlx5_rxq_ctrl *
1689 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
1691 const struct rte_eth_hairpin_conf *hairpin_conf)
1693 uint16_t idx = rxq->idx;
1694 struct mlx5_priv *priv = dev->data->dev_private;
1695 struct mlx5_rxq_ctrl *tmpl;
1697 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1703 LIST_INIT(&tmpl->owners);
1705 LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
1706 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1707 tmpl->socket = SOCKET_ID_ANY;
1708 tmpl->rxq.rss_hash = 0;
1709 tmpl->rxq.port_id = dev->data->port_id;
1710 tmpl->sh = priv->sh;
1711 tmpl->rxq.mp = NULL;
1712 tmpl->rxq.elts_n = log2above(desc);
1713 tmpl->rxq.elts = NULL;
1714 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
1715 tmpl->rxq.idx = idx;
1716 rxq->hairpin_conf = *hairpin_conf;
1717 mlx5_rxq_ref(dev, idx);
1718 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1723 * Increase Rx queue reference count.
1726 * Pointer to Ethernet device.
1731 * A pointer to the queue if it exists, NULL otherwise.
1733 struct mlx5_rxq_priv *
1734 mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
1736 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
1739 __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
1744 * Dereference a Rx queue.
1747 * Pointer to Ethernet device.
1752 * Updated reference count.
1755 mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
1757 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
1761 return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
1768 * Pointer to Ethernet device.
1773 * A pointer to the queue if it exists, NULL otherwise.
1775 struct mlx5_rxq_priv *
1776 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1778 struct mlx5_priv *priv = dev->data->dev_private;
1780 if (priv->rxq_privs == NULL)
1782 return (*priv->rxq_privs)[idx];
1786 * Get Rx queue shareable control.
1789 * Pointer to Ethernet device.
1794 * A pointer to the queue control if it exists, NULL otherwise.
1796 struct mlx5_rxq_ctrl *
1797 mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)
1799 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
1801 return rxq == NULL ? NULL : rxq->ctrl;
1805 * Get Rx queue shareable data.
1808 * Pointer to Ethernet device.
1813 * A pointer to the queue data if it exists, NULL otherwise.
1815 struct mlx5_rxq_data *
1816 mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
1818 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
1820 return rxq == NULL ? NULL : &rxq->ctrl->rxq;
1824 * Release a Rx queue.
1827 * Pointer to Ethernet device.
1832 * 1 while a reference on it exists, 0 when freed.
1835 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1837 struct mlx5_priv *priv = dev->data->dev_private;
1838 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
1839 struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
1841 if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
1843 if (mlx5_rxq_deref(dev, idx) > 1)
1845 if (rxq_ctrl->obj) {
1846 priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
1847 LIST_REMOVE(rxq_ctrl->obj, next);
1848 mlx5_free(rxq_ctrl->obj);
1849 rxq_ctrl->obj = NULL;
1851 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
1852 rxq_free_elts(rxq_ctrl);
1853 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1855 if (!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED)) {
1856 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
1857 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1858 LIST_REMOVE(rxq, owner_entry);
1859 LIST_REMOVE(rxq_ctrl, next);
1860 mlx5_free(rxq_ctrl);
1861 (*priv->rxqs)[idx] = NULL;
1863 (*priv->rxq_privs)[idx] = NULL;
1869 * Verify that the Rx queue list is empty.
1872 * Pointer to Ethernet device.
1875 * The number of objects not released.
1878 mlx5_rxq_verify(struct rte_eth_dev *dev)
1880 struct mlx5_priv *priv = dev->data->dev_private;
1881 struct mlx5_rxq_ctrl *rxq_ctrl;
1884 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1885 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1886 dev->data->port_id, rxq_ctrl->rxq.idx);
1893 * Get a Rx queue type.
1896 * Pointer to Ethernet device.
1901 * The Rx queue type.
1904 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
1906 struct mlx5_priv *priv = dev->data->dev_private;
1907 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1909 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1910 rxq_ctrl = container_of((*priv->rxqs)[idx],
1911 struct mlx5_rxq_ctrl,
1913 return rxq_ctrl->type;
1915 return MLX5_RXQ_TYPE_UNDEFINED;
1919 * Get a Rx hairpin queue configuration.
1922 * Pointer to Ethernet device.
1927 * Pointer to the configuration if a hairpin RX queue, otherwise NULL.
1929 const struct rte_eth_hairpin_conf *
1930 mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
1932 struct mlx5_priv *priv = dev->data->dev_private;
1933 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
1935 if (idx < priv->rxqs_n && rxq != NULL) {
1936 if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
1937 return &rxq->hairpin_conf;
1943 * Match queues listed in arguments to queues contained in indirection table
1947 * Pointer to indirection table to match.
1949 * Queues to match against the queues in the indirection table.
1951 * Number of queues in the array.
1954 * 1 if all queues in the indirection table match, 0 otherwise.
1957 mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
1958 const uint16_t *queues, uint32_t queues_n)
1960 return (ind_tbl->queues_n == queues_n) &&
1961 (!memcmp(ind_tbl->queues, queues,
1962 ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
1966 * Get an indirection table.
1969 * Pointer to Ethernet device.
1971 * Queues entering in the indirection table.
1973 * Number of queues in the array.
1976 * An indirection table if found.
1978 struct mlx5_ind_table_obj *
1979 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1982 struct mlx5_priv *priv = dev->data->dev_private;
1983 struct mlx5_ind_table_obj *ind_tbl;
1985 rte_rwlock_read_lock(&priv->ind_tbls_lock);
1986 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1987 if ((ind_tbl->queues_n == queues_n) &&
1988 (memcmp(ind_tbl->queues, queues,
1989 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1991 __atomic_fetch_add(&ind_tbl->refcnt, 1,
1996 rte_rwlock_read_unlock(&priv->ind_tbls_lock);
2001 * Release an indirection table.
2004 * Pointer to Ethernet device.
2006 * Indirection table to release.
2008 * Indirection table for Standalone queue.
2011 * 1 while a reference on it exists, 0 when freed.
2014 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2015 struct mlx5_ind_table_obj *ind_tbl,
2018 struct mlx5_priv *priv = dev->data->dev_private;
2019 unsigned int i, ret;
2021 rte_rwlock_write_lock(&priv->ind_tbls_lock);
2022 ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
2023 if (!ret && !standalone)
2024 LIST_REMOVE(ind_tbl, next);
2025 rte_rwlock_write_unlock(&priv->ind_tbls_lock);
2028 priv->obj_ops.ind_table_destroy(ind_tbl);
2029 for (i = 0; i != ind_tbl->queues_n; ++i)
2030 claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
2036 * Verify that the indirection table list is empty.
2039 * Pointer to Ethernet device.
2042 * The number of objects not released.
2045 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2047 struct mlx5_priv *priv = dev->data->dev_private;
2048 struct mlx5_ind_table_obj *ind_tbl;
2051 rte_rwlock_read_lock(&priv->ind_tbls_lock);
2052 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2054 "port %u indirection table obj %p still referenced",
2055 dev->data->port_id, (void *)ind_tbl);
2058 rte_rwlock_read_unlock(&priv->ind_tbls_lock);
2063 * Set up the fields of an indirection table structure.
2066 * Pointer to Ethernet device.
2068 * Indirection table to modify.
2071 * 0 on success, a negative errno value otherwise and rte_errno is set.
2074 mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
2075 struct mlx5_ind_table_obj *ind_tbl)
2077 struct mlx5_priv *priv = dev->data->dev_private;
2078 uint32_t queues_n = ind_tbl->queues_n;
2079 uint16_t *queues = ind_tbl->queues;
2082 const unsigned int n = rte_is_power_of_2(queues_n) ?
2083 log2above(queues_n) :
2084 log2above(priv->config.ind_table_max_size);
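/*
 * The device takes a power-of-two (log2) indirection table size; for a
 * non power-of-two queue count the maximum supported size is requested
 * and the queue list is presumably replicated by the object creation
 * callback to fill it.
 */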
2086 for (i = 0; i != queues_n; ++i) {
2087 if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
2092 ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
2095 __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
2099 for (j = 0; j < i; j++)
2100 mlx5_rxq_deref(dev, ind_tbl->queues[j]);
2102 DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
2103 dev->data->port_id);
2108 * Create an indirection table.
2111 * Pointer to Ethernet device.
2113 * Queues entering in the indirection table.
2115 * Number of queues in the array.
2117 * Indirection table for Standalone queue.
2120 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2122 static struct mlx5_ind_table_obj *
2123 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2124 uint32_t queues_n, bool standalone)
2126 struct mlx5_priv *priv = dev->data->dev_private;
2127 struct mlx5_ind_table_obj *ind_tbl;
2130 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
2131 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
2136 ind_tbl->queues_n = queues_n;
2137 ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
2138 memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
2139 ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
2145 rte_rwlock_write_lock(&priv->ind_tbls_lock);
2146 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2147 rte_rwlock_write_unlock(&priv->ind_tbls_lock);
2153 mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused,
2154 struct mlx5_ind_table_obj *ind_tbl)
2158 refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
2162 * Modification of indirection tables having more than 1
2163 * reference is unsupported.
2166 "Port %u cannot modify indirection table %p (refcnt %u > 1).",
2167 dev->data->port_id, (void *)ind_tbl, refcnt);
2173 * Modify an indirection table.
2176 * Pointer to Ethernet device.
2178 * Indirection table to modify.
2180 * Queues replacement for the indirection table.
2182 * Number of queues in the array.
2184 * Indirection table for Standalone queue.
2187 * 0 on success, a negative errno value otherwise and rte_errno is set.
2190 mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
2191 struct mlx5_ind_table_obj *ind_tbl,
2192 uint16_t *queues, const uint32_t queues_n,
2195 struct mlx5_priv *priv = dev->data->dev_private;
2198 const unsigned int n = rte_is_power_of_2(queues_n) ?
2199 log2above(queues_n) :
2200 log2above(priv->config.ind_table_max_size);
2202 MLX5_ASSERT(standalone);
2203 RTE_SET_USED(standalone);
2204 if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
2206 for (i = 0; i != queues_n; ++i) {
2207 if (!mlx5_rxq_get(dev, queues[i])) {
2212 MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2213 ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
2216 ind_tbl->queues_n = queues_n;
2217 ind_tbl->queues = queues;
2222 DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
2223 dev->data->port_id);
2228 * Attach an indirection table to its queues.
2231 * Pointer to Ethernet device.
2233 * Indirection table to attach.
2236 * 0 on success, a negative errno value otherwise and rte_errno is set.
2239 mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
2240 struct mlx5_ind_table_obj *ind_tbl)
2245 ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
2246 ind_tbl->queues_n, true);
2248 DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
2249 dev->data->port_id, (void *)ind_tbl);
2252 for (i = 0; i < ind_tbl->queues_n; i++)
2253 mlx5_rxq_get(dev, ind_tbl->queues[i]);
2258 * Detach an indirection table from its queues.
2261 * Pointer to Ethernet device.
2263 * Indirection table to detach.
2266 * 0 on success, a negative errno value otherwise and rte_errno is set.
2269 mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
2270 struct mlx5_ind_table_obj *ind_tbl)
2272 struct mlx5_priv *priv = dev->data->dev_private;
2273 const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ?
2274 log2above(ind_tbl->queues_n) :
2275 log2above(priv->config.ind_table_max_size);
2279 ret = mlx5_ind_table_obj_check_standalone(dev, ind_tbl);
2282 MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2283 ret = priv->obj_ops.ind_table_modify(dev, n, NULL, 0, ind_tbl);
2285 DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
2286 dev->data->port_id, (void *)ind_tbl);
2289 for (i = 0; i < ind_tbl->queues_n; i++)
2290 mlx5_rxq_release(dev, ind_tbl->queues[i]);
2295 mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
2298 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2299 struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2300 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
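/*
 * mlx5 list match callbacks return 0 on a match, hence the comparison
 * chain below returns nonzero as soon as any RSS attribute differs.
 */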
2302 return (hrxq->rss_key_len != rss_desc->key_len ||
2303 memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
2304 hrxq->hash_fields != rss_desc->hash_fields ||
2305 hrxq->ind_table->queues_n != rss_desc->queue_num ||
2306 memcmp(hrxq->ind_table->queues, rss_desc->queue,
2307 rss_desc->queue_num * sizeof(rss_desc->queue[0])));
/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq_idx
 *   Index to Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param rss_key_len
 *   RSS key length.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param queues
 *   Queues entering in hash queue. In case of empty hash_fields only the
 *   first queue index will be taken for the indirection table.
 * @param queues_n
 *   Number of queues.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
		 const uint8_t *rss_key, uint32_t rss_key_len,
		 uint64_t hash_fields,
		 const uint16_t *queues, uint32_t queues_n)
{
	int err;
	struct mlx5_ind_table_obj *ind_tbl = NULL;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq =
		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
	int ret;

	if (!hrxq) {
		rte_errno = ENOENT;
		return -rte_errno;
	}
	if (hrxq->rss_key_len != rss_key_len) {
		/* rss_key_len is fixed size 40 byte & not supposed to change */
		rte_errno = EINVAL;
		return -rte_errno;
	}
	queues_n = hash_fields ? queues_n : 1;
	if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
					    queues, queues_n)) {
		ind_tbl = hrxq->ind_table;
	} else {
		if (hrxq->standalone) {
			/*
			 * Replacement of indirection table unsupported for
			 * standalone hrxq objects (used by shared RSS).
			 */
			rte_errno = ENOTSUP;
			return -rte_errno;
		}
		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
		if (!ind_tbl)
			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
							 hrxq->standalone);
	}
	if (!ind_tbl) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	MLX5_ASSERT(priv->obj_ops.hrxq_modify);
	ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
					hash_fields, ind_tbl);
	if (ret) {
		rte_errno = errno;
		goto error;
	}
	if (ind_tbl != hrxq->ind_table) {
		MLX5_ASSERT(!hrxq->standalone);
		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
					   hrxq->standalone);
		hrxq->ind_table = ind_tbl;
	}
	hrxq->hash_fields = hash_fields;
	memcpy(hrxq->rss_key, rss_key, rss_key_len);
	return 0;
error:
	err = rte_errno;
	if (ind_tbl != hrxq->ind_table) {
		MLX5_ASSERT(!hrxq->standalone);
		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
	}
	rte_errno = err;
	return -rte_errno;
}

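/**
 * Remove the Rx Hash queue and release its resources.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Hash Rx queue to release.
 */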
static void
__mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
	struct mlx5_priv *priv = dev->data->dev_private;

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	mlx5_glue->destroy_flow_action(hrxq->action);
#endif
	priv->obj_ops.hrxq_destroy(hrxq);
	if (!hrxq->standalone) {
		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
					   hrxq->standalone);
	}
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}

/**
 * Release the hash Rx queue (mlx5 list callback).
 *
 * @param tool_ctx
 *   Pointer to Ethernet device (mlx5 list context).
 * @param entry
 *   Hash queue entry pointer.
 */
void
mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);

	__mlx5_hrxq_remove(dev, hrxq);
}

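/**
 * Create an Rx Hash queue from an RSS descriptor.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param rss_desc
 *   RSS configuration for the Rx hash queue.
 *
 * @return
 *   Pointer to the created hash Rx queue, NULL otherwise.
 */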
static struct mlx5_hrxq *
__mlx5_hrxq_create(struct rte_eth_dev *dev,
		   struct mlx5_flow_rss_desc *rss_desc)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const uint8_t *rss_key = rss_desc->key;
	uint32_t rss_key_len = rss_desc->key_len;
	bool standalone = !!rss_desc->shared_rss;
	const uint16_t *queues =
		standalone ? rss_desc->const_q : rss_desc->queue;
	uint32_t queues_n = rss_desc->queue_num;
	struct mlx5_hrxq *hrxq = NULL;
	uint32_t hrxq_idx = 0;
	struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
	int ret;

	queues_n = rss_desc->hash_fields ? queues_n : 1;
	if (!ind_tbl)
		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
	if (!ind_tbl)
		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
						 standalone);
	if (!ind_tbl)
		return NULL;
	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
	if (!hrxq)
		goto error;
	hrxq->standalone = standalone;
	hrxq->idx = hrxq_idx;
	hrxq->ind_table = ind_tbl;
	hrxq->rss_key_len = rss_key_len;
	hrxq->hash_fields = rss_desc->hash_fields;
	memcpy(hrxq->rss_key, rss_key, rss_key_len);
	ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
	if (ret < 0)
		goto error;
	return hrxq;
error:
	if (!rss_desc->ind_tbl)
		mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
	if (hrxq)
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
	return NULL;
}

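/**
 * Create an Rx Hash queue (mlx5 list callback).
 *
 * @param tool_ctx
 *   Pointer to Ethernet device (mlx5 list context).
 * @param cb_ctx
 *   Pointer to the mlx5_flow_cb_ctx carrying the RSS descriptor.
 *
 * @return
 *   Pointer to the created list entry, NULL on failure.
 */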
struct mlx5_list_entry *
mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
	struct mlx5_hrxq *hrxq;

	hrxq = __mlx5_hrxq_create(dev, rss_desc);
	return hrxq ? &hrxq->entry : NULL;
}

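/**
 * Clone an Rx Hash queue list entry (mlx5 list callback).
 *
 * @param tool_ctx
 *   Pointer to Ethernet device (mlx5 list context).
 * @param entry
 *   Hash queue entry to clone.
 * @param cb_ctx
 *   Callback context (unused).
 *
 * @return
 *   Pointer to the cloned list entry, NULL on failure.
 */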
struct mlx5_list_entry *
mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry,
		   void *cb_ctx __rte_unused)
{
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq;
	uint32_t hrxq_idx = 0;

	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
	if (!hrxq)
		return NULL;
	memcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN);
	hrxq->idx = hrxq_idx;
	return &hrxq->entry;
}

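/**
 * Free a cloned Rx Hash queue list entry (mlx5 list callback).
 *
 * @param tool_ctx
 *   Pointer to Ethernet device (mlx5 list context).
 * @param entry
 *   Hash queue entry to free.
 */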
void
mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);

	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}

/**
 * Get an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param rss_desc
 *   RSS configuration for the Rx hash queue.
 *
 * @return
 *   A hash Rx queue index on success, 0 otherwise.
 */
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
		       struct mlx5_flow_rss_desc *rss_desc)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq;
	struct mlx5_list_entry *entry;
	struct mlx5_flow_cb_ctx ctx = {
		.data = rss_desc,
	};

	if (rss_desc->shared_rss) {
		hrxq = __mlx5_hrxq_create(dev, rss_desc);
	} else {
		entry = mlx5_list_register(priv->hrxqs, &ctx);
		if (!entry)
			return 0;
		hrxq = container_of(entry, typeof(*hrxq), entry);
	}
	if (hrxq)
		return hrxq->idx;
	return 0;
}

/**
 * Release the hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq_idx
 *   Index to Hash Rx queue to release.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq;

	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
	if (!hrxq)
		return 0;
	if (!hrxq->standalone)
		return mlx5_list_unregister(priv->hrxqs, &hrxq->entry);
	__mlx5_hrxq_remove(dev, hrxq);
	return 0;
}

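/*
 * Usage sketch (illustration only, not part of this file's API contract):
 * the flow layer typically resolves an RSS descriptor to a hash Rx queue
 * index with mlx5_hrxq_get() and drops the reference with
 * mlx5_hrxq_release() when the flow is destroyed. The descriptor fields
 * shown are assumed to be filled by the caller.
 *
 *	struct mlx5_flow_rss_desc rss_desc = { 0 };
 *	uint32_t hrxq_idx;
 *
 *	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
 *	... fill key, hash_fields, queue[] and queue_num ...
 *	hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
 *	if (hrxq_idx != 0) {
 *		... program the flow rule with this hash Rx queue ...
 *		mlx5_hrxq_release(dev, hrxq_idx);
 *	}
 */
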
/**
 * Create a drop Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
 */
struct mlx5_hrxq *
mlx5_drop_action_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = NULL;
	int ret;

	if (priv->drop_queue.hrxq)
		return priv->drop_queue.hrxq;
	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
	if (!hrxq) {
		DRV_LOG(WARNING,
			"Port %u cannot allocate memory for drop queue.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	priv->drop_queue.hrxq = hrxq;
	hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
				      0, SOCKET_ID_ANY);
	if (!hrxq->ind_table) {
		rte_errno = ENOMEM;
		goto error;
	}
	ret = priv->obj_ops.drop_action_create(dev);
	if (ret < 0)
		goto error;
	return hrxq;
error:
	if (hrxq) {
		if (hrxq->ind_table)
			mlx5_free(hrxq->ind_table);
		priv->drop_queue.hrxq = NULL;
		mlx5_free(hrxq);
	}
	return NULL;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;

	if (!priv->drop_queue.hrxq)
		return;
	priv->obj_ops.drop_action_destroy(dev);
	mlx5_free(priv->drop_queue.rxq);
	mlx5_free(hrxq->ind_table);
	mlx5_free(hrxq);
	priv->drop_queue.rxq = NULL;
	priv->drop_queue.hrxq = NULL;
}

/**
 * Verify the Rx Hash queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
uint32_t
mlx5_hrxq_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	return mlx5_list_get_entry_num(priv->hrxqs);
}

/**
 * Set the Rx queue timestamp conversion parameters.
 *
 * @param dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_rxq_data *data;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		if (!(*priv->rxqs)[i])
			continue;
		data = (*priv->rxqs)[i];
		data->sh = sh;
		data->rt_timestamp = priv->config.rt_timestamp;