/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>

#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_malloc.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_devx.h"
#include "rte_pmd_mlx5.h"

/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};

/* Length of the default RSS hash key. */
static_assert(MLX5_RSS_HASH_KEY_LEN ==
	      (unsigned int)sizeof(rss_hash_default_key),
	      "wrong RSS default key size.");
/**
 * Calculate the number of CQEs in CQ for the Rx queue.
 *
 * @param rxq_data
 *   Pointer to receive queue structure.
 *
 * @return
 *   Number of CQEs in CQ.
 */
static unsigned int
mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
{
	unsigned int cqe_n;
	unsigned int wqe_n = 1 << rxq_data->elts_n;

	if (mlx5_rxq_mprq_enabled(rxq_data))
		cqe_n = wqe_n * RTE_BIT32(rxq_data->log_strd_num) - 1;
	else
		cqe_n = wqe_n - 1;
	return cqe_n;
}
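/*
 * Worked example (editor's illustration): with elts_n = 4 (16 WQEs) and
 * log_strd_num = 9 (512 strides per WQE), every stride may produce a
 * CQE, hence cqe_n = 16 * 512 - 1 = 8191; without MPRQ it is simply
 * wqe_n - 1 = 15.
 */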
/**
 * Allocate RX queue elements for Multi-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	unsigned int wqe_n = 1 << rxq->elts_n;

	/* Iterate on segments. */
	for (i = 0; i <= wqe_n; ++i) {
		struct mlx5_mprq_buf *buf;

		if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
			DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
			rte_errno = ENOMEM;
			goto error;
		}
		if (i < wqe_n)
			(*rxq->mprq_bufs)[i] = buf;
		else
			rxq->mprq_repl = buf;
	}
	DRV_LOG(DEBUG,
		"port %u MPRQ queue %u allocated and configured %u segments",
		rxq->port_id, rxq->idx, wqe_n);
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	for (i = 0; (i != wqe_n); ++i) {
		if ((*rxq->mprq_bufs)[i] != NULL)
			rte_mempool_put(rxq->mprq_mp,
					(*rxq->mprq_bufs)[i]);
		(*rxq->mprq_bufs)[i] = NULL;
	}
	DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
		rxq->port_id, rxq->idx);
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Allocate RX queue elements for Single-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
	unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		RTE_BIT32(rxq_ctrl->rxq.elts_n) *
		RTE_BIT32(rxq_ctrl->rxq.log_strd_num) :
		RTE_BIT32(rxq_ctrl->rxq.elts_n);
	bool has_vec_support = mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0;

	/* Iterate on segments. */
	for (i = 0; (i != elts_n); ++i) {
		struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
		struct rte_mbuf *buf;

		buf = rte_pktmbuf_alloc(seg->mp);
		if (buf == NULL) {
			if (rxq_ctrl->share_group == 0)
				DRV_LOG(ERR, "port %u queue %u empty mbuf pool",
					RXQ_PORT_ID(rxq_ctrl),
					rxq_ctrl->rxq.idx);
			else
				DRV_LOG(ERR, "share group %u queue %u empty mbuf pool",
					rxq_ctrl->share_group,
					rxq_ctrl->share_qid);
			rte_errno = ENOMEM;
			goto error;
		}
		/* Only vectorized Rx routines rely on headroom size. */
		MLX5_ASSERT(!has_vec_support ||
			    DATA_OFF(buf) >= RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
		MLX5_ASSERT(!buf->next);
		SET_DATA_OFF(buf, seg->offset);
		PORT(buf) = rxq_ctrl->rxq.port_id;
		DATA_LEN(buf) = seg->length;
		PKT_LEN(buf) = seg->length;
		(*rxq_ctrl->rxq.elts)[i] = buf;
	}
	/* If Rx vector is activated. */
	if (has_vec_support) {
		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
		struct rte_pktmbuf_pool_private *priv =
			(struct rte_pktmbuf_pool_private *)
				rte_mempool_get_priv(rxq_ctrl->rxq.mp);

		/* Initialize default rearm_data for vPMD. */
		mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
		rte_mbuf_refcnt_set(mbuf_init, 1);
		mbuf_init->nb_segs = 1;
		/* For shared queues, the port is provided in the CQE. */
		mbuf_init->port = rxq->shared ? 0 : rxq->port_id;
		if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
			mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
		/*
		 * Prevent compiler reordering:
		 * rearm_data covers previous fields.
		 */
		rte_compiler_barrier();
		rxq->mbuf_initializer =
			*(rte_xmm_t *)&mbuf_init->rearm_data;
		/* Padding with a fake mbuf for vectorized Rx. */
		for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
			(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
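		/*
		 * Editor's note: these MLX5_VPMD_DESCS_PER_LOOP trailing
		 * slots let the vectorized burst load descriptors past the
		 * ring tail in full SIMD strides; &rxq->fake_mbuf is a safe
		 * sink for the unused lanes.
		 */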
	}
	if (rxq_ctrl->share_group == 0)
		DRV_LOG(DEBUG,
			"port %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
			RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx, elts_n,
			elts_n / (1 << rxq_ctrl->rxq.sges_n));
	else
		DRV_LOG(DEBUG,
			"share group %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
			rxq_ctrl->share_group, rxq_ctrl->share_qid, elts_n,
			elts_n / (1 << rxq_ctrl->rxq.sges_n));
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	for (i = 0; (i != elts_n); ++i) {
		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
		(*rxq_ctrl->rxq.elts)[i] = NULL;
	}
	if (rxq_ctrl->share_group == 0)
		DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
			RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx);
	else
		DRV_LOG(DEBUG, "share group %u SPRQ queue %u failed, freed everything",
			rxq_ctrl->share_group, rxq_ctrl->share_qid);
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Allocate RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	int ret = 0;

	/*
	 * For MPRQ we need to allocate both MPRQ buffers
	 * for WQEs and simple mbufs for vector processing.
	 */
	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
		ret = rxq_alloc_elts_mprq(rxq_ctrl);
	if (ret == 0)
		ret = rxq_alloc_elts_sprq(rxq_ctrl);
	return ret;
}

/**
 * Free RX queue elements for Multi-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;

	DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
		rxq->port_id, rxq->idx, (1u << rxq->elts_n));
	if (rxq->mprq_bufs == NULL)
		return;
	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
		if ((*rxq->mprq_bufs)[i] != NULL)
			mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
		(*rxq->mprq_bufs)[i] = NULL;
	}
	if (rxq->mprq_repl != NULL) {
		mlx5_mprq_buf_free(rxq->mprq_repl);
		rxq->mprq_repl = NULL;
	}
}

/**
 * Free RX queue elements for Single-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
		RTE_BIT32(rxq->elts_n);
	const uint16_t q_mask = q_n - 1;
	uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		rxq->elts_ci : rxq->rq_ci;
	uint16_t used = q_n - (elts_ci - rxq->rq_pi);

	if (rxq_ctrl->share_group == 0)
		DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
			RXQ_PORT_ID(rxq_ctrl), rxq->idx, q_n);
	else
		DRV_LOG(DEBUG, "share group %u Rx queue %u freeing %d WRs",
			rxq_ctrl->share_group, rxq_ctrl->share_qid, q_n);
	if (rxq->elts == NULL)
		return;
	/*
	 * Some mbufs in the ring belong to the application;
	 * they cannot be freed.
	 */
	if (mlx5_rxq_check_vec_support(rxq) > 0) {
		for (i = 0; i < used; ++i)
			(*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
		rxq->rq_pi = elts_ci;
	}
	for (i = 0; i != q_n; ++i) {
		if ((*rxq->elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq->elts)[i]);
		(*rxq->elts)[i] = NULL;
	}
}

/**
 * Free RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	/*
	 * For MPRQ we need to free both MPRQ buffers
	 * for WQEs and simple mbufs for vector processing.
	 */
	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
		rxq_free_elts_mprq(rxq_ctrl);
	rxq_free_elts_sprq(rxq_ctrl);
}

/**
 * Returns the per-queue supported offloads.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
			     RTE_ETH_RX_OFFLOAD_TIMESTAMP |
			     RTE_ETH_RX_OFFLOAD_RSS_HASH);

	if (!priv->config.mprq.enabled)
		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	if (priv->sh->config.hw_fcs_strip)
		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
	if (priv->sh->dev_cap.hw_csum)
		offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
			     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
			     RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
	if (priv->sh->dev_cap.hw_vlan_strip)
		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	if (priv->sh->dev_cap.lro_supported)
		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
	return offloads;
}
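/*
 * Illustrative usage (editor's sketch, not part of this file): the
 * capabilities accumulated above are reported per queue through the
 * generic ethdev API, e.g.:
 *
 *	struct rte_eth_dev_info info;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	if (info.rx_queue_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 *		rxconf.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 */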
/**
 * Returns the per-port supported offloads.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx5_get_rx_port_offloads(void)
{
	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;

	return offloads;
}

/**
 * Verify if the queue can be released.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if the queue can be released,
 *   0 if the queue cannot be released because there are references to it,
 *   negative errno and rte_errno is set if the queue doesn't exist.
 */
static int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);

	if (rxq == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
}
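/*
 * Editor's note: refcnt == 1 means only the base reference taken at
 * queue setup remains; any extra reference (e.g. held by a flow or an
 * indirection table) makes the queue non-releasable.
 */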
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
static void
rxq_sync_cq(struct mlx5_rxq_data *rxq)
{
	const uint16_t cqe_n = 1 << rxq->cqe_n;
	const uint16_t cqe_mask = cqe_n - 1;
	volatile struct mlx5_cqe *cqe;
	int ret, i;

	i = cqe_n;
	do {
		cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
		ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_HW_OWN)
			break;
		if (ret == MLX5_CQE_STATUS_ERR) {
			rxq->cq_ci++;
			continue;
		}
		MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
		if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
			rxq->cq_ci++;
			continue;
		}
		/* Compute the next non-compressed CQE. */
		rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
	} while (--i);
	/* Move all CQEs to HW ownership, including possible MiniCQEs. */
	for (i = 0; i < cqe_n; i++) {
		cqe = &(*rxq->cqes)[i];
		cqe->op_own = MLX5_CQE_INVALIDATE;
	}
	/* Resync CQE and WQE (WQ in RESET state). */
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	rte_io_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(0);
	rte_io_wmb();
}

/**
 * Rx queue stop. Device queue goes to the RESET state,
 * all involved mbufs are freed from WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	int ret;

	MLX5_ASSERT(rxq != NULL && rxq_ctrl != NULL);
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	/* Remove all processed CQEs. */
	rxq_sync_cq(&rxq_ctrl->rxq);
	/* Free all involved mbufs. */
	rxq_free_elts(rxq_ctrl);
	/* Set the actual queue state. */
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

/**
 * Rx queue stop. Device queue goes to the RESET state,
 * all involved mbufs are freed from WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	int ret;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be stopped");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;
	/*
	 * Vectorized Rx burst requires the CQ and RQ indices to be
	 * synchronized; that might be broken on RQ restart and cause
	 * Rx malfunction, so queue stopping is not supported if the
	 * vectorized Rx burst is engaged. The routine pointer depends
	 * on the process type, so the check is performed here.
	 */
	if (pkt_burst == mlx5_rx_burst_vec) {
		DRV_LOG(ERR, "Rx queue stop is not supported "
			"for vectorized Rx");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_RX_STOP);
	} else {
		ret = mlx5_rx_queue_stop_primary(dev, idx);
	}
	return ret;
}
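/*
 * Illustrative usage (editor's sketch): this is the PMD backend of the
 * generic rte_eth_dev_rx_queue_stop() API, e.g.:
 *
 *	int rc = rte_eth_dev_rx_queue_stop(port_id, queue_id);
 *
 *	if (rc != 0)
 *		printf("Rx queue stop failed: %s\n", rte_strerror(-rc));
 *
 * Note the vectorized-Rx and hairpin restrictions checked above.
 */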
/**
 * Rx queue start. Device queue goes to the ready state,
 * all required mbufs are allocated and WQ is replenished.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
	struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
	int ret;

	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Allocate needed buffers. */
	ret = rxq_alloc_elts(rxq->ctrl);
	if (ret) {
		DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
		rte_errno = errno;
		return ret;
	}
	rte_io_wmb();
	*rxq_data->cq_db = rte_cpu_to_be_32(rxq_data->cq_ci);
	rte_io_wmb();
	/* Reset RQ consumer before moving queue to READY state. */
	*rxq_data->rq_db = rte_cpu_to_be_32(0);
	rte_io_wmb();
	ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RST2RDY);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	/* Reinitialize RQ - set WQEs. */
	mlx5_rxq_initialize(rxq_data);
	rxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
	/* Set actual queue state. */
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

/**
 * Rx queue start. Device queue goes to the ready state,
 * all required mbufs are allocated and WQ is replenished.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
{
	int ret;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be started");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_RX_START);
	} else {
		ret = mlx5_rx_queue_start_primary(dev, idx);
	}
	return ret;
}

/**
 * Rx queue presetup checks.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param[out] rxq_ctrl
 *   Address of pointer to shared Rx queue control.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc,
			struct mlx5_rxq_ctrl **rxq_ctrl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq;
	bool empty;

	if (!rte_is_power_of_2(*desc)) {
		*desc = 1 << log2above(*desc);
		DRV_LOG(WARNING,
			"port %u increased number of descriptors in Rx queue %u"
			" to the next power of two (%d)",
			dev->data->port_id, idx, *desc);
	}
	DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
		dev->data->port_id, idx, *desc);
	if (idx >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
			dev->data->port_id, idx, priv->rxqs_n);
		rte_errno = EOVERFLOW;
		return -rte_errno;
	}
	if (rxq_ctrl == NULL || *rxq_ctrl == NULL)
		return 0;
	if (!(*rxq_ctrl)->rxq.shared) {
		if (!mlx5_rxq_releasable(dev, idx)) {
			DRV_LOG(ERR, "port %u unable to release queue index %u",
				dev->data->port_id, idx);
			rte_errno = EBUSY;
			return -rte_errno;
		}
		mlx5_rxq_release(dev, idx);
	} else {
		if ((*rxq_ctrl)->obj != NULL)
			/* Some port using shared Rx queue has been started. */
			return 0;
		/* Release all owner RxQs to reconfigure the shared RxQ. */
		do {
			rxq = LIST_FIRST(&(*rxq_ctrl)->owners);
			LIST_REMOVE(rxq, owner_entry);
			empty = LIST_EMPTY(&(*rxq_ctrl)->owners);
			mlx5_rxq_release(ETH_DEV(rxq->priv), rxq->idx);
		} while (!empty);
		*rxq_ctrl = NULL;
	}
	return 0;
}

/**
 * Get the shared Rx queue object that matches group and queue index.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param share_qid
 *   Shared RX queue index.
 *
 * @return
 *   Shared RXQ object that matches, or NULL if not found.
 */
static struct mlx5_rxq_ctrl *
mlx5_shared_rxq_get(struct rte_eth_dev *dev, uint32_t group, uint16_t share_qid)
{
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_priv *priv = dev->data->dev_private;

	LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
		if (rxq_ctrl->share_group == group &&
		    rxq_ctrl->share_qid == share_qid)
			return rxq_ctrl;
	}
	return NULL;
}

/**
 * Check whether requested Rx queue configuration matches shared RXQ.
 *
 * @param rxq_ctrl
 *   Pointer to shared RXQ.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   true if the requested configuration matches the shared RXQ,
 *   false otherwise.
 */
static bool
mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,
		      uint16_t idx, uint16_t desc, unsigned int socket,
		      const struct rte_eth_rxconf *conf,
		      struct rte_mempool *mp)
{
	struct mlx5_priv *spriv = LIST_FIRST(&rxq_ctrl->owners)->priv;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (rxq_ctrl->socket != socket) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: socket mismatch",
			dev->data->port_id, idx);
		return false;
	}
	if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: descriptor number mismatch",
			dev->data->port_id, idx);
		return false;
	}
	if (priv->mtu != spriv->mtu) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mtu mismatch",
			dev->data->port_id, idx);
		return false;
	}
	if (priv->dev_data->dev_conf.intr_conf.rxq !=
	    spriv->dev_data->dev_conf.intr_conf.rxq) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: interrupt mismatch",
			dev->data->port_id, idx);
		return false;
	}
	if (mp != NULL && rxq_ctrl->rxq.mp != mp) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mempool mismatch",
			dev->data->port_id, idx);
		return false;
	} else if (mp == NULL) {
		if (conf->rx_nseg != rxq_ctrl->rxseg_n) {
			DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment number mismatch",
				dev->data->port_id, idx);
			return false;
		}
		for (i = 0; i < conf->rx_nseg; i++) {
			if (memcmp(&conf->rx_seg[i].split, &rxq_ctrl->rxseg[i],
				   sizeof(struct rte_eth_rxseg_split))) {
				DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch",
					dev->data->port_id, idx, i);
				return false;
			}
		}
	}
	if (priv->config.hw_padding != spriv->config.hw_padding) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: padding mismatch",
			dev->data->port_id, idx);
		return false;
	}
	if (priv->config.cqe_comp != spriv->config.cqe_comp ||
	    (priv->config.cqe_comp &&
	     priv->config.cqe_comp_fmt != spriv->config.cqe_comp_fmt)) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: CQE compression mismatch",
			dev->data->port_id, idx);
		return false;
	}
	return true;
}

/**
 * DPDK callback to configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
	struct rte_eth_rxseg_split *rx_seg =
			(struct rte_eth_rxseg_split *)conf->rx_seg;
	struct rte_eth_rxseg_split rx_single = {.mp = mp};
	uint16_t n_seg = conf->rx_nseg;
	int res;
	uint64_t offloads = conf->offloads |
			    dev->data->dev_conf.rxmode.offloads;

	if (mp) {
		/*
		 * The parameters should be checked on the rte_eth_dev layer.
		 * If mp is specified it means the compatible configuration
		 * without buffer split feature tuning.
		 */
		rx_seg = &rx_single;
		n_seg = 1;
	}
	if (n_seg > 1) {
		/* The offloads should be checked on rte_eth_dev layer. */
		MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
			DRV_LOG(ERR, "port %u queue index %u split "
				     "offload not configured",
				dev->data->port_id, idx);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
	}
	if (conf->share_group > 0) {
		if (!priv->sh->cdev->config.hca_attr.mem_rq_rmp) {
			DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
				dev->data->port_id, idx);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		if (priv->obj_ops.rxq_obj_new != devx_obj_ops.rxq_obj_new) {
			DRV_LOG(ERR, "port %u queue index %u shared Rx queue needs DevX api",
				dev->data->port_id, idx);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		if (conf->share_qid >= priv->rxqs_n) {
			DRV_LOG(ERR, "port %u shared Rx queue index %u > number of Rx queues %u",
				dev->data->port_id, conf->share_qid,
				priv->rxqs_n);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		if (priv->config.mprq.enabled) {
			DRV_LOG(ERR, "port %u shared Rx queue index %u: not supported when MPRQ enabled",
				dev->data->port_id, conf->share_qid);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		/* Try to reuse shared RXQ. */
		rxq_ctrl = mlx5_shared_rxq_get(dev, conf->share_group,
					       conf->share_qid);
		if (rxq_ctrl != NULL &&
		    !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket,
					   conf, mp)) {
			rte_errno = EINVAL;
			return -rte_errno;
		}
	}
	res = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl);
	if (res)
		return res;
	/* Allocate RX queue private data. */
	rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
			  SOCKET_ID_ANY);
	if (rxq == NULL) {
		DRV_LOG(ERR, "port %u unable to allocate Rx queue index %u private data",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (rxq_ctrl == NULL) {
		rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
					n_seg);
		if (rxq_ctrl == NULL) {
			DRV_LOG(ERR, "port %u unable to allocate Rx queue index %u",
				dev->data->port_id, idx);
			mlx5_free(rxq);
			rte_errno = ENOMEM;
			return -rte_errno;
		}
	}
	rxq->priv = priv;
	rxq->idx = idx;
	(*priv->rxq_privs)[idx] = rxq;
	/* Join owner list. */
	LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
	rxq->ctrl = rxq_ctrl;
	mlx5_rxq_ref(dev, idx);
	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
		dev->data->port_id, idx);
	dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
	return 0;
}

/**
 * DPDK callback to configure a Rx hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param hairpin_conf
 *   Hairpin configuration parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			    uint16_t desc,
			    const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	int res;

	res = mlx5_rx_queue_pre_setup(dev, idx, &desc, NULL);
	if (res)
		return res;
	if (hairpin_conf->peer_count != 1) {
		rte_errno = EINVAL;
		DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue index %u"
			" peer count is %u", dev->data->port_id,
			idx, hairpin_conf->peer_count);
		return -rte_errno;
	}
	if (hairpin_conf->peers[0].port == dev->data->port_id) {
		if (hairpin_conf->peers[0].queue >= priv->txqs_n) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
				" index %u, Tx %u is larger than %u",
				dev->data->port_id, idx,
				hairpin_conf->peers[0].queue, priv->txqs_n);
			return -rte_errno;
		}
	} else {
		if (hairpin_conf->manual_bind == 0 ||
		    hairpin_conf->tx_explicit == 0) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
				" index %u peer port %u with attributes %u %u",
				dev->data->port_id, idx,
				hairpin_conf->peers[0].port,
				hairpin_conf->manual_bind,
				hairpin_conf->tx_explicit);
			return -rte_errno;
		}
	}
	rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
			  SOCKET_ID_ANY);
	if (rxq == NULL) {
		DRV_LOG(ERR, "port %u unable to allocate hairpin Rx queue index %u private data",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	rxq->priv = priv;
	rxq->idx = idx;
	(*priv->rxq_privs)[idx] = rxq;
	rxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf);
	if (rxq_ctrl == NULL) {
		DRV_LOG(ERR, "port %u unable to allocate hairpin queue index %u",
			dev->data->port_id, idx);
		mlx5_free(rxq);
		(*priv->rxq_privs)[idx] = NULL;
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
		dev->data->port_id, idx);
	dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
	return 0;
}

/**
 * DPDK callback to release a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Receive queue index.
 */
void
mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;
	if (!mlx5_rxq_releasable(dev, qid))
		rte_panic("port %u Rx queue %u is still used by a flow and"
			  " cannot be removed\n", dev->data->port_id, qid);
	mlx5_rxq_release(dev, qid);
}

/**
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	unsigned int count = 0;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	if (!dev->data->dev_conf.intr_conf.rxq)
		return 0;
	mlx5_rx_intr_vec_disable(dev);
	if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) {
		DRV_LOG(ERR,
			"port %u failed to allocate memory for interrupt"
			" vector, Rx interrupts will not be supported",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_EXT))
		return -rte_errno;
	for (i = 0; i != n; ++i) {
		/* This rxq obj must not be released in this function. */
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
		struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;
		int rc;

		/* Skip queues that cannot request interrupts. */
		if (!rxq_obj || (!rxq_obj->ibv_channel &&
				 !rxq_obj->devx_channel)) {
			/* Use invalid intr_vec[] index to disable entry. */
			if (rte_intr_vec_list_index_set(intr_handle, i,
			    RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
				return -rte_errno;
			continue;
		}
		mlx5_rxq_ref(dev, i);
		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
			DRV_LOG(ERR,
				"port %u too many Rx queues for interrupt"
				" vector size (%d), Rx interrupts cannot be"
				" enabled",
				dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
			mlx5_rx_intr_vec_disable(dev);
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
		if (rc < 0) {
			rte_errno = errno;
			DRV_LOG(ERR,
				"port %u failed to make Rx interrupt file"
				" descriptor %d non-blocking for queue index"
				" %d",
				dev->data->port_id, rxq_obj->fd, i);
			mlx5_rx_intr_vec_disable(dev);
			return -rte_errno;
		}
		if (rte_intr_vec_list_index_set(intr_handle, i,
					RTE_INTR_VEC_RXTX_OFFSET + count))
			return -rte_errno;
		if (rte_intr_efds_index_set(intr_handle, count,
					    rxq_obj->fd))
			return -rte_errno;
		count++;
	}
	if (!count)
		mlx5_rx_intr_vec_disable(dev);
	else if (rte_intr_nb_efd_set(intr_handle, count))
		return -rte_errno;
	return 0;
}

/**
 * Clean up Rx interrupts handler.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	unsigned int i;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	if (!dev->data->dev_conf.intr_conf.rxq)
		return;
	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0)
		goto free;
	for (i = 0; i != n; ++i) {
		if (rte_intr_vec_list_index_get(intr_handle, i) ==
		    RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID)
			continue;
		/*
		 * Need to access directly the queue to release the reference
		 * kept in mlx5_rx_intr_vec_enable().
		 */
		mlx5_rxq_deref(dev, i);
	}
free:
	rte_intr_free_epoll_fd(intr_handle);
	rte_intr_vec_list_free(intr_handle);
	rte_intr_nb_efd_set(intr_handle, 0);
}

/**
 * MLX5 CQ notification.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 * @param sq_n_rxq
 *   Sequence number per receive queue.
 */
void
mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
{
	int sq_n = 0;
	uint32_t doorbell_hi;
	uint64_t doorbell;

	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
	doorbell = (uint64_t)doorbell_hi << 32;
	doorbell |= rxq->cqn;
	mlx5_doorbell_ring(&rxq->uar_data, rte_cpu_to_be_64(doorbell),
			   doorbell_hi, &rxq->cq_db[MLX5_CQ_ARM_DB], 0);
}
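/*
 * Editor's note on the doorbell layout: doorbell_hi packs the arm
 * sequence number into the bits selected by MLX5_CQ_SQN_OFFSET and the
 * CQ consumer index into the bits covered by MLX5_CI_MASK; the 64-bit
 * UAR word then appends the CQ number. For example, with a 28-bit
 * offset, sq_n = 1 and cq_ci = 0x20 give doorbell_hi = 0x10000020.
 */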
/**
 * DPDK callback for Rx queue interrupt enable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);

	if (rxq == NULL)
		goto error;
	if (rxq->ctrl->irq) {
		if (!rxq->ctrl->obj)
			goto error;
		mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);
	}
	return 0;
error:
	rte_errno = EINVAL;
	return -rte_errno;
}

/**
 * DPDK callback for Rx queue interrupt disable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
	int ret = 0;

	if (rxq == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (!rxq->ctrl->obj)
		goto out;
	if (rxq->ctrl->irq) {
		ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);
		if (ret < 0)
			goto out;
		rxq->ctrl->rxq.cq_arm_sn++;
		priv->obj_ops.rxq_event_release(rxq->ctrl->obj);
	}
	return 0;
out:
	/*
	 * The ret variable may be EAGAIN, which means the get_event function
	 * was called before receiving one.
	 */
	if (rte_errno != EAGAIN)
		DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
			dev->data->port_id, rx_queue_id);
	return -rte_errno;
}

/**
 * Verify that the Rx queue objects list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_rxq_obj *rxq_obj;

	LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
		if (rxq_obj->rxq_ctrl == NULL)
			continue;
		if (rxq_obj->rxq_ctrl->rxq.shared &&
		    !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
			continue;
		DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
			dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
		++ret;
	}
	return ret;
}

/**
 * Callback function to initialize mbufs for Multi-Packet RQ.
 */
static void
mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
		   void *_m, unsigned int i __rte_unused)
{
	struct mlx5_mprq_buf *buf = _m;
	struct rte_mbuf_ext_shared_info *shinfo;
	unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;

	memset(_m, 0, sizeof(*buf));
	buf->mp = mp;
	__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
	for (j = 0; j != strd_n; ++j) {
		shinfo = &buf->shinfos[j];
		shinfo->free_cb = mlx5_mprq_buf_free_cb;
		shinfo->fcb_opaque = buf;
	}
}

/**
 * Free mempool of Multi-Packet RQ.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_mprq_free_mp(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_mempool *mp = priv->mprq_mp;
	unsigned int i;

	if (mp == NULL)
		return 0;
	DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
		dev->data->port_id, mp->name);
	/*
	 * If a buffer in the pool has been externally attached to a mbuf and it
	 * is still in use by the application, destroying the Rx queue can spoil
	 * the packet. It is unlikely to happen but if the application
	 * dynamically creates and destroys queues while holding Rx packets,
	 * this can happen.
	 *
	 * TODO: It is unavoidable for now because the mempool for Multi-Packet
	 * RQ isn't provided by the application but managed by the PMD.
	 */
	if (!rte_mempool_full(mp)) {
		DRV_LOG(ERR,
			"port %u mempool for Multi-Packet RQ is still in use",
			dev->data->port_id);
		rte_errno = EBUSY;
		return -rte_errno;
	}
	rte_mempool_free(mp);
	/* Unset mempool for each Rx queue. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, i);

		if (rxq == NULL)
			continue;
		rxq->mprq_mp = NULL;
	}
	priv->mprq_mp = NULL;
	return 0;
}

/**
 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
 * mempool. If already allocated, reuse it if there are enough elements.
 * Otherwise, resize it.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_mempool *mp = priv->mprq_mp;
	char name[RTE_MEMPOOL_NAMESIZE];
	unsigned int desc = 0;
	unsigned int buf_len;
	unsigned int obj_num;
	unsigned int obj_size;
	unsigned int log_strd_num = 0;
	unsigned int log_strd_sz = 0;
	unsigned int i;
	unsigned int n_ibv = 0;
	int ret;

	if (!mlx5_mprq_enabled(dev))
		return 0;
	/* Count the total number of descriptors configured. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
		struct mlx5_rxq_data *rxq;

		if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
			continue;
		rxq = &rxq_ctrl->rxq;
		n_ibv++;
		desc += 1 << rxq->elts_n;
		/* Get the max number of strides. */
		if (log_strd_num < rxq->log_strd_num)
			log_strd_num = rxq->log_strd_num;
		/* Get the max size of a stride. */
		if (log_strd_sz < rxq->log_strd_sz)
			log_strd_sz = rxq->log_strd_sz;
	}
	MLX5_ASSERT(log_strd_num && log_strd_sz);
	buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);
	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len +
		   RTE_BIT32(log_strd_num) *
		   sizeof(struct rte_mbuf_ext_shared_info) +
		   RTE_PKTMBUF_HEADROOM;
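	/*
	 * Worked example (editor's illustration): with log_strd_num = 9
	 * and log_strd_sz = 11, each MPRQ buffer spans 512 strides of
	 * 2048 bytes, so buf_len = 512 * 2048 = 1 MiB, and obj_size adds
	 * the mlx5_mprq_buf header, 512 rte_mbuf_ext_shared_info entries
	 * and RTE_PKTMBUF_HEADROOM bytes on top of it.
	 */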
	/*
	 * Received packets can be either memcpy'd or externally referenced.
	 * In case the packet is attached to an mbuf as an external buffer, as
	 * it isn't possible to predict how the buffers will be queued by the
	 * application, there's no option to exactly pre-allocate needed
	 * buffers in advance but to speculatively prepare enough buffers.
	 *
	 * In the data path, if this Mempool is depleted, PMD will try to
	 * memcpy received packets to buffers provided by the application
	 * (rxq->mp) until this Mempool becomes available again.
	 */
	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
	/*
	 * rte_mempool_create_empty() has a sanity check to refuse a large
	 * cache size compared to the number of elements.
	 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
	 * constant number 2 instead.
	 */
	obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
	/* Check whether a mempool is already allocated and if it can be reused. */
	if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
		DRV_LOG(DEBUG, "port %u mempool %s is being reused",
			dev->data->port_id, mp->name);
		/* Reuse. */
		goto exit;
	} else if (mp != NULL) {
		DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
			dev->data->port_id, mp->name);
		/*
		 * If failed to free, which means it may be still in use, no way
		 * but to keep using the existing one. On buffer underrun,
		 * packets will be memcpy'd instead of external buffer
		 * attachment.
		 */
		if (mlx5_mprq_free_mp(dev)) {
			if (mp->elt_size >= obj_size)
				goto exit;
			else
				return -rte_errno;
		}
	}
	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
				0, NULL, NULL, mlx5_mprq_buf_init,
				(void *)((uintptr_t)1 << log_strd_num),
				dev->device->numa_node, 0);
	if (mp == NULL) {
		DRV_LOG(ERR,
			"port %u failed to allocate a mempool for"
			" Multi-Packet RQ, count=%u, size=%u",
			dev->data->port_id, obj_num, obj_size);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ret = mlx5_mr_mempool_register(priv->sh->cdev, mp, false);
	if (ret < 0 && rte_errno != EEXIST) {
		ret = rte_errno;
		DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
			dev->data->port_id);
		rte_mempool_free(mp);
		rte_errno = ret;
		return -rte_errno;
	}
	priv->mprq_mp = mp;
exit:
	/* Set mempool for each Rx queue. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);

		if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
			continue;
		rxq_ctrl->rxq.mprq_mp = mp;
	}
	DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
		dev->data->port_id);
	return 0;
}

#define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
					sizeof(struct rte_vlan_hdr) * 2 + \
					sizeof(struct rte_ipv6_hdr)))
#define MAX_TCP_OPTION_SIZE 40u
#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
				 sizeof(struct rte_tcp_hdr) + \
				 MAX_TCP_OPTION_SIZE))
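/*
 * Worked arithmetic (editor's note): 14 (Ethernet) + 2 * 4 (two VLAN
 * tags) + 40 (IPv6) = 62 bytes for MLX5_MAX_TCP_HDR_OFFSET, plus
 * 20 (TCP) + 40 (maximum TCP options) = 122 bytes for
 * MLX5_MAX_LRO_HEADER_FIX.
 */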
/**
 * Adjust the maximum LRO message size.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Rx queue index.
 * @param max_lro_size
 *   The maximum size for LRO packet.
 */
static void
mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
			     uint32_t max_lro_size)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->sh->cdev->config.hca_attr.lro_max_msg_sz_mode ==
	    MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
	    MLX5_MAX_TCP_HDR_OFFSET)
		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
	max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
	MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
	max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
	if (priv->max_lro_msg_size)
		priv->max_lro_msg_size =
			RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
	else
		priv->max_lro_msg_size = max_lro_size;
	DRV_LOG(DEBUG,
		"port %u Rx Queue %u max LRO message size adjusted to %u bytes",
		dev->data->port_id, idx,
		priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
}

/**
 * Prepare both size and number of strides for Multi-Packet RQ.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param rx_seg_en
 *   Indicator if Rx segments are enabled; if so, Multi-Packet RQ is not
 *   enabled.
 * @param min_mbuf_size
 *   Non-scatter minimum mbuf size, max_rx_pktlen plus overhead.
 * @param actual_log_stride_num
 *   Log number of strides to configure for this queue.
 * @param actual_log_stride_size
 *   Log stride size to configure for this queue.
 *
 * @return
 *   0 if Multi-Packet RQ is supported, otherwise -1.
 */
static int
mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		  bool rx_seg_en, uint32_t min_mbuf_size,
		  uint32_t *actual_log_stride_num,
		  uint32_t *actual_log_stride_size)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_port_config *config = &priv->config;
	struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
	uint32_t log_min_stride_num = dev_cap->mprq.log_min_stride_num;
	uint32_t log_max_stride_num = dev_cap->mprq.log_max_stride_num;
	uint32_t log_def_stride_num =
			RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM,
					log_min_stride_num),
				log_max_stride_num);
	uint32_t log_min_stride_size = dev_cap->mprq.log_min_stride_size;
	uint32_t log_max_stride_size = dev_cap->mprq.log_max_stride_size;
	uint32_t log_def_stride_size =
			RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE,
					log_min_stride_size),
				log_max_stride_size);
	uint32_t log_stride_wqe_size;

	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
		goto unsupport;
	/* Check whether the chosen number of strides is in the supported range. */
	if (config->mprq.log_stride_num > log_max_stride_num ||
	    config->mprq.log_stride_num < log_min_stride_num) {
		*actual_log_stride_num = log_def_stride_num;
		DRV_LOG(WARNING,
			"Port %u Rx queue %u number of strides for Multi-Packet RQ is out of range, setting default value (%u)",
			dev->data->port_id, idx, RTE_BIT32(log_def_stride_num));
	} else {
		*actual_log_stride_num = config->mprq.log_stride_num;
	}
	if (config->mprq.log_stride_size) {
		/* Check whether the chosen size of a stride is in the supported range. */
		if (config->mprq.log_stride_size > log_max_stride_size ||
		    config->mprq.log_stride_size < log_min_stride_size) {
			*actual_log_stride_size = log_def_stride_size;
			DRV_LOG(WARNING,
				"Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)",
				dev->data->port_id, idx,
				RTE_BIT32(log_def_stride_size));
		} else {
			*actual_log_stride_size = config->mprq.log_stride_size;
		}
	} else {
		if (min_mbuf_size <= RTE_BIT32(log_max_stride_size))
			*actual_log_stride_size = log2above(min_mbuf_size);
		else
			goto unsupport;
	}
	log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size;
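	/*
	 * Editor's note: the WQE buffer size is 2^log_stride_wqe_size
	 * bytes, e.g. 2^(9 + 11) = 1 MiB for 512 strides of 2 KiB each.
	 */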
	/* Check if WQE buffer size is supported by hardware. */
	if (log_stride_wqe_size < dev_cap->mprq.log_min_stride_wqe_size) {
		*actual_log_stride_num = log_def_stride_num;
		*actual_log_stride_size = log_def_stride_size;
		DRV_LOG(WARNING,
			"Port %u Rx queue %u size of WQE buffer for Multi-Packet RQ is too small, setting default values (stride_num_n=%u, stride_size_n=%u)",
			dev->data->port_id, idx, RTE_BIT32(log_def_stride_num),
			RTE_BIT32(log_def_stride_size));
		log_stride_wqe_size = log_def_stride_num + log_def_stride_size;
	}
	MLX5_ASSERT(log_stride_wqe_size >=
		    dev_cap->mprq.log_min_stride_wqe_size);
	if (desc <= RTE_BIT32(*actual_log_stride_num))
		goto unsupport;
	if (min_mbuf_size > RTE_BIT32(log_stride_wqe_size)) {
		DRV_LOG(WARNING, "Port %u Rx queue %u "
			"Multi-Packet RQ is unsupported, WQE buffer size (%u) "
			"is smaller than min mbuf size (%u)",
			dev->data->port_id, idx, RTE_BIT32(log_stride_wqe_size),
			min_mbuf_size);
		goto unsupport;
	}
	DRV_LOG(DEBUG, "Port %u Rx queue %u "
		"Multi-Packet RQ is enabled strd_num_n = %u, strd_sz_n = %u",
		dev->data->port_id, idx, RTE_BIT32(*actual_log_stride_num),
		RTE_BIT32(*actual_log_stride_size));
	return 0;
unsupport:
	if (config->mprq.enabled)
		DRV_LOG(WARNING,
			"Port %u MPRQ is requested but cannot be enabled\n"
			" (requested: pkt_sz = %u, desc_num = %u,"
			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
			"  supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
			" min_stride_sz = %u, max_stride_sz = %u).\n"
			"Rx segment is %senabled.",
			dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
			RTE_BIT32(config->mprq.log_stride_size),
			RTE_BIT32(config->mprq.log_stride_num),
			config->mprq.min_rxqs_num,
			RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
			RTE_BIT32(dev_cap->mprq.log_min_stride_size),
			RTE_BIT32(dev_cap->mprq.log_max_stride_size),
			rx_seg_en ? "" : "not ");
	return -1;
}

/**
 * Create a DPDK Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_rxq_ctrl *
mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	     unsigned int socket, const struct rte_eth_rxconf *conf,
	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *tmpl;
	unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
	struct mlx5_port_config *config = &priv->config;
	uint64_t offloads = conf->offloads |
			    dev->data->dev_conf.rxmode.offloads;
	unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
	unsigned int max_rx_pktlen = lro_on_queue ?
			dev->data->dev_conf.rxmode.max_lro_pkt_size :
			dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
				RTE_ETHER_CRC_LEN;
	unsigned int non_scatter_min_mbuf_size = max_rx_pktlen +
							RTE_PKTMBUF_HEADROOM;
	unsigned int max_lro_size = 0;
	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
	uint32_t mprq_log_actual_stride_num = 0;
	uint32_t mprq_log_actual_stride_size = 0;
	bool rx_seg_en = n_seg != 1 || rx_seg[0].offset || rx_seg[0].length;
	const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
					       non_scatter_min_mbuf_size,
					       &mprq_log_actual_stride_num,
					       &mprq_log_actual_stride_size);
	/*
	 * Always allocate extra slots, even if eventually
	 * the vector Rx will not be used.
	 */
	uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
	size_t alloc_size = sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *);
	const struct rte_eth_rxseg_split *qs_seg = rx_seg;
	unsigned int tail_len;

	if (mprq_en) {
		/* Trim the number of descs needed. */
		desc >>= mprq_log_actual_stride_num;
		alloc_size += desc * sizeof(struct mlx5_mprq_buf *);
	}
	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, alloc_size, 0, socket);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	LIST_INIT(&tmpl->owners);
	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
	/*
	 * Save the original segment configuration in the shared queue
	 * descriptor for the later check on the sibling queue creation.
	 */
	tmpl->rxseg_n = n_seg;
	rte_memcpy(tmpl->rxseg, qs_seg,
		   sizeof(struct rte_eth_rxseg_split) * n_seg);
	/*
	 * Build the array of actual buffer offsets and lengths.
	 * Pad with the buffers from the last memory pool if
	 * needed to handle max size packets, replace zero length
	 * with the buffer length from the pool.
	 */
	tail_len = max_rx_pktlen;
	do {
		struct mlx5_eth_rxseg *hw_seg =
					&tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
		uint32_t buf_len, offset, seg_len;

		/*
		 * For the buffers beyond descriptions the offset is zero,
		 * the first buffer contains the headroom.
		 */
		buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
		offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
			 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
		/*
		 * For the buffers beyond descriptions the length is the
		 * pool buffer length, zero lengths are replaced with the
		 * pool buffer length as well.
		 */
		seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
			  qs_seg->length ? qs_seg->length :
					   (buf_len - offset);
		/* Check is done in long int, no overflows. */
		if (buf_len < seg_len + offset) {
			DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
				     "%u/%u can't be satisfied",
				dev->data->port_id, idx,
				qs_seg->length, qs_seg->offset);
			rte_errno = EINVAL;
			goto error;
		}
		if (seg_len > tail_len)
			seg_len = buf_len - offset;
		if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
			DRV_LOG(ERR,
				"port %u too many SGEs (%u) needed to handle"
				" requested maximum packet size %u, the maximum"
				" supported are %u", dev->data->port_id,
				tmpl->rxq.rxseg_n, max_rx_pktlen,
				MLX5_MAX_RXQ_NSEG);
			rte_errno = ENOTSUP;
			goto error;
		}
		/* Build the actual scattering element in the queue object. */
		hw_seg->mp = qs_seg->mp;
		MLX5_ASSERT(offset <= UINT16_MAX);
		MLX5_ASSERT(seg_len <= UINT16_MAX);
		hw_seg->offset = (uint16_t)offset;
		hw_seg->length = (uint16_t)seg_len;
		/*
		 * Advance the segment descriptor, the padding is based
		 * on the attributes of the last descriptor.
		 */
		if (tmpl->rxq.rxseg_n < n_seg)
			qs_seg++;
		tail_len -= RTE_MIN(tail_len, seg_len);
	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
	if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
			" configured and not enough mbuf space (%u) to contain "
			"the maximum Rx packet length (%u) with headroom (%u)",
			dev->data->port_id, idx, mb_len, max_rx_pktlen,
			RTE_PKTMBUF_HEADROOM);
		rte_errno = ENOSPC;
		goto error;
	}
	tmpl->is_hairpin = false;
	if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
			      &priv->sh->cdev->mr_scache.dev_gen, socket)) {
		/* rte_errno is already set. */
		goto error;
	}
	tmpl->socket = socket;
	if (dev->data->dev_conf.intr_conf.rxq)
		tmpl->irq = 1;
	if (mprq_en) {
		/* TODO: Rx scatter isn't supported yet. */
		tmpl->rxq.sges_n = 0;
		tmpl->rxq.log_strd_num = mprq_log_actual_stride_num;
		tmpl->rxq.log_strd_sz = mprq_log_actual_stride_size;
		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
		tmpl->rxq.strd_scatter_en =
				!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
						config->mprq.max_memcpy_len);
		max_lro_size = RTE_MIN(max_rx_pktlen,
				       RTE_BIT32(tmpl->rxq.log_strd_num) *
				       RTE_BIT32(tmpl->rxq.log_strd_sz));
	} else if (tmpl->rxq.rxseg_n == 1) {
		MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
		tmpl->rxq.sges_n = 0;
		max_lro_size = max_rx_pktlen;
	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
		unsigned int sges_n;

		if (lro_on_queue && first_mb_free_size <
		    MLX5_MAX_LRO_HEADER_FIX) {
			DRV_LOG(ERR, "Not enough space in the first segment(%u)"
				" to include the max header size(%u) for LRO",
				first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
			rte_errno = ENOTSUP;
			goto error;
		}
		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = log2above(tmpl->rxq.rxseg_n);
		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
			DRV_LOG(ERR,
				"port %u too many SGEs (%u) needed to handle"
				" requested maximum packet size %u, the maximum"
				" supported are %u", dev->data->port_id,
				1 << sges_n, max_rx_pktlen,
				1u << MLX5_MAX_LOG_RQ_SEGS);
			rte_errno = ENOTSUP;
			goto error;
		}
		tmpl->rxq.sges_n = sges_n;
		max_lro_size = max_rx_pktlen;
	}
	DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
		dev->data->port_id, 1 << tmpl->rxq.sges_n);
	if (desc % (1 << tmpl->rxq.sges_n)) {
		DRV_LOG(ERR,
			"port %u number of Rx queue descriptors (%u) is not a"
			" multiple of SGEs per packet (%u)",
			dev->data->port_id,
			desc,
			1 << tmpl->rxq.sges_n);
		rte_errno = EINVAL;
		goto error;
	}
	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
	/* Toggle Rx checksum offload if hardware supports it. */
	tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
	/* Configure Rx timestamp. */
	tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
	tmpl->rxq.timestamp_rx_flag = 0;
	if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
			&tmpl->rxq.timestamp_offset,
			&tmpl->rxq.timestamp_rx_flag) != 0) {
		DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
		goto error;
	}
	/* Configure VLAN stripping. */
	tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
	/* By default, FCS (CRC) is stripped by hardware. */
	tmpl->rxq.crc_present = 0;
	tmpl->rxq.lro = lro_on_queue;
	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
		if (priv->sh->config.hw_fcs_strip) {
			/*
			 * RQs used for LRO-enabled TIRs should not be
			 * configured to scatter the FCS.
			 */
			if (lro_on_queue)
				DRV_LOG(WARNING,
					"port %u CRC stripping has been "
					"disabled but will still be performed "
					"by hardware, because LRO is enabled",
					dev->data->port_id);
			else
				tmpl->rxq.crc_present = 1;
		} else {
			DRV_LOG(WARNING,
				"port %u CRC stripping has been disabled but will"
				" still be performed by hardware, make sure MLNX_OFED"
				" and firmware are up to date",
				dev->data->port_id);
		}
	}
	DRV_LOG(DEBUG,
		"port %u CRC stripping is %s, %u bytes will be subtracted from"
		" incoming frames to hide it",
		dev->data->port_id,
		tmpl->rxq.crc_present ? "disabled" : "enabled",
		tmpl->rxq.crc_present << 2);
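	/* Editor's note: crc_present << 2 yields the 4-byte FCS length. */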
	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
		(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
	tmpl->rxq.port_id = dev->data->port_id;
	tmpl->sh = priv->sh;
	tmpl->rxq.mp = rx_seg[0].mp;
	tmpl->rxq.elts_n = log2above(desc);
	tmpl->rxq.rq_repl_thresh = MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
	tmpl->rxq.elts = (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
	tmpl->rxq.mprq_bufs =
		(struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
	tmpl->rxq.idx = idx;
	if (conf->share_group > 0) {
		tmpl->rxq.shared = 1;
		tmpl->share_group = conf->share_group;
		tmpl->share_qid = conf->share_qid;
		LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
	}
	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
	return tmpl;
error:
	mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
	mlx5_free(tmpl);
	return NULL;
}

/**
 * Create a DPDK Rx hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param hairpin_conf
 *   The hairpin binding configuration.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_rxq_ctrl *
mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
		     uint16_t desc,
		     const struct rte_eth_hairpin_conf *hairpin_conf)
{
	uint16_t idx = rxq->idx;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *tmpl;

	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			   SOCKET_ID_ANY);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	LIST_INIT(&tmpl->owners);
	rxq->ctrl = tmpl;
	LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
	tmpl->is_hairpin = true;
	tmpl->socket = SOCKET_ID_ANY;
	tmpl->rxq.rss_hash = 0;
	tmpl->rxq.port_id = dev->data->port_id;
	tmpl->sh = priv->sh;
	tmpl->rxq.mp = NULL;
	tmpl->rxq.elts_n = log2above(desc);
	tmpl->rxq.elts = NULL;
	tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
	tmpl->rxq.idx = idx;
	rxq->hairpin_conf = *hairpin_conf;
	mlx5_rxq_ref(dev, idx);
	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
	return tmpl;
}

/**
 * Increase Rx queue reference count.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   A pointer to the queue if it exists, NULL otherwise.
 */
struct mlx5_rxq_priv *
mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);

	if (rxq != NULL)
		__atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
	return rxq;
}

/**
 * Dereference a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Updated reference count.
 */
uint32_t
mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);

	if (rxq == NULL)
		return 0;
	return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
}

/**
 * Get a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   A pointer to the queue if it exists, NULL otherwise.
 */
struct mlx5_rxq_priv *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	MLX5_ASSERT(priv->rxq_privs != NULL);
	return (*priv->rxq_privs)[idx];
}

/**
 * Get Rx queue shareable control.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   A pointer to the queue control if it exists, NULL otherwise.
 */
struct mlx5_rxq_ctrl *
mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);

	return rxq == NULL ? NULL : rxq->ctrl;
}

/**
 * Get Rx queue shareable data.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   A pointer to the queue data if it exists, NULL otherwise.
 */
struct mlx5_rxq_data *
mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);

	return rxq == NULL ? NULL : &rxq->ctrl->rxq;
}

/**
 * Increase an external Rx queue reference count.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   External RX queue index.
 *
 * @return
 *   A pointer to the queue if it exists, NULL otherwise.
 */
struct mlx5_external_rxq *
mlx5_ext_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);

	__atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
	return rxq;
}

/**
 * Decrease an external Rx queue reference count.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   External RX queue index.
 *
 * @return
 *   Updated reference count.
 */
uint32_t
mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);

	return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
}

/**
 * Get an external Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   External Rx queue index.
 *
 * @return
 *   A pointer to the queue if it exists, NULL otherwise.
 */
struct mlx5_external_rxq *
mlx5_ext_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	MLX5_ASSERT(mlx5_is_external_rxq(dev, idx));
	return &priv->ext_rxqs[idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
}
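/*
 * Editor's note: external RxQ indices occupy the top of the 16-bit
 * queue index space, so the array is indexed relative to
 * MLX5_EXTERNAL_RX_QUEUE_ID_MIN (idx == MIN maps to ext_rxqs[0]).
 */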
2147 * Dereference a list of Rx queues.
2150 * Pointer to Ethernet device.
2152 * List of Rx queues to deref.
2154 * Number of queues in the array.
2157 mlx5_rxqs_deref(struct rte_eth_dev *dev, uint16_t *queues,
2158 const uint32_t queues_n)
2162 for (i = 0; i < queues_n; i++) {
2163 if (mlx5_is_external_rxq(dev, queues[i]))
2164 claim_nonzero(mlx5_ext_rxq_deref(dev, queues[i]));
2166 claim_nonzero(mlx5_rxq_deref(dev, queues[i]));
2171 * Increase reference count for list of Rx queues.
2174 * Pointer to Ethernet device.
2176 * List of Rx queues to ref.
2178 * Number of queues in the array.
2181 * 0 on success, a negative errno value otherwise and rte_errno is set.
2184 mlx5_rxqs_ref(struct rte_eth_dev *dev, uint16_t *queues,
2185 const uint32_t queues_n)
2189 for (i = 0; i != queues_n; ++i) {
2190 if (mlx5_is_external_rxq(dev, queues[i])) {
2191 if (mlx5_ext_rxq_ref(dev, queues[i]) == NULL)
2194 if (mlx5_rxq_ref(dev, queues[i]) == NULL)
2200 mlx5_rxqs_deref(dev, queues, i);
2206 * Release a Rx queue.
2209 * Pointer to Ethernet device.
2214 * 1 while a reference on it exists, 0 when freed.
2217 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2219 struct mlx5_priv *priv = dev->data->dev_private;
2220 struct mlx5_rxq_priv *rxq;
2221 struct mlx5_rxq_ctrl *rxq_ctrl;
2224 if (priv->rxq_privs == NULL)
2226 rxq = mlx5_rxq_get(dev, idx);
2227 if (rxq == NULL || rxq->refcnt == 0)
2229 rxq_ctrl = rxq->ctrl;
2230 refcnt = mlx5_rxq_deref(dev, idx);
2233 } else if (refcnt == 1) { /* RxQ stopped. */
2234 priv->obj_ops.rxq_obj_release(rxq);
2235 if (!rxq_ctrl->started && rxq_ctrl->obj != NULL) {
2236 LIST_REMOVE(rxq_ctrl->obj, next);
2237 mlx5_free(rxq_ctrl->obj);
2238 rxq_ctrl->obj = NULL;
2240 if (!rxq_ctrl->is_hairpin) {
2241 if (!rxq_ctrl->started)
2242 rxq_free_elts(rxq_ctrl);
2243 dev->data->rx_queue_state[idx] =
2244 RTE_ETH_QUEUE_STATE_STOPPED;
2246 } else { /* Refcnt zero, closing device. */
2247 LIST_REMOVE(rxq, owner_entry);
2248 if (LIST_EMPTY(&rxq_ctrl->owners)) {
2249 if (!rxq_ctrl->is_hairpin)
2250 mlx5_mr_btree_free
2251 (&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2252 if (rxq_ctrl->rxq.shared)
2253 LIST_REMOVE(rxq_ctrl, share_entry);
2254 LIST_REMOVE(rxq_ctrl, next);
2255 mlx5_free(rxq_ctrl);
2257 dev->data->rx_queues[idx] = NULL;
2258 mlx5_free(rxq);
2259 (*priv->rxq_privs)[idx] = NULL;
2261 return 0;
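/*
 * Sketch of the release semantics above (not upstream code): while other
 * references remain the call returns 1 and leaves the queue intact; the
 * last-but-one reference destroys the HW object of a stopped queue and the
 * last one frees the control structures. A hypothetical teardown loop:
 */
static void __rte_unused
example_rxq_drain(struct rte_eth_dev *dev, uint16_t idx)
{
	while (mlx5_rxq_release(dev, idx) > 0)
		; /* Drop references until the queue is fully freed. */
}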
2265 * Verify the Rx queue list is empty.
2268 * Pointer to Ethernet device.
2271 * The number of objects not released.
2274 mlx5_rxq_verify(struct rte_eth_dev *dev)
2276 struct mlx5_priv *priv = dev->data->dev_private;
2277 struct mlx5_rxq_ctrl *rxq_ctrl;
2278 int ret = 0;
2280 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2281 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2282 dev->data->port_id, rxq_ctrl->rxq.idx);
2283 ++ret;
2285 return ret;
2289 * Verify the external Rx Queue list is empty.
2292 * Pointer to Ethernet device.
2295 * The number of objects not released.
2298 mlx5_ext_rxq_verify(struct rte_eth_dev *dev)
2300 struct mlx5_priv *priv = dev->data->dev_private;
2301 struct mlx5_external_rxq *rxq;
2302 uint32_t i;
2303 int ret = 0;
2305 if (priv->ext_rxqs == NULL)
2306 return 0;
2308 for (i = MLX5_EXTERNAL_RX_QUEUE_ID_MIN; i <= UINT16_MAX; ++i) {
2309 rxq = mlx5_ext_rxq_get(dev, i);
2310 if (rxq->refcnt < 2)
2311 continue;
2312 DRV_LOG(DEBUG, "Port %u external RxQ %u still referenced.",
2313 dev->data->port_id, i);
2314 ++ret;
2316 return ret;
2320 * Check whether RxQ type is Hairpin.
2323 * Pointer to Ethernet device.
2328 * True if Rx queue type is Hairpin, otherwise False.
2331 mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx)
2333 struct mlx5_priv *priv = dev->data->dev_private;
2334 struct mlx5_rxq_ctrl *rxq_ctrl;
2336 if (mlx5_is_external_rxq(dev, idx))
2337 return false;
2338 rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
2339 return (idx < priv->rxqs_n && rxq_ctrl != NULL && rxq_ctrl->is_hairpin);
2343 * Get a Rx hairpin queue configuration.
2346 * Pointer to Ethernet device.
2351 * Pointer to the configuration if a hairpin RX queue, otherwise NULL.
2353 const struct rte_eth_hairpin_conf *
2354 mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
2356 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
2358 return mlx5_rxq_is_hairpin(dev, idx) ? &rxq->hairpin_conf : NULL;
2362 * Match queues listed in arguments to queues contained in an indirection table.
2366 * Pointer to indirection table to match.
2368 * Queues to match to queues in the indirection table.
2370 * Number of queues in the array.
2373 * 1 if all queues in the indirection table match, 0 otherwise.
2376 mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
2377 const uint16_t *queues, uint32_t queues_n)
2379 return (ind_tbl->queues_n == queues_n) &&
2380 (!memcmp(ind_tbl->queues, queues,
2381 ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
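/*
 * Usage sketch (not upstream code): deciding whether an existing indirection
 * table already covers a wanted queue list, e.g. before creating a new one.
 * Matching requires both equal length and equal element-wise content.
 */
static bool __rte_unused
example_ind_tbl_reusable(const struct mlx5_ind_table_obj *ind_tbl,
			 const uint16_t *queues, uint32_t queues_n)
{
	return mlx5_ind_table_obj_match_queues(ind_tbl, queues, queues_n);
}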
2385 * Get an indirection table.
2388 * Pointer to Ethernet device.
2390 * Queues entering the indirection table.
2392 * Number of queues in the array.
2395 * An indirection table if found.
2397 struct mlx5_ind_table_obj *
2398 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2401 struct mlx5_priv *priv = dev->data->dev_private;
2402 struct mlx5_ind_table_obj *ind_tbl;
2404 rte_rwlock_read_lock(&priv->ind_tbls_lock);
2405 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2406 if ((ind_tbl->queues_n == queues_n) &&
2407 (memcmp(ind_tbl->queues, queues,
2408 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2409 == 0)) {
2410 __atomic_fetch_add(&ind_tbl->refcnt, 1,
2411 __ATOMIC_RELAXED);
2412 break;
2413 }
2414 }
2415 rte_rwlock_read_unlock(&priv->ind_tbls_lock);
2416 return ind_tbl;
2420 * Release an indirection table.
2423 * Pointer to Ethernet device.
2425 * Indirection table to release.
2427 * If true, then dereference RX queues related to indirection table.
2428 * Otherwise, no additional action will be taken.
2431 * 1 while a reference on it exists, 0 when freed.
2434 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2435 struct mlx5_ind_table_obj *ind_tbl,
2438 struct mlx5_priv *priv = dev->data->dev_private;
2441 rte_rwlock_write_lock(&priv->ind_tbls_lock);
2442 ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
2443 if (!ret)
2444 LIST_REMOVE(ind_tbl, next);
2445 rte_rwlock_write_unlock(&priv->ind_tbls_lock);
2446 if (ret)
2447 return 1;
2448 priv->obj_ops.ind_table_destroy(ind_tbl);
2449 if (deref_rxqs)
2450 mlx5_rxqs_deref(dev, ind_tbl->queues, ind_tbl->queues_n);
2451 mlx5_free(ind_tbl);
2452 return 0;
2456 * Verify the indirection table list is empty.
2459 * Pointer to Ethernet device.
2462 * The number of objects not released.
2465 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2467 struct mlx5_priv *priv = dev->data->dev_private;
2468 struct mlx5_ind_table_obj *ind_tbl;
2469 int ret = 0;
2471 rte_rwlock_read_lock(&priv->ind_tbls_lock);
2472 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2474 "port %u indirection table obj %p still referenced",
2475 dev->data->port_id, (void *)ind_tbl);
2478 rte_rwlock_read_unlock(&priv->ind_tbls_lock);
2483 * Set up the fields of an indirection table structure.
2486 * Pointer to Ethernet device.
2488 * Indirection table to modify.
2490 * Whether to increment RxQ reference counters.
2493 * 0 on success, a negative errno value otherwise and rte_errno is set.
2496 mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
2497 struct mlx5_ind_table_obj *ind_tbl,
2500 struct mlx5_priv *priv = dev->data->dev_private;
2501 uint32_t queues_n = ind_tbl->queues_n;
2503 const unsigned int n = rte_is_power_of_2(queues_n) ?
2504 log2above(queues_n) :
2505 log2above(priv->sh->dev_cap.ind_table_max_size);
2507 if (ref_qs && mlx5_rxqs_ref(dev, ind_tbl->queues, queues_n) < 0) {
2508 DRV_LOG(DEBUG, "Port %u invalid indirection table queues.",
2509 dev->data->port_id);
2510 return -rte_errno;
2511 }
2512 ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
2513 if (ret) {
2514 DRV_LOG(DEBUG, "Port %u cannot create a new indirection table.",
2515 dev->data->port_id);
2516 if (ref_qs) {
2517 int err = rte_errno;
2519 mlx5_rxqs_deref(dev, ind_tbl->queues, queues_n);
2520 rte_errno = err;
2521 }
2522 return ret;
2523 }
2524 __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
2525 return 0;
2529 * Create an indirection table.
2532 * Pointer to Ethernet device.
2534 * Queues entering the indirection table.
2536 * Number of queues in the array.
2538 * Indirection table for Standalone queue.
2540 * Whether to increment RxQ reference counters.
2543 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2545 struct mlx5_ind_table_obj *
2546 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2547 uint32_t queues_n, bool standalone, bool ref_qs)
2549 struct mlx5_priv *priv = dev->data->dev_private;
2550 struct mlx5_ind_table_obj *ind_tbl;
2554 * Allocate maximum queues for shared action as queue number
2555 * may be modified later.
2557 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
2558 (standalone ? priv->rxqs_n : queues_n) *
2559 sizeof(uint16_t), 0, SOCKET_ID_ANY);
2560 if (!ind_tbl) {
2561 rte_errno = ENOMEM;
2562 return NULL;
2563 }
2564 ind_tbl->queues_n = queues_n;
2565 ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
2566 memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
2567 ret = mlx5_ind_table_obj_setup(dev, ind_tbl, ref_qs);
2572 rte_rwlock_write_lock(&priv->ind_tbls_lock);
2573 if (!standalone)
2574 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2575 else
2576 LIST_INSERT_HEAD(&priv->standalone_ind_tbls, ind_tbl, next);
2577 rte_rwlock_write_unlock(&priv->ind_tbls_lock);
2578 return ind_tbl;
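/*
 * Usage sketch under assumed context (not upstream code): create a regular
 * (non-standalone) indirection table with queue references taken, then drop
 * it again, which also dereferences the queues.
 */
static void __rte_unused
example_ind_tbl_roundtrip(struct rte_eth_dev *dev, const uint16_t *queues,
			  uint32_t queues_n)
{
	struct mlx5_ind_table_obj *ind_tbl;

	ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
					 false /* standalone */,
					 true /* ref_qs */);
	if (ind_tbl != NULL)
		mlx5_ind_table_obj_release(dev, ind_tbl, true /* deref_rxqs */);
}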
2583 mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused,
2584 struct mlx5_ind_table_obj *ind_tbl)
2588 refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
2592 * Modification of indirection tables having more than 1
2593 * reference is unsupported.
2596 "Port %u cannot modify indirection table %p (refcnt %u > 1).",
2597 dev->data->port_id, (void *)ind_tbl, refcnt);
2603 * Modify an indirection table.
2606 * Pointer to Ethernet device.
2608 * Indirection table to modify.
2610 * Queues replacement for the indirection table.
2612 * Number of queues in the array.
2614 * Indirection table for Standalone queue.
2616 * Whether to increment new RxQ set reference counters.
2617 * @param deref_old_qs
2618 * Whether to decrement old RxQ set reference counters.
2621 * 0 on success, a negative errno value otherwise and rte_errno is set.
2624 mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
2625 struct mlx5_ind_table_obj *ind_tbl,
2626 uint16_t *queues, const uint32_t queues_n,
2627 bool standalone, bool ref_new_qs, bool deref_old_qs)
2629 struct mlx5_priv *priv = dev->data->dev_private;
2631 const unsigned int n = rte_is_power_of_2(queues_n) ?
2632 log2above(queues_n) :
2633 log2above(priv->sh->dev_cap.ind_table_max_size);
2635 MLX5_ASSERT(standalone);
2636 RTE_SET_USED(standalone);
2637 if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
2638 return -rte_errno;
2639 if (ref_new_qs && mlx5_rxqs_ref(dev, queues, queues_n) < 0) {
2640 DRV_LOG(DEBUG, "Port %u invalid indirection table queues.",
2641 dev->data->port_id);
2642 return -rte_errno;
2643 }
2644 MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2645 ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
2646 if (ret) {
2647 DRV_LOG(DEBUG, "Port %u cannot modify indirection table.",
2648 dev->data->port_id);
2649 if (ref_new_qs) {
2650 int err = rte_errno;
2652 mlx5_rxqs_deref(dev, queues, queues_n);
2653 rte_errno = err;
2654 }
2655 return ret;
2656 }
2657 if (deref_old_qs)
2658 mlx5_rxqs_deref(dev, ind_tbl->queues, ind_tbl->queues_n);
2659 ind_tbl->queues_n = queues_n;
2660 ind_tbl->queues = queues;
2661 return 0;
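/*
 * Sketch (not upstream code): the only supported modification is on a
 * standalone table holding a single reference. Note the new queue array
 * pointer is stored as-is, not copied, so it must stay valid for the
 * table's lifetime.
 */
static int __rte_unused
example_ind_tbl_requeue(struct rte_eth_dev *dev,
			struct mlx5_ind_table_obj *ind_tbl,
			uint16_t *new_queues, uint32_t new_n)
{
	return mlx5_ind_table_obj_modify(dev, ind_tbl, new_queues, new_n,
					 true /* standalone */,
					 true /* ref_new_qs */,
					 true /* deref_old_qs */);
}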
2665 * Attach an indirection table to its queues.
2668 * Pointer to Ethernet device.
2670 * Indirection table to attach.
2673 * 0 on success, a negative errno value otherwise and rte_errno is set.
2676 mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
2677 struct mlx5_ind_table_obj *ind_tbl)
2681 ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
2682 ind_tbl->queues_n,
2683 true /* standalone */,
2684 true /* ref_new_qs */,
2685 false /* deref_old_qs */);
2686 if (ret != 0) {
2687 DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
2688 dev->data->port_id, (void *)ind_tbl);
2689 return ret;
2690 }
2691 return 0;
2693 * Detach an indirection table from its queues.
2696 * Pointer to Ethernet device.
2698 * Indirection table to detach.
2701 * 0 on success, a negative errno value otherwise and rte_errno is set.
2704 mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
2705 struct mlx5_ind_table_obj *ind_tbl)
2707 struct mlx5_priv *priv = dev->data->dev_private;
2708 const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ?
2709 log2above(ind_tbl->queues_n) :
2710 log2above(priv->sh->dev_cap.ind_table_max_size);
2714 ret = mlx5_ind_table_obj_check_standalone(dev, ind_tbl);
2715 if (ret != 0)
2716 return ret;
2717 MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2718 ret = priv->obj_ops.ind_table_modify(dev, n, NULL, 0, ind_tbl);
2719 if (ret != 0) {
2720 DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
2721 dev->data->port_id, (void *)ind_tbl);
2722 return ret;
2723 }
2724 for (i = 0; i < ind_tbl->queues_n; i++)
2725 mlx5_rxq_release(dev, ind_tbl->queues[i]);
2726 return 0;
2730 mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
2731 void *cb_ctx)
2733 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2734 struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2735 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2737 return (hrxq->rss_key_len != rss_desc->key_len ||
2738 memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
2739 hrxq->hws_flags != rss_desc->hws_flags ||
2740 hrxq->hash_fields != rss_desc->hash_fields ||
2741 hrxq->ind_table->queues_n != rss_desc->queue_num ||
2742 memcmp(hrxq->ind_table->queues, rss_desc->queue,
2743 rss_desc->queue_num * sizeof(rss_desc->queue[0])));
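/*
 * Note: the list match callback uses an inverted convention, 0 means
 * "match" and non-zero "mismatch", hence the ORed comparisons above.
 * Spelled out field by field as an editorial sketch (not upstream code):
 */
static int __rte_unused
example_hrxq_mismatch(const struct mlx5_hrxq *hrxq,
		      const struct mlx5_flow_rss_desc *rss_desc)
{
	if (hrxq->rss_key_len != rss_desc->key_len)
		return 1; /* Mismatch. */
	if (memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) != 0)
		return 1;
	if (hrxq->hws_flags != rss_desc->hws_flags)
		return 1;
	if (hrxq->hash_fields != rss_desc->hash_fields)
		return 1;
	if (hrxq->ind_table->queues_n != rss_desc->queue_num)
		return 1;
	return memcmp(hrxq->ind_table->queues, rss_desc->queue,
		      rss_desc->queue_num * sizeof(rss_desc->queue[0])) != 0;
}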
2747 * Modify an Rx Hash queue configuration.
2750 * Pointer to Ethernet device.
2752 * Index to Hash Rx queue to modify.
2754 * RSS key for the Rx hash queue.
2755 * @param rss_key_len
2756 * RSS key length.
2757 * @param hash_fields
2758 * Verbs protocol hash field to make the RSS on.
2760 * Queues entering the hash queue. In case of empty hash_fields, only the
2761 * first queue index is used for the indirection table.
2766 * 0 on success, a negative errno value otherwise and rte_errno is set.
2769 mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
2770 const uint8_t *rss_key, uint32_t rss_key_len,
2771 uint64_t hash_fields,
2772 const uint16_t *queues, uint32_t queues_n)
2775 struct mlx5_ind_table_obj *ind_tbl = NULL;
2776 struct mlx5_priv *priv = dev->data->dev_private;
2777 struct mlx5_hrxq *hrxq =
2778 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2779 bool dev_started = !!dev->data->dev_started;
2787 if (hrxq->rss_key_len != rss_key_len) {
2788 /* rss_key_len is fixed at 40 bytes and is not supposed to change. */
2789 rte_errno = EINVAL;
2790 return -rte_errno;
2791 }
2792 queues_n = hash_fields ? queues_n : 1;
2793 if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
2794 queues, queues_n)) {
2795 ind_tbl = hrxq->ind_table;
2797 if (hrxq->standalone) {
2799 * Replacement of indirection table unsupported for
2800 * standalone hrxq objects (used by shared RSS).
2802 rte_errno = ENOTSUP;
2803 return -rte_errno;
2804 }
2805 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2806 if (!ind_tbl)
2807 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
2808 hrxq->standalone,
2809 dev_started);
2810 }
2811 if (!ind_tbl) {
2812 rte_errno = ENOMEM;
2813 return -rte_errno;
2814 }
2815 MLX5_ASSERT(priv->obj_ops.hrxq_modify);
2816 ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
2817 hash_fields, ind_tbl);
2818 if (ret) {
2819 rte_errno = errno;
2820 goto error;
2821 }
2822 if (ind_tbl != hrxq->ind_table) {
2823 MLX5_ASSERT(!hrxq->standalone);
2824 mlx5_ind_table_obj_release(dev, hrxq->ind_table, true);
2825 hrxq->ind_table = ind_tbl;
2827 hrxq->hash_fields = hash_fields;
2828 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2829 return 0;
2830 error:
2831 err = rte_errno;
2832 if (ind_tbl != hrxq->ind_table) {
2833 MLX5_ASSERT(!hrxq->standalone);
2834 mlx5_ind_table_obj_release(dev, ind_tbl, true);
2835 }
2836 rte_errno = err;
2837 return -rte_errno;
2840 static void
2841 __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2843 struct mlx5_priv *priv = dev->data->dev_private;
2845 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2846 if (hrxq->hws_flags)
2847 mlx5dr_action_destroy(hrxq->action);
2848 else
2849 mlx5_glue->destroy_flow_action(hrxq->action);
2850 #endif
2851 priv->obj_ops.hrxq_destroy(hrxq);
2852 if (!hrxq->standalone) {
2853 mlx5_ind_table_obj_release(dev, hrxq->ind_table,
2854 hrxq->hws_flags ?
2855 (!!dev->data->dev_started) : true);
2857 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
2861 * Release the hash Rx queue.
2864 * Pointer to Ethernet device.
2866 * Index to Hash Rx queue to release.
2869 * mlx5 list pointer.
2871 * Hash queue entry pointer.
2874 mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
2876 struct rte_eth_dev *dev = tool_ctx;
2877 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2879 __mlx5_hrxq_remove(dev, hrxq);
2882 static struct mlx5_hrxq *
2883 __mlx5_hrxq_create(struct rte_eth_dev *dev,
2884 struct mlx5_flow_rss_desc *rss_desc)
2886 struct mlx5_priv *priv = dev->data->dev_private;
2887 const uint8_t *rss_key = rss_desc->key;
2888 uint32_t rss_key_len = rss_desc->key_len;
2889 bool standalone = !!rss_desc->shared_rss;
2890 const uint16_t *queues =
2891 standalone ? rss_desc->const_q : rss_desc->queue;
2892 uint32_t queues_n = rss_desc->queue_num;
2893 struct mlx5_hrxq *hrxq = NULL;
2894 uint32_t hrxq_idx = 0;
2895 struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
2898 queues_n = rss_desc->hash_fields ? queues_n : 1;
2899 if (!ind_tbl && !rss_desc->hws_flags)
2900 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2901 if (!ind_tbl)
2902 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
2903 standalone ||
2904 rss_desc->hws_flags,
2905 !!dev->data->dev_started);
2906 if (!ind_tbl)
2907 return NULL;
2908 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2909 if (!hrxq)
2910 goto error;
2911 hrxq->standalone = standalone;
2912 hrxq->idx = hrxq_idx;
2913 hrxq->ind_table = ind_tbl;
2914 hrxq->rss_key_len = rss_key_len;
2915 hrxq->hash_fields = rss_desc->hash_fields;
2916 hrxq->hws_flags = rss_desc->hws_flags;
2917 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2918 ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
2919 if (ret < 0)
2920 goto error;
2921 return hrxq;
2922 error:
2923 if (!rss_desc->ind_tbl)
2924 mlx5_ind_table_obj_release(dev, ind_tbl, true);
2925 if (hrxq)
2926 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2927 return NULL;
2930 struct mlx5_list_entry *
2931 mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx)
2933 struct rte_eth_dev *dev = tool_ctx;
2934 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2935 struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2936 struct mlx5_hrxq *hrxq;
2938 hrxq = __mlx5_hrxq_create(dev, rss_desc);
2939 return hrxq ? &hrxq->entry : NULL;
2942 struct mlx5_list_entry *
2943 mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2944 void *cb_ctx __rte_unused)
2946 struct rte_eth_dev *dev = tool_ctx;
2947 struct mlx5_priv *priv = dev->data->dev_private;
2948 struct mlx5_hrxq *hrxq;
2949 uint32_t hrxq_idx = 0;
2951 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2952 if (!hrxq)
2953 return NULL;
2954 memcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN);
2955 hrxq->idx = hrxq_idx;
2956 return &hrxq->entry;
2960 mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
2962 struct rte_eth_dev *dev = tool_ctx;
2963 struct mlx5_priv *priv = dev->data->dev_private;
2964 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2966 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
2970 * Get an Rx Hash queue.
2973 * Pointer to Ethernet device.
2975 * RSS configuration for the Rx hash queue.
2978 * A hash Rx queue on success.
2980 struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
2981 struct mlx5_flow_rss_desc *rss_desc)
2983 struct mlx5_priv *priv = dev->data->dev_private;
2984 struct mlx5_hrxq *hrxq = NULL;
2985 struct mlx5_list_entry *entry;
2986 struct mlx5_flow_cb_ctx ctx = {
2987 .data = rss_desc,
2988 };
2990 if (rss_desc->shared_rss) {
2991 hrxq = __mlx5_hrxq_create(dev, rss_desc);
2992 } else {
2993 entry = mlx5_list_register(priv->hrxqs, &ctx);
2994 if (entry == NULL)
2995 return NULL;
2996 hrxq = container_of(entry, typeof(*hrxq), entry);
2998 return hrxq;
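/*
 * Usage sketch under assumed context (not upstream code): with a filled
 * struct mlx5_flow_rss_desc, look up or create the hash Rx queue and drop
 * the reference once done with it.
 */
static void __rte_unused
example_hrxq_roundtrip(struct rte_eth_dev *dev,
		       struct mlx5_flow_rss_desc *rss_desc)
{
	struct mlx5_hrxq *hrxq = mlx5_hrxq_get(dev, rss_desc);

	if (hrxq != NULL)
		mlx5_hrxq_obj_release(dev, hrxq);
}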
3002 * Release the hash Rx queue.
3005 * Pointer to Ethernet device.
3007 * Hash Rx queue to release.
3010 * 1 while a reference on it exists, 0 when freed.
3012 int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
3014 struct mlx5_priv *priv = dev->data->dev_private;
3016 if (hrxq == NULL)
3017 return 0;
3018 if (!hrxq->standalone)
3019 return mlx5_list_unregister(priv->hrxqs, &hrxq->entry);
3020 __mlx5_hrxq_remove(dev, hrxq);
3021 return 0;
3025 * Release the hash Rx queue with index.
3028 * Pointer to Ethernet device.
3030 * Index to Hash Rx queue to release.
3033 * 1 while a reference on it exists, 0 when freed.
3035 int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
3037 struct mlx5_priv *priv = dev->data->dev_private;
3038 struct mlx5_hrxq *hrxq;
3040 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
3041 return mlx5_hrxq_obj_release(dev, hrxq);
3045 * Create a drop Rx Hash queue.
3048 * Pointer to Ethernet device.
3051 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
3054 mlx5_drop_action_create(struct rte_eth_dev *dev)
3056 struct mlx5_priv *priv = dev->data->dev_private;
3057 struct mlx5_hrxq *hrxq = NULL;
3060 if (priv->drop_queue.hrxq)
3061 return priv->drop_queue.hrxq;
3062 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
3065 "Port %u cannot allocate memory for drop queue.",
3066 dev->data->port_id);
3070 priv->drop_queue.hrxq = hrxq;
3071 hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
3072 0, SOCKET_ID_ANY);
3073 if (!hrxq->ind_table) {
3074 rte_errno = ENOMEM;
3075 goto error;
3076 }
3077 ret = priv->obj_ops.drop_action_create(dev);
3083 if (hrxq->ind_table)
3084 mlx5_free(hrxq->ind_table);
3085 priv->drop_queue.hrxq = NULL;
3092 * Release a drop hash Rx queue.
3095 * Pointer to Ethernet device.
3098 mlx5_drop_action_destroy(struct rte_eth_dev *dev)
3100 struct mlx5_priv *priv = dev->data->dev_private;
3101 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
3103 if (!priv->drop_queue.hrxq)
3104 return;
3105 priv->obj_ops.drop_action_destroy(dev);
3106 mlx5_free(priv->drop_queue.rxq);
3107 mlx5_free(hrxq->ind_table);
3108 mlx5_free(hrxq);
3109 priv->drop_queue.rxq = NULL;
3110 priv->drop_queue.hrxq = NULL;
3114 * Verify the Rx hash queue list is empty.
3117 * Pointer to Ethernet device.
3120 * The number of objects not released.
3123 mlx5_hrxq_verify(struct rte_eth_dev *dev)
3125 struct mlx5_priv *priv = dev->data->dev_private;
3127 return mlx5_list_get_entry_num(priv->hrxqs);
3131 * Set the Rx queue timestamp conversion parameters.
3134 * Pointer to the Ethernet device structure.
3137 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
3139 struct mlx5_priv *priv = dev->data->dev_private;
3140 struct mlx5_dev_ctx_shared *sh = priv->sh;
3143 for (i = 0; i != priv->rxqs_n; ++i) {
3144 struct mlx5_rxq_data *data = mlx5_rxq_data_get(dev, i);
3146 if (data == NULL)
3147 continue;
3148 data->sh = sh;
3149 data->rt_timestamp = sh->dev_cap.rt_timestamp;
3154 * Validate given external RxQ rte_flow index, and get pointer to concurrent
3155 * external RxQ object to map/unmap.
3157 * @param[in] port_id
3158 * The port identifier of the Ethernet device.
3159 * @param[in] dpdk_idx
3160 * Queue index in rte_flow.
3163 * Pointer to concurrent external RxQ on success,
3164 * NULL otherwise and rte_errno is set.
3166 static struct mlx5_external_rxq *
3167 mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
3169 struct rte_eth_dev *dev;
3170 struct mlx5_priv *priv;
3172 if (dpdk_idx < MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
3173 DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
3174 dpdk_idx, MLX5_EXTERNAL_RX_QUEUE_ID_MIN, UINT16_MAX);
3175 rte_errno = EINVAL;
3176 return NULL;
3177 }
3178 if (rte_eth_dev_is_valid_port(port_id) < 0) {
3179 DRV_LOG(ERR, "There is no Ethernet device for port %u.",
3184 dev = &rte_eth_devices[port_id];
3185 priv = dev->data->dev_private;
3186 if (!mlx5_imported_pd_and_ctx(priv->sh->cdev)) {
3187 DRV_LOG(ERR, "Port %u "
3188 "external RxQ isn't supported on local PD and CTX.",
3190 rte_errno = ENOTSUP;
3193 if (!mlx5_devx_obj_ops_en(priv->sh)) {
3195 "Port %u external RxQ isn't supported by Verbs API.",
3197 rte_errno = ENOTSUP;
3201 * When user configures remote PD and CTX and device creates RxQ by
3202 * DevX, external RxQs array is allocated.
3204 MLX5_ASSERT(priv->ext_rxqs != NULL);
3205 return &priv->ext_rxqs[dpdk_idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
3209 rte_pmd_mlx5_external_rx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
3210 uint32_t hw_idx)
3212 struct mlx5_external_rxq *ext_rxq;
3213 uint32_t unmapped = 0;
3215 ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
3216 if (ext_rxq == NULL)
3217 return -rte_errno;
3218 if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
3219 __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
3220 if (ext_rxq->hw_id != hw_idx) {
3221 DRV_LOG(ERR, "Port %u external RxQ index %u "
3222 "is already mapped to HW index (requesting is "
3223 "%u, existing is %u).",
3224 port_id, dpdk_idx, hw_idx, ext_rxq->hw_id);
3225 rte_errno = EEXIST;
3226 return -rte_errno;
3227 }
3228 DRV_LOG(WARNING, "Port %u external RxQ index %u "
3229 "is already mapped to the requested HW index (%u)",
3230 port_id, dpdk_idx, hw_idx);
3232 } else {
3233 ext_rxq->hw_id = hw_idx;
3234 DRV_LOG(DEBUG, "Port %u external RxQ index %u "
3235 "is successfully mapped to the requested HW index (%u)",
3236 port_id, dpdk_idx, hw_idx);
3237 }
3238 return 0;
3242 rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id, uint16_t dpdk_idx)
3244 struct mlx5_external_rxq *ext_rxq;
3245 uint32_t mapped = 1;
3247 ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
3248 if (ext_rxq == NULL)
3249 return -rte_errno;
3250 if (ext_rxq->refcnt > 1) {
3251 DRV_LOG(ERR, "Port %u external RxQ index %u still referenced.",
3252 port_id, dpdk_idx);
3253 rte_errno = EINVAL;
3254 return -rte_errno;
3255 }
3256 if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
3257 __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
3258 DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
3259 port_id, dpdk_idx);
3260 rte_errno = EINVAL;
3261 return -rte_errno;
3262 }
3263 DRV_LOG(DEBUG,
3264 "Port %u external RxQ index %u is successfully unmapped.",
3265 port_id, dpdk_idx);
3266 return 0;