1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
20 #include <rte_eal_paging.h>
22 #include <mlx5_glue.h>
23 #include <mlx5_malloc.h>
24 #include <mlx5_common_mr.h>
26 #include "mlx5_defs.h"
30 #include "mlx5_utils.h"
31 #include "mlx5_autoconf.h"
32 #include "mlx5_devx.h"
35 /* Default RSS hash key also used for ConnectX-3. */
36 uint8_t rss_hash_default_key[] = {
37 0x2c, 0xc6, 0x81, 0xd1,
38 0x5b, 0xdb, 0xf4, 0xf7,
39 0xfc, 0xa2, 0x83, 0x19,
40 0xdb, 0x1a, 0x3e, 0x94,
41 0x6b, 0x9e, 0x38, 0xd9,
42 0x2c, 0x9c, 0x03, 0xd1,
43 0xad, 0x99, 0x44, 0xa7,
44 0xd9, 0x56, 0x3d, 0x59,
45 0x06, 0x3c, 0x25, 0xf3,
46 0xfc, 0x1f, 0xdc, 0x2a,
49 /* Length of the default RSS hash key. */
50 static_assert(MLX5_RSS_HASH_KEY_LEN ==
51 (unsigned int)sizeof(rss_hash_default_key),
52 "wrong RSS default key size.");
55 * Calculate the number of CQEs in CQ for the Rx queue.
58 * Pointer to receive queue structure.
61 * Number of CQEs in CQ.
64 mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
67 unsigned int wqe_n = 1 << rxq_data->elts_n;
69 if (mlx5_rxq_mprq_enabled(rxq_data))
70 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
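/*
 * With MPRQ each WQE carries (1 << strd_num_n) strides and every
 * stride may complete on its own, so the CQ needs one entry per
 * stride rather than one per WQE.
 */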
77 * Allocate RX queue elements for Multi-Packet RQ.
80 * Pointer to RX queue structure.
83 * 0 on success, a negative errno value otherwise and rte_errno is set.
86 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
88 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
89 unsigned int wqe_n = 1 << rxq->elts_n;
93 /* Iterate on segments. */
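/*
 * Note: the loop below runs one extra iteration (i == wqe_n) to
 * allocate a spare buffer that is kept aside as mprq_repl for later
 * replenishment.
 */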
94 for (i = 0; i <= wqe_n; ++i) {
95 struct mlx5_mprq_buf *buf;
97 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
98 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
103 (*rxq->mprq_bufs)[i] = buf;
105 rxq->mprq_repl = buf;
108 "port %u MPRQ queue %u allocated and configured %u segments",
109 rxq->port_id, rxq->idx, wqe_n);
112 err = rte_errno; /* Save rte_errno before cleanup. */
114 for (i = 0; (i != wqe_n); ++i) {
115 if ((*rxq->mprq_bufs)[i] != NULL)
116 rte_mempool_put(rxq->mprq_mp,
117 (*rxq->mprq_bufs)[i]);
118 (*rxq->mprq_bufs)[i] = NULL;
120 DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
121 rxq->port_id, rxq->idx);
122 rte_errno = err; /* Restore rte_errno. */
127 * Allocate RX queue elements for Single-Packet RQ.
130 * Pointer to RX queue structure.
133 * 0 on success, negative errno value on failure.
136 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
138 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
139 unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
140 (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
141 (1 << rxq_ctrl->rxq.elts_n);
145 /* Iterate on segments. */
146 for (i = 0; (i != elts_n); ++i) {
147 struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
148 struct rte_mbuf *buf;
150 buf = rte_pktmbuf_alloc(seg->mp);
152 if (rxq_ctrl->share_group == 0)
153 DRV_LOG(ERR, "port %u queue %u empty mbuf pool",
154 RXQ_PORT_ID(rxq_ctrl),
157 DRV_LOG(ERR, "share group %u queue %u empty mbuf pool",
158 rxq_ctrl->share_group,
159 rxq_ctrl->share_qid);
163 /* Headroom is reserved by rte_pktmbuf_alloc(). */
164 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
165 /* Buffer is supposed to be empty. */
166 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
167 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
168 MLX5_ASSERT(!buf->next);
169 SET_DATA_OFF(buf, seg->offset);
170 PORT(buf) = rxq_ctrl->rxq.port_id;
171 DATA_LEN(buf) = seg->length;
172 PKT_LEN(buf) = seg->length;
174 (*rxq_ctrl->rxq.elts)[i] = buf;
176 /* If Rx vector is activated. */
177 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
178 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
179 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
180 struct rte_pktmbuf_pool_private *priv =
181 (struct rte_pktmbuf_pool_private *)
182 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
185 /* Initialize default rearm_data for vPMD. */
186 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
187 rte_mbuf_refcnt_set(mbuf_init, 1);
188 mbuf_init->nb_segs = 1;
189 mbuf_init->port = rxq->port_id;
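/*
 * Pools created with pinned external buffers hand out mbufs that
 * always carry the EXTERNAL flag, so it is baked into the rearm
 * template below.
 */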
190 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
191 mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
193 * prevent compiler reordering:
194 * rearm_data covers previous fields.
196 rte_compiler_barrier();
197 rxq->mbuf_initializer =
198 *(rte_xmm_t *)&mbuf_init->rearm_data;
199 /* Padding with a fake mbuf for vectorized Rx. */
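/*
 * The extra MLX5_VPMD_DESCS_PER_LOOP entries let the vectorized burst
 * read a full batch of descriptors past elts_n without touching
 * uninitialized pointers.
 */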
200 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
201 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
203 if (rxq_ctrl->share_group == 0)
205 "port %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
206 RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx, elts_n,
207 elts_n / (1 << rxq_ctrl->rxq.sges_n));
210 "share group %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
211 rxq_ctrl->share_group, rxq_ctrl->share_qid, elts_n,
212 elts_n / (1 << rxq_ctrl->rxq.sges_n));
215 err = rte_errno; /* Save rte_errno before cleanup. */
217 for (i = 0; (i != elts_n); ++i) {
218 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
219 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
220 (*rxq_ctrl->rxq.elts)[i] = NULL;
222 if (rxq_ctrl->share_group == 0)
223 DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
224 RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx);
226 DRV_LOG(DEBUG, "share group %u SPRQ queue %u failed, freed everything",
227 rxq_ctrl->share_group, rxq_ctrl->share_qid);
228 rte_errno = err; /* Restore rte_errno. */
233 * Allocate RX queue elements.
236 * Pointer to RX queue structure.
239 * 0 on success, negative errno value on failure.
242 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
247 * For MPRQ we need to allocate both MPRQ buffers
248 * for WQEs and simple mbufs for vector processing.
250 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
251 ret = rxq_alloc_elts_mprq(rxq_ctrl);
253 ret = rxq_alloc_elts_sprq(rxq_ctrl);
258 * Free RX queue elements for Multi-Packet RQ.
261 * Pointer to RX queue structure.
264 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
266 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
269 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
270 rxq->port_id, rxq->idx, (1u << rxq->elts_n));
271 if (rxq->mprq_bufs == NULL)
273 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
274 if ((*rxq->mprq_bufs)[i] != NULL)
275 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
276 (*rxq->mprq_bufs)[i] = NULL;
278 if (rxq->mprq_repl != NULL) {
279 mlx5_mprq_buf_free(rxq->mprq_repl);
280 rxq->mprq_repl = NULL;
285 * Free RX queue elements for Single-Packet RQ.
288 * Pointer to RX queue structure.
291 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
293 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
294 const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
295 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
297 const uint16_t q_mask = q_n - 1;
298 uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
299 rxq->elts_ci : rxq->rq_ci;
300 uint16_t used = q_n - (elts_ci - rxq->rq_pi);
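/*
 * 'used' is the number of ring slots outside the [rq_pi, elts_ci)
 * window; in vectorized mode their mbufs were already delivered to
 * the application and must be skipped when freeing below.
 */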
303 if (rxq_ctrl->share_group == 0)
304 DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
305 RXQ_PORT_ID(rxq_ctrl), rxq->idx, q_n);
307 DRV_LOG(DEBUG, "share group %u Rx queue %u freeing %d WRs",
308 rxq_ctrl->share_group, rxq_ctrl->share_qid, q_n);
309 if (rxq->elts == NULL)
312 * Some mbufs in the ring belong to the application;
313 * they cannot be freed.
315 if (mlx5_rxq_check_vec_support(rxq) > 0) {
316 for (i = 0; i < used; ++i)
317 (*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
318 rxq->rq_pi = elts_ci;
320 for (i = 0; i != q_n; ++i) {
321 if ((*rxq->elts)[i] != NULL)
322 rte_pktmbuf_free_seg((*rxq->elts)[i]);
323 (*rxq->elts)[i] = NULL;
328 * Free RX queue elements.
331 * Pointer to RX queue structure.
334 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
337 * For MPRQ we need to free both the MPRQ buffers
338 * used for WQEs and the simple mbufs used for vector processing.
340 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
341 rxq_free_elts_mprq(rxq_ctrl);
342 rxq_free_elts_sprq(rxq_ctrl);
346 * Returns the per-queue supported offloads.
349 * Pointer to Ethernet device.
352 * Supported Rx offloads.
355 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
357 struct mlx5_priv *priv = dev->data->dev_private;
358 struct mlx5_dev_config *config = &priv->config;
359 uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
360 RTE_ETH_RX_OFFLOAD_TIMESTAMP |
361 RTE_ETH_RX_OFFLOAD_RSS_HASH);
363 if (!config->mprq.enabled)
364 offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
365 if (config->hw_fcs_strip)
366 offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
368 offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
369 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
370 RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
371 if (config->hw_vlan_strip)
372 offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
373 if (MLX5_LRO_SUPPORTED(dev))
374 offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
380 * Returns the per-port supported offloads.
383 * Supported Rx offloads.
386 mlx5_get_rx_port_offloads(void)
388 uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
394 * Verify if the queue can be released.
397 * Pointer to Ethernet device.
402 * 1 if the queue can be released
403 * 0 if the queue cannot be released because there are references to it.
404 * Negative errno and rte_errno is set if queue doesn't exist.
407 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
409 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
415 return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
418 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
420 rxq_sync_cq(struct mlx5_rxq_data *rxq)
422 const uint16_t cqe_n = 1 << rxq->cqe_n;
423 const uint16_t cqe_mask = cqe_n - 1;
424 volatile struct mlx5_cqe *cqe;
429 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
430 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
431 if (ret == MLX5_CQE_STATUS_HW_OWN)
433 if (ret == MLX5_CQE_STATUS_ERR) {
437 MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
438 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
442 /* Compute the next non compressed CQE. */
443 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
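/*
 * For a compressed CQE the byte_cnt field holds the number of
 * mini-CQEs in the session, so this advances cq_ci past the whole
 * compressed block.
 */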
446 /* Move all CQEs to HW ownership, including possible MiniCQEs. */
447 for (i = 0; i < cqe_n; i++) {
448 cqe = &(*rxq->cqes)[i];
449 cqe->op_own = MLX5_CQE_INVALIDATE;
451 /* Resync CQE and WQE (WQ in RESET state). */
453 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
455 *rxq->rq_db = rte_cpu_to_be_32(0);
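/*
 * The CQ doorbell publishes the updated consumer index while the RQ
 * doorbell is cleared: the WQ is in RESET and will be refilled when
 * the queue is started again.
 */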
460 * Rx queue stop. Device queue goes to the RESET state,
461 * all involved mbufs are freed from WQ.
464 * Pointer to Ethernet device structure.
469 * 0 on success, a negative errno value otherwise and rte_errno is set.
472 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
474 struct mlx5_priv *priv = dev->data->dev_private;
475 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
476 struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
479 MLX5_ASSERT(rxq != NULL && rxq_ctrl != NULL);
480 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
481 ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);
483 DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
488 /* Remove all processed CQEs. */
489 rxq_sync_cq(&rxq_ctrl->rxq);
490 /* Free all involved mbufs. */
491 rxq_free_elts(rxq_ctrl);
492 /* Set the actual queue state. */
493 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
498 * Rx queue stop. Device queue goes to the RESET state,
499 * all involved mbufs are freed from WQ.
502 * Pointer to Ethernet device structure.
507 * 0 on success, a negative errno value otherwise and rte_errno is set.
510 mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
512 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
515 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
516 DRV_LOG(ERR, "Hairpin queue can't be stopped");
520 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
523 * Vectorized Rx burst requires the CQ and RQ indices to stay
524 * synchronized; an RQ restart might break that and cause Rx
525 * malfunction, so queue stopping is not supported when the
526 * vectorized Rx burst is engaged.
527 * The burst routine pointer depends on the process type,
528 * so the check is performed here.
530 if (pkt_burst == mlx5_rx_burst_vec) {
531 DRV_LOG(ERR, "Rx queue stop is not supported "
532 "for vectorized Rx");
536 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
537 ret = mlx5_mp_os_req_queue_control(dev, idx,
538 MLX5_MP_REQ_QUEUE_RX_STOP);
540 ret = mlx5_rx_queue_stop_primary(dev, idx);
546 * Rx queue start. Device queue goes to the ready state,
547 * all required mbufs are allocated and WQ is replenished.
550 * Pointer to Ethernet device structure.
555 * 0 on success, a negative errno value otherwise and rte_errno is set.
558 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
560 struct mlx5_priv *priv = dev->data->dev_private;
561 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
562 struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
565 MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
566 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
567 /* Allocate needed buffers. */
568 ret = rxq_alloc_elts(rxq->ctrl);
570 DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
575 *rxq_data->cq_db = rte_cpu_to_be_32(rxq_data->cq_ci);
577 /* Reset RQ consumer before moving queue to READY state. */
578 *rxq_data->rq_db = rte_cpu_to_be_32(0);
580 ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RST2RDY);
582 DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
587 /* Reinitialize RQ - set WQEs. */
588 mlx5_rxq_initialize(rxq_data);
589 rxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
590 /* Set actual queue state. */
591 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
596 * Rx queue start. Device queue goes to the ready state,
597 * all required mbufs are allocated and WQ is replenished.
600 * Pointer to Ethernet device structure.
605 * 0 on success, a negative errno value otherwise and rte_errno is set.
608 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
612 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
613 DRV_LOG(ERR, "Hairpin queue can't be started");
617 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
619 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
620 ret = mlx5_mp_os_req_queue_control(dev, idx,
621 MLX5_MP_REQ_QUEUE_RX_START);
623 ret = mlx5_rx_queue_start_primary(dev, idx);
629 * Rx queue presetup checks.
632 * Pointer to Ethernet device structure.
636 * Number of descriptors to configure in queue.
637 * @param[out] rxq_ctrl
638 * Address of pointer to shared Rx queue control.
641 * 0 on success, a negative errno value otherwise and rte_errno is set.
644 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc,
645 struct mlx5_rxq_ctrl **rxq_ctrl)
647 struct mlx5_priv *priv = dev->data->dev_private;
648 struct mlx5_rxq_priv *rxq;
651 if (!rte_is_power_of_2(*desc)) {
652 *desc = 1 << log2above(*desc);
654 "port %u increased number of descriptors in Rx queue %u"
655 " to the next power of two (%d)",
656 dev->data->port_id, idx, *desc);
658 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
659 dev->data->port_id, idx, *desc);
660 if (idx >= priv->rxqs_n) {
661 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
662 dev->data->port_id, idx, priv->rxqs_n);
663 rte_errno = EOVERFLOW;
666 if (rxq_ctrl == NULL || *rxq_ctrl == NULL)
668 if (!(*rxq_ctrl)->rxq.shared) {
669 if (!mlx5_rxq_releasable(dev, idx)) {
670 DRV_LOG(ERR, "port %u unable to release queue index %u",
671 dev->data->port_id, idx);
675 mlx5_rxq_release(dev, idx);
677 if ((*rxq_ctrl)->obj != NULL)
678 /* Some port using shared Rx queue has been started. */
680 /* Release all owner RxQ to reconfigure Shared RxQ. */
682 rxq = LIST_FIRST(&(*rxq_ctrl)->owners);
683 LIST_REMOVE(rxq, owner_entry);
684 empty = LIST_EMPTY(&(*rxq_ctrl)->owners);
685 mlx5_rxq_release(ETH_DEV(rxq->priv), rxq->idx);
693 * Get the shared Rx queue object that matches group and queue index.
696 * Pointer to Ethernet device structure.
700 * Shared RX queue index.
703 * Shared RXQ object that matching, or NULL if not found.
705 static struct mlx5_rxq_ctrl *
706 mlx5_shared_rxq_get(struct rte_eth_dev *dev, uint32_t group, uint16_t share_qid)
708 struct mlx5_rxq_ctrl *rxq_ctrl;
709 struct mlx5_priv *priv = dev->data->dev_private;
711 LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
712 if (rxq_ctrl->share_group == group &&
713 rxq_ctrl->share_qid == share_qid)
720 * Check whether requested Rx queue configuration matches shared RXQ.
723 * Pointer to shared RXQ.
725 * Pointer to Ethernet device structure.
729 * Number of descriptors to configure in queue.
731 * NUMA socket on which memory must be allocated.
733 * Thresholds parameters.
735 * Memory pool for buffer allocations.
738 * 0 on success, a negative errno value otherwise and rte_errno is set.
741 mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,
742 uint16_t idx, uint16_t desc, unsigned int socket,
743 const struct rte_eth_rxconf *conf,
744 struct rte_mempool *mp)
746 struct mlx5_priv *spriv = LIST_FIRST(&rxq_ctrl->owners)->priv;
747 struct mlx5_priv *priv = dev->data->dev_private;
751 if (rxq_ctrl->socket != socket) {
752 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: socket mismatch",
753 dev->data->port_id, idx);
756 if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
757 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: descriptor number mismatch",
758 dev->data->port_id, idx);
761 if (priv->mtu != spriv->mtu) {
762 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mtu mismatch",
763 dev->data->port_id, idx);
766 if (priv->dev_data->dev_conf.intr_conf.rxq !=
767 spriv->dev_data->dev_conf.intr_conf.rxq) {
768 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: interrupt mismatch",
769 dev->data->port_id, idx);
772 if (mp != NULL && rxq_ctrl->rxq.mp != mp) {
773 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mempool mismatch",
774 dev->data->port_id, idx);
776 } else if (mp == NULL) {
777 for (i = 0; i < conf->rx_nseg; i++) {
778 if (conf->rx_seg[i].split.mp !=
779 rxq_ctrl->rxq.rxseg[i].mp ||
780 conf->rx_seg[i].split.length !=
781 rxq_ctrl->rxq.rxseg[i].length) {
782 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch",
783 dev->data->port_id, idx, i);
788 if (priv->config.hw_padding != spriv->config.hw_padding) {
789 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: padding mismatch",
790 dev->data->port_id, idx);
793 if (priv->config.cqe_comp != spriv->config.cqe_comp ||
794 (priv->config.cqe_comp &&
795 priv->config.cqe_comp_fmt != spriv->config.cqe_comp_fmt)) {
796 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: CQE compression mismatch",
797 dev->data->port_id, idx);
806 * Pointer to Ethernet device structure.
810 * Number of descriptors to configure in queue.
812 * NUMA socket on which memory must be allocated.
814 * Thresholds parameters.
816 * Memory pool for buffer allocations.
819 * 0 on success, a negative errno value otherwise and rte_errno is set.
822 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
823 unsigned int socket, const struct rte_eth_rxconf *conf,
824 struct rte_mempool *mp)
826 struct mlx5_priv *priv = dev->data->dev_private;
827 struct mlx5_rxq_priv *rxq;
828 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
829 struct rte_eth_rxseg_split *rx_seg =
830 (struct rte_eth_rxseg_split *)conf->rx_seg;
831 struct rte_eth_rxseg_split rx_single = {.mp = mp};
832 uint16_t n_seg = conf->rx_nseg;
834 uint64_t offloads = conf->offloads |
835 dev->data->dev_conf.rxmode.offloads;
839 * The parameters should be checked on rte_eth_dev layer.
840 * If mp is specified it means the compatible configuration
841 * without buffer split feature tuning.
847 /* The offloads should be checked on rte_eth_dev layer. */
848 MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
849 if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
850 DRV_LOG(ERR, "port %u queue index %u split "
851 "offload not configured",
852 dev->data->port_id, idx);
856 MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
858 if (conf->share_group > 0) {
859 if (!priv->config.hca_attr.mem_rq_rmp) {
860 DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
861 dev->data->port_id, idx);
865 if (priv->obj_ops.rxq_obj_new != devx_obj_ops.rxq_obj_new) {
866 DRV_LOG(ERR, "port %u queue index %u shared Rx queue needs DevX api",
867 dev->data->port_id, idx);
871 if (conf->share_qid >= priv->rxqs_n) {
872 DRV_LOG(ERR, "port %u shared Rx queue index %u > number of Rx queues %u",
873 dev->data->port_id, conf->share_qid,
878 if (priv->config.mprq.enabled) {
879 DRV_LOG(ERR, "port %u shared Rx queue index %u: not supported when MPRQ enabled",
880 dev->data->port_id, conf->share_qid);
884 /* Try to reuse shared RXQ. */
885 rxq_ctrl = mlx5_shared_rxq_get(dev, conf->share_group,
887 if (rxq_ctrl != NULL &&
888 !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket,
894 res = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl);
898 rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
901 DRV_LOG(ERR, "port %u unable to allocate rx queue index %u private data",
902 dev->data->port_id, idx);
908 (*priv->rxq_privs)[idx] = rxq;
909 if (rxq_ctrl != NULL) {
910 /* Join owner list. */
911 LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
912 rxq->ctrl = rxq_ctrl;
914 rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg,
916 if (rxq_ctrl == NULL) {
917 DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
918 dev->data->port_id, idx);
920 (*priv->rxq_privs)[idx] = NULL;
925 mlx5_rxq_ref(dev, idx);
926 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
927 dev->data->port_id, idx);
928 dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
935 * Pointer to Ethernet device structure.
939 * Number of descriptors to configure in queue.
940 * @param hairpin_conf
941 * Hairpin configuration parameters.
944 * 0 on success, a negative errno value otherwise and rte_errno is set.
947 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
949 const struct rte_eth_hairpin_conf *hairpin_conf)
951 struct mlx5_priv *priv = dev->data->dev_private;
952 struct mlx5_rxq_priv *rxq;
953 struct mlx5_rxq_ctrl *rxq_ctrl;
956 res = mlx5_rx_queue_pre_setup(dev, idx, &desc, NULL);
959 if (hairpin_conf->peer_count != 1) {
961 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue index %u"
962 " peer count is %u", dev->data->port_id,
963 idx, hairpin_conf->peer_count);
966 if (hairpin_conf->peers[0].port == dev->data->port_id) {
967 if (hairpin_conf->peers[0].queue >= priv->txqs_n) {
969 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
970 " index %u, Tx %u is larger than %u",
971 dev->data->port_id, idx,
972 hairpin_conf->peers[0].queue, priv->txqs_n);
976 if (hairpin_conf->manual_bind == 0 ||
977 hairpin_conf->tx_explicit == 0) {
979 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
980 " index %u peer port %u with attributes %u %u",
981 dev->data->port_id, idx,
982 hairpin_conf->peers[0].port,
983 hairpin_conf->manual_bind,
984 hairpin_conf->tx_explicit);
988 rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
991 DRV_LOG(ERR, "port %u unable to allocate hairpin rx queue index %u private data",
992 dev->data->port_id, idx);
998 (*priv->rxq_privs)[idx] = rxq;
999 rxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf);
1001 DRV_LOG(ERR, "port %u unable to allocate hairpin queue index %u",
1002 dev->data->port_id, idx);
1004 (*priv->rxq_privs)[idx] = NULL;
1008 DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
1009 dev->data->port_id, idx);
1010 dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
1015 * DPDK callback to release a RX queue.
1018 * Pointer to Ethernet device structure.
1020 * Receive queue index.
1023 mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1025 struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];
1029 if (!mlx5_rxq_releasable(dev, qid))
1030 rte_panic("port %u Rx queue %u is still used by a flow and"
1031 " cannot be removed\n", dev->data->port_id, qid);
1032 mlx5_rxq_release(dev, qid);
1036 * Allocate queue vector and fill epoll fd list for Rx interrupts.
1039 * Pointer to Ethernet device.
1042 * 0 on success, a negative errno value otherwise and rte_errno is set.
1045 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
1047 struct mlx5_priv *priv = dev->data->dev_private;
1049 unsigned int rxqs_n = priv->rxqs_n;
1050 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
1051 unsigned int count = 0;
1052 struct rte_intr_handle *intr_handle = dev->intr_handle;
1054 if (!dev->data->dev_conf.intr_conf.rxq)
1056 mlx5_rx_intr_vec_disable(dev);
1057 if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) {
1059 "port %u failed to allocate memory for interrupt"
1060 " vector, Rx interrupts will not be supported",
1061 dev->data->port_id);
1066 if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_EXT))
1069 for (i = 0; i != n; ++i) {
1070 /* This rxq obj must not be released in this function. */
1071 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
1072 struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;
1075 /* Skip queues that cannot request interrupts. */
1076 if (!rxq_obj || (!rxq_obj->ibv_channel &&
1077 !rxq_obj->devx_channel)) {
1078 /* Use invalid intr_vec[] index to disable entry. */
1079 if (rte_intr_vec_list_index_set(intr_handle, i,
1080 RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
1084 mlx5_rxq_ref(dev, i);
1085 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
1087 "port %u too many Rx queues for interrupt"
1088 " vector size (%d), Rx interrupts cannot be"
1090 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
1091 mlx5_rx_intr_vec_disable(dev);
1095 rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
1099 "port %u failed to make Rx interrupt file"
1100 " descriptor %d non-blocking for queue index"
1102 dev->data->port_id, rxq_obj->fd, i);
1103 mlx5_rx_intr_vec_disable(dev);
1107 if (rte_intr_vec_list_index_set(intr_handle, i,
1108 RTE_INTR_VEC_RXTX_OFFSET + count))
1110 if (rte_intr_efds_index_set(intr_handle, count,
1116 mlx5_rx_intr_vec_disable(dev);
1117 else if (rte_intr_nb_efd_set(intr_handle, count))
1123 * Clean up Rx interrupts handler.
1126 * Pointer to Ethernet device.
1129 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
1131 struct mlx5_priv *priv = dev->data->dev_private;
1132 struct rte_intr_handle *intr_handle = dev->intr_handle;
1134 unsigned int rxqs_n = priv->rxqs_n;
1135 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
1137 if (!dev->data->dev_conf.intr_conf.rxq)
1139 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0)
1141 for (i = 0; i != n; ++i) {
1142 if (rte_intr_vec_list_index_get(intr_handle, i) ==
1143 RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID)
1146 * Need to access the queue directly to release the reference
1147 * kept in mlx5_rx_intr_vec_enable().
1149 mlx5_rxq_deref(dev, i);
1152 rte_intr_free_epoll_fd(intr_handle);
1154 rte_intr_vec_list_free(intr_handle);
1156 rte_intr_nb_efd_set(intr_handle, 0);
1160 * MLX5 CQ notification.
1163 * Pointer to receive queue structure.
1165 * Sequence number per receive queue.
1168 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
1171 uint32_t doorbell_hi;
1173 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
1175 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
1176 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
1177 doorbell = (uint64_t)doorbell_hi << 32;
1178 doorbell |= rxq->cqn;
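/*
 * Doorbell layout: the high 32 bits carry the arm sequence number and
 * the CQ consumer index, the low 32 bits the CQ number; the 64-bit
 * value is then written to the UAR CQ doorbell register below.
 */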
1179 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
1180 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
1181 cq_db_reg, rxq->uar_lock_cq);
1185 * DPDK callback for Rx queue interrupt enable.
1188 * Pointer to Ethernet device structure.
1189 * @param rx_queue_id
1193 * 0 on success, a negative errno value otherwise and rte_errno is set.
1196 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1198 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
1201 if (rxq->ctrl->irq) {
1202 if (!rxq->ctrl->obj)
1204 mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);
1213 * DPDK callback for Rx queue interrupt disable.
1216 * Pointer to Ethernet device structure.
1217 * @param rx_queue_id
1221 * 0 on success, a negative errno value otherwise and rte_errno is set.
1224 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1226 struct mlx5_priv *priv = dev->data->dev_private;
1227 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
1234 if (!rxq->ctrl->obj)
1236 if (rxq->ctrl->irq) {
1237 ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);
1240 rxq->ctrl->rxq.cq_arm_sn++;
1245 * The ret variable may be EAGAIN, which means the get_event function
1246 * was called before an event was received.
1252 if (rte_errno != EAGAIN)
1253 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
1254 dev->data->port_id, rx_queue_id);
1259 * Verify the Rx queue objects list is empty
1262 * Pointer to Ethernet device.
1265 * The number of objects not released.
1268 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1270 struct mlx5_priv *priv = dev->data->dev_private;
1272 struct mlx5_rxq_obj *rxq_obj;
1274 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1275 if (rxq_obj->rxq_ctrl->rxq.shared &&
1276 !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
1278 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1279 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1286 * Callback function to initialize mbufs for Multi-Packet RQ.
1289 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1290 void *_m, unsigned int i __rte_unused)
1292 struct mlx5_mprq_buf *buf = _m;
1293 struct rte_mbuf_ext_shared_info *shinfo;
1294 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1297 memset(_m, 0, sizeof(*buf));
1299 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1300 for (j = 0; j != strd_n; ++j) {
1301 shinfo = &buf->shinfos[j];
1302 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1303 shinfo->fcb_opaque = buf;
1308 * Free mempool of Multi-Packet RQ.
1311 * Pointer to Ethernet device.
1314 * 0 on success, negative errno value on failure.
1317 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1319 struct mlx5_priv *priv = dev->data->dev_private;
1320 struct rte_mempool *mp = priv->mprq_mp;
1325 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1326 dev->data->port_id, mp->name);
1328 * If a buffer in the pool has been externally attached to an mbuf and
1329 * it is still in use by the application, destroying the Rx queue can
1330 * spoil the packet. It is unlikely, but it can happen if the application
1331 * dynamically creates and destroys queues while holding Rx packets.
1333 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1334 * RQ isn't provided by application but managed by PMD.
1336 if (!rte_mempool_full(mp)) {
1338 "port %u mempool for Multi-Packet RQ is still in use",
1339 dev->data->port_id);
1343 rte_mempool_free(mp);
1344 /* Unset mempool for each Rx queue. */
1345 for (i = 0; i != priv->rxqs_n; ++i) {
1346 struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, i);
1350 rxq->mprq_mp = NULL;
1352 priv->mprq_mp = NULL;
1357 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1358 * mempool. If already allocated, reuse it if there are enough elements.
1359 * Otherwise, resize it.
1362 * Pointer to Ethernet device.
1365 * 0 on success, negative errno value on failure.
1368 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1370 struct mlx5_priv *priv = dev->data->dev_private;
1371 struct rte_mempool *mp = priv->mprq_mp;
1372 char name[RTE_MEMPOOL_NAMESIZE];
1373 unsigned int desc = 0;
1374 unsigned int buf_len;
1375 unsigned int obj_num;
1376 unsigned int obj_size;
1377 unsigned int strd_num_n = 0;
1378 unsigned int strd_sz_n = 0;
1380 unsigned int n_ibv = 0;
1383 if (!mlx5_mprq_enabled(dev))
1385 /* Count the total number of descriptors configured. */
1386 for (i = 0; i != priv->rxqs_n; ++i) {
1387 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
1388 struct mlx5_rxq_data *rxq;
1390 if (rxq_ctrl == NULL ||
1391 rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1393 rxq = &rxq_ctrl->rxq;
1395 desc += 1 << rxq->elts_n;
1396 /* Get the max number of strides. */
1397 if (strd_num_n < rxq->strd_num_n)
1398 strd_num_n = rxq->strd_num_n;
1399 /* Get the max size of a stride. */
1400 if (strd_sz_n < rxq->strd_sz_n)
1401 strd_sz_n = rxq->strd_sz_n;
1403 MLX5_ASSERT(strd_num_n && strd_sz_n);
1404 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1405 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1406 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
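/*
 * Each mempool object holds the mlx5_mprq_buf header, the stride data
 * buffer itself, one rte_mbuf_ext_shared_info per stride (used when
 * strides are attached to mbufs as external buffers) and headroom.
 */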
1408 * Received packets can be either memcpy'd or externally referenced.
1409 * When a packet is attached to an mbuf as an external buffer, it is not
1410 * possible to predict how the buffers will be queued by the application,
1411 * so the needed buffers cannot be pre-allocated exactly; enough buffers
1412 * have to be prepared speculatively instead.
1414 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1415 * received packets to buffers provided by application (rxq->mp) until
1416 * this Mempool gets available again.
1419 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1421 * rte_mempool_create_empty() has sanity check to refuse large cache
1422 * size compared to the number of elements.
1423 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
1424 * constant number 2 instead.
1426 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1427 /* Check whether a mempool is already allocated and if it can be reused. */
1428 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1429 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1430 dev->data->port_id, mp->name);
1433 } else if (mp != NULL) {
1434 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1435 dev->data->port_id, mp->name);
1437 * If freeing fails, the mempool may still be in use and there is no
1438 * choice but to keep using the existing one. On buffer underrun,
1439 * packets will be memcpy'd instead of external buffer
1442 if (mlx5_mprq_free_mp(dev)) {
1443 if (mp->elt_size >= obj_size)
1449 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1450 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1451 0, NULL, NULL, mlx5_mprq_buf_init,
1452 (void *)((uintptr_t)1 << strd_num_n),
1453 dev->device->numa_node, 0);
1456 "port %u failed to allocate a mempool for"
1457 " Multi-Packet RQ, count=%u, size=%u",
1458 dev->data->port_id, obj_num, obj_size);
1462 ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
1463 priv->sh->cdev->pd, mp, &priv->mp_id);
1464 if (ret < 0 && rte_errno != EEXIST) {
1466 DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
1467 dev->data->port_id);
1468 rte_mempool_free(mp);
1474 /* Set mempool for each Rx queue. */
1475 for (i = 0; i != priv->rxqs_n; ++i) {
1476 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
1478 if (rxq_ctrl == NULL ||
1479 rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1481 rxq_ctrl->rxq.mprq_mp = mp;
1483 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1484 dev->data->port_id);
1488 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1489 sizeof(struct rte_vlan_hdr) * 2 + \
1490 sizeof(struct rte_ipv6_hdr)))
1491 #define MAX_TCP_OPTION_SIZE 40u
1492 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1493 sizeof(struct rte_tcp_hdr) + \
1494 MAX_TCP_OPTION_SIZE))
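/*
 * Worst-case bytes preceding the LRO payload: Ethernet header, two
 * VLAN tags, an IPv6 header, the TCP header and up to 40 bytes of
 * TCP options.
 */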
1497 * Adjust the maximum LRO message size.
1500 * Pointer to Ethernet device.
1503 * @param max_lro_size
1504 * The maximum size for LRO packet.
1507 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1508 uint32_t max_lro_size)
1510 struct mlx5_priv *priv = dev->data->dev_private;
1512 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1513 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1514 MLX5_MAX_TCP_HDR_OFFSET)
1515 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1516 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1517 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1518 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
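/*
 * max_lro_msg_size is kept in MLX5_LRO_SEG_CHUNK_SIZE units; the log
 * message below multiplies it back to bytes for display.
 */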
1519 if (priv->max_lro_msg_size)
1520 priv->max_lro_msg_size =
1521 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1523 priv->max_lro_msg_size = max_lro_size;
1525 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1526 dev->data->port_id, idx,
1527 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1531 * Create a DPDK Rx queue.
1534 * Pointer to Ethernet device.
1536 * RX queue private data.
1538 * Number of descriptors to configure in queue.
1540 * NUMA socket on which memory must be allocated.
1543 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1545 struct mlx5_rxq_ctrl *
1546 mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
1548 unsigned int socket, const struct rte_eth_rxconf *conf,
1549 const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
1551 uint16_t idx = rxq->idx;
1552 struct mlx5_priv *priv = dev->data->dev_private;
1553 struct mlx5_rxq_ctrl *tmpl;
1554 unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
1555 struct mlx5_dev_config *config = &priv->config;
1556 uint64_t offloads = conf->offloads |
1557 dev->data->dev_conf.rxmode.offloads;
1558 unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
1559 unsigned int max_rx_pktlen = lro_on_queue ?
1560 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1561 dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
1563 unsigned int non_scatter_min_mbuf_size = max_rx_pktlen +
1564 RTE_PKTMBUF_HEADROOM;
1565 unsigned int max_lro_size = 0;
1566 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1567 const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
1568 !rx_seg[0].offset && !rx_seg[0].length;
1569 unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
1570 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1571 unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
1572 (1U << config->mprq.max_stride_size_n) ?
1573 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1574 unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
1575 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1576 (config->mprq.stride_size_n ?
1577 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
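/*
 * mprq_stride_cap is the byte capacity of one MPRQ WQE (strides per
 * WQE times stride size); it is compared against the non-scatter mbuf
 * size below to decide whether MPRQ can hold a full packet.
 */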
1579 * Always allocate extra slots, even if eventually
1580 * the vector Rx will not be used.
1582 uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1583 const struct rte_eth_rxseg_split *qs_seg = rx_seg;
1584 unsigned int tail_len;
1586 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1587 sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
1589 (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
1595 LIST_INIT(&tmpl->owners);
1596 if (conf->share_group > 0) {
1597 tmpl->rxq.shared = 1;
1598 tmpl->share_group = conf->share_group;
1599 tmpl->share_qid = conf->share_qid;
1600 LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
1603 LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
1604 MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
1606 * Build the array of actual buffer offsets and lengths.
1607 * Pad with the buffers from the last memory pool if
1608 * needed to handle max size packets, replace zero length
1609 * with the buffer length from the pool.
1611 tail_len = max_rx_pktlen;
1613 struct mlx5_eth_rxseg *hw_seg =
1614 &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
1615 uint32_t buf_len, offset, seg_len;
1618 * For the buffers beyond the described segments the offset is zero;
1619 * the first buffer contains the headroom.
1621 buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
1622 offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
1623 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
1625 * For the buffers beyond the described segments the length is the
1626 * pool buffer length; zero lengths are replaced with the
1627 * pool buffer length as well.
1629 seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
1633 /* Check is done in long int, no overflows. */
1634 if (buf_len < seg_len + offset) {
1635 DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
1636 "%u/%u can't be satisfied",
1637 dev->data->port_id, idx,
1638 qs_seg->length, qs_seg->offset);
1642 if (seg_len > tail_len)
1643 seg_len = buf_len - offset;
1644 if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
1646 "port %u too many SGEs (%u) needed to handle"
1647 " requested maximum packet size %u, the maximum"
1648 " supported are %u", dev->data->port_id,
1649 tmpl->rxq.rxseg_n, max_rx_pktlen,
1651 rte_errno = ENOTSUP;
1654 /* Build the actual scattering element in the queue object. */
1655 hw_seg->mp = qs_seg->mp;
1656 MLX5_ASSERT(offset <= UINT16_MAX);
1657 MLX5_ASSERT(seg_len <= UINT16_MAX);
1658 hw_seg->offset = (uint16_t)offset;
1659 hw_seg->length = (uint16_t)seg_len;
1661 * Advance the segment descriptor; the padding is based
1662 * on the attributes of the last descriptor.
1664 if (tmpl->rxq.rxseg_n < n_seg)
1666 tail_len -= RTE_MIN(tail_len, seg_len);
1667 } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
1668 MLX5_ASSERT(tmpl->rxq.rxseg_n &&
1669 tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
1670 if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
1671 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1672 " configured and no enough mbuf space(%u) to contain "
1673 "the maximum RX packet length(%u) with head-room(%u)",
1674 dev->data->port_id, idx, mb_len, max_rx_pktlen,
1675 RTE_PKTMBUF_HEADROOM);
1679 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1680 if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
1681 &priv->sh->cdev->mr_scache.dev_gen, socket)) {
1682 /* rte_errno is already set. */
1685 tmpl->socket = socket;
1686 if (dev->data->dev_conf.intr_conf.rxq)
1689 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1690 * following conditions are met:
1691 * - MPRQ is enabled.
1692 * - The number of descs is more than the number of strides.
1693 * - max_rx_pktlen plus overhead is less than the max size
1694 * of a stride or mprq_stride_size is specified by a user.
1695 * Need to make sure that there are enough strides to encap
1696 * the maximum packet size in case mprq_stride_size is set.
1697 * Otherwise, enable Rx scatter if necessary.
1699 if (mprq_en && desc > (1U << mprq_stride_nums) &&
1700 (non_scatter_min_mbuf_size <=
1701 (1U << config->mprq.max_stride_size_n) ||
1702 (config->mprq.stride_size_n &&
1703 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1704 /* TODO: Rx scatter isn't supported yet. */
1705 tmpl->rxq.sges_n = 0;
1706 /* Trim the number of descs needed. */
1707 desc >>= mprq_stride_nums;
1708 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1709 config->mprq.stride_num_n : mprq_stride_nums;
1710 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1711 config->mprq.stride_size_n : mprq_stride_size;
1712 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1713 tmpl->rxq.strd_scatter_en =
1714 !!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
1715 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1716 config->mprq.max_memcpy_len);
1717 max_lro_size = RTE_MIN(max_rx_pktlen,
1718 (1u << tmpl->rxq.strd_num_n) *
1719 (1u << tmpl->rxq.strd_sz_n));
1721 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1722 " strd_num_n = %u, strd_sz_n = %u",
1723 dev->data->port_id, idx,
1724 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1725 } else if (tmpl->rxq.rxseg_n == 1) {
1726 MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
1727 tmpl->rxq.sges_n = 0;
1728 max_lro_size = max_rx_pktlen;
1729 } else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
1730 unsigned int sges_n;
1732 if (lro_on_queue && first_mb_free_size <
1733 MLX5_MAX_LRO_HEADER_FIX) {
1734 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1735 " to include the max header size(%u) for LRO",
1736 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1737 rte_errno = ENOTSUP;
1741 * Determine the number of SGEs needed for a full packet
1742 * and round it to the next power of two.
1744 sges_n = log2above(tmpl->rxq.rxseg_n);
1745 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1747 "port %u too many SGEs (%u) needed to handle"
1748 " requested maximum packet size %u, the maximum"
1749 " supported are %u", dev->data->port_id,
1750 1 << sges_n, max_rx_pktlen,
1751 1u << MLX5_MAX_LOG_RQ_SEGS);
1752 rte_errno = ENOTSUP;
1755 tmpl->rxq.sges_n = sges_n;
1756 max_lro_size = max_rx_pktlen;
1758 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1760 "port %u MPRQ is requested but cannot be enabled\n"
1761 " (requested: pkt_sz = %u, desc_num = %u,"
1762 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1763 " supported: min_rxqs_num = %u,"
1764 " min_stride_sz = %u, max_stride_sz = %u).",
1765 dev->data->port_id, non_scatter_min_mbuf_size,
1767 config->mprq.stride_size_n ?
1768 (1U << config->mprq.stride_size_n) :
1769 (1U << mprq_stride_size),
1770 config->mprq.stride_num_n ?
1771 (1U << config->mprq.stride_num_n) :
1772 (1U << mprq_stride_nums),
1773 config->mprq.min_rxqs_num,
1774 (1U << config->mprq.min_stride_size_n),
1775 (1U << config->mprq.max_stride_size_n));
1776 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1777 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1778 if (desc % (1 << tmpl->rxq.sges_n)) {
1780 "port %u number of Rx queue descriptors (%u) is not a"
1781 " multiple of SGEs per packet (%u)",
1784 1 << tmpl->rxq.sges_n);
1788 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1789 /* Toggle RX checksum offload if hardware supports it. */
1790 tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
1791 /* Configure Rx timestamp. */
1792 tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
1793 tmpl->rxq.timestamp_rx_flag = 0;
1794 if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
1795 &tmpl->rxq.timestamp_offset,
1796 &tmpl->rxq.timestamp_rx_flag) != 0) {
1797 DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
1800 /* Configure VLAN stripping. */
1801 tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
1802 /* By default, FCS (CRC) is stripped by hardware. */
1803 tmpl->rxq.crc_present = 0;
1804 tmpl->rxq.lro = lro_on_queue;
1805 if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
1806 if (config->hw_fcs_strip) {
1808 * RQs used for LRO-enabled TIRs should not be
1809 * configured to scatter the FCS.
1813 "port %u CRC stripping has been "
1814 "disabled but will still be performed "
1815 "by hardware, because LRO is enabled",
1816 dev->data->port_id);
1818 tmpl->rxq.crc_present = 1;
1821 "port %u CRC stripping has been disabled but will"
1822 " still be performed by hardware, make sure MLNX_OFED"
1823 " and firmware are up to date",
1824 dev->data->port_id);
1828 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1829 " incoming frames to hide it",
1831 tmpl->rxq.crc_present ? "disabled" : "enabled",
1832 tmpl->rxq.crc_present << 2);
1834 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1835 (!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
1836 tmpl->rxq.port_id = dev->data->port_id;
1837 tmpl->sh = priv->sh;
1838 tmpl->rxq.mp = rx_seg[0].mp;
1839 tmpl->rxq.elts_n = log2above(desc);
1840 tmpl->rxq.rq_repl_thresh =
1841 MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
1843 (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
1844 tmpl->rxq.mprq_bufs =
1845 (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
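/*
 * Layout of the single allocation above: the control structure is
 * followed by desc_n mbuf pointers (elts, including the vector
 * padding slots) and then by the MPRQ buffer pointers.
 */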
1847 tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
1849 tmpl->rxq.idx = idx;
1850 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1853 mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
1859 * Create a DPDK Rx hairpin queue.
1862 * Pointer to Ethernet device.
1866 * Number of descriptors to configure in queue.
1867 * @param hairpin_conf
1868 * The hairpin binding configuration.
1871 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1873 struct mlx5_rxq_ctrl *
1874 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
1876 const struct rte_eth_hairpin_conf *hairpin_conf)
1878 uint16_t idx = rxq->idx;
1879 struct mlx5_priv *priv = dev->data->dev_private;
1880 struct mlx5_rxq_ctrl *tmpl;
1882 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1888 LIST_INIT(&tmpl->owners);
1890 LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
1891 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1892 tmpl->socket = SOCKET_ID_ANY;
1893 tmpl->rxq.rss_hash = 0;
1894 tmpl->rxq.port_id = dev->data->port_id;
1895 tmpl->sh = priv->sh;
1896 tmpl->rxq.mp = NULL;
1897 tmpl->rxq.elts_n = log2above(desc);
1898 tmpl->rxq.elts = NULL;
1899 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
1900 tmpl->rxq.idx = idx;
1901 rxq->hairpin_conf = *hairpin_conf;
1902 mlx5_rxq_ref(dev, idx);
1903 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1908 * Increase Rx queue reference count.
1911 * Pointer to Ethernet device.
1916 * A pointer to the queue if it exists, NULL otherwise.
1918 struct mlx5_rxq_priv *
1919 mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
1921 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
1924 __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
1929 * Dereference a Rx queue.
1932 * Pointer to Ethernet device.
1937 * Updated reference count.
1940 mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
1942 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
1946 return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
1953 * Pointer to Ethernet device.
1958 * A pointer to the queue if it exists, NULL otherwise.
1960 struct mlx5_rxq_priv *
1961 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1963 struct mlx5_priv *priv = dev->data->dev_private;
1965 MLX5_ASSERT(priv->rxq_privs != NULL);
1966 return (*priv->rxq_privs)[idx];
1970 * Get Rx queue shareable control.
1973 * Pointer to Ethernet device.
1978 * A pointer to the queue control if it exists, NULL otherwise.
1980 struct mlx5_rxq_ctrl *
1981 mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)
1983 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
1985 return rxq == NULL ? NULL : rxq->ctrl;
1989 * Get Rx queue shareable data.
1992 * Pointer to Ethernet device.
1997 * A pointer to the queue data if it exists, NULL otherwise.
1999 struct mlx5_rxq_data *
2000 mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
2002 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
2004 return rxq == NULL ? NULL : &rxq->ctrl->rxq;
2008 * Release a Rx queue.
2011 * Pointer to Ethernet device.
2016 * 1 while a reference on it exists, 0 when freed.
2019 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2021 struct mlx5_priv *priv = dev->data->dev_private;
2022 struct mlx5_rxq_priv *rxq;
2023 struct mlx5_rxq_ctrl *rxq_ctrl;
2026 if (priv->rxq_privs == NULL)
2028 rxq = mlx5_rxq_get(dev, idx);
2029 if (rxq == NULL || rxq->refcnt == 0)
2031 rxq_ctrl = rxq->ctrl;
2032 refcnt = mlx5_rxq_deref(dev, idx);
2035 } else if (refcnt == 1) { /* RxQ stopped. */
2036 priv->obj_ops.rxq_obj_release(rxq);
2037 if (!rxq_ctrl->started && rxq_ctrl->obj != NULL) {
2038 LIST_REMOVE(rxq_ctrl->obj, next);
2039 mlx5_free(rxq_ctrl->obj);
2040 rxq_ctrl->obj = NULL;
2042 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
2043 if (!rxq_ctrl->started)
2044 rxq_free_elts(rxq_ctrl);
2045 dev->data->rx_queue_state[idx] =
2046 RTE_ETH_QUEUE_STATE_STOPPED;
2048 } else { /* Refcnt zero, closing device. */
2049 LIST_REMOVE(rxq, owner_entry);
2050 if (LIST_EMPTY(&rxq_ctrl->owners)) {
2051 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
2053 (&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2054 if (rxq_ctrl->rxq.shared)
2055 LIST_REMOVE(rxq_ctrl, share_entry);
2056 LIST_REMOVE(rxq_ctrl, next);
2057 mlx5_free(rxq_ctrl);
2059 dev->data->rx_queues[idx] = NULL;
2061 (*priv->rxq_privs)[idx] = NULL;
2067 * Verify the Rx Queue list is empty
2070 * Pointer to Ethernet device.
2073 * The number of objects not released.
2076 mlx5_rxq_verify(struct rte_eth_dev *dev)
2078 struct mlx5_priv *priv = dev->data->dev_private;
2079 struct mlx5_rxq_ctrl *rxq_ctrl;
2082 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2083 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2084 dev->data->port_id, rxq_ctrl->rxq.idx);
2091 * Get a Rx queue type.
2094 * Pointer to Ethernet device.
2099 * The Rx queue type.
2102 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
2104 struct mlx5_priv *priv = dev->data->dev_private;
2105 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
2107 if (idx < priv->rxqs_n && rxq_ctrl != NULL)
2108 return rxq_ctrl->type;
2109 return MLX5_RXQ_TYPE_UNDEFINED;
2113 * Get a Rx hairpin queue configuration.
2116 * Pointer to Ethernet device.
2121 * Pointer to the configuration if a hairpin RX queue, otherwise NULL.
2123 const struct rte_eth_hairpin_conf *
2124 mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
2126 struct mlx5_priv *priv = dev->data->dev_private;
2127 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
2129 if (idx < priv->rxqs_n && rxq != NULL) {
2130 if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
2131 return &rxq->hairpin_conf;
2137 * Match the queues listed in the arguments to the queues contained in the indirection table.
2141 * Pointer to indirection table to match.
2143 * Queues to match to the queues in the indirection table.
2145 * Number of queues in the array.
2148 * 1 if all queues in the indirection table match, 0 otherwise.
2151 mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
2152 const uint16_t *queues, uint32_t queues_n)
2154 return (ind_tbl->queues_n == queues_n) &&
2155 (!memcmp(ind_tbl->queues, queues,
2156 ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
2160 * Get an indirection table.
2163 * Pointer to Ethernet device.
2165 * Queues entering in the indirection table.
2167 * Number of queues in the array.
2170 * An indirection table if found.
2172 struct mlx5_ind_table_obj *
2173 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2176 struct mlx5_priv *priv = dev->data->dev_private;
2177 struct mlx5_ind_table_obj *ind_tbl;
2179 rte_rwlock_read_lock(&priv->ind_tbls_lock);
2180 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2181 if ((ind_tbl->queues_n == queues_n) &&
2182 (memcmp(ind_tbl->queues, queues,
2183 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2185 __atomic_fetch_add(&ind_tbl->refcnt, 1,
2190 rte_rwlock_read_unlock(&priv->ind_tbls_lock);
2195 * Release an indirection table.
2198 * Pointer to Ethernet device.
2200 * Indirection table to release.
2202 * Indirection table for Standalone queue.
2205 * 1 while a reference on it exists, 0 when freed.
2208 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2209 struct mlx5_ind_table_obj *ind_tbl,
2212 struct mlx5_priv *priv = dev->data->dev_private;
2213 unsigned int i, ret;
2215 rte_rwlock_write_lock(&priv->ind_tbls_lock);
2216 ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
2217 if (!ret && !standalone)
2218 LIST_REMOVE(ind_tbl, next);
2219 rte_rwlock_write_unlock(&priv->ind_tbls_lock);
2222 priv->obj_ops.ind_table_destroy(ind_tbl);
2223 for (i = 0; i != ind_tbl->queues_n; ++i)
2224 claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
2230 * Verify the Rx Queue list is empty
2233 * Pointer to Ethernet device.
2236 * The number of objects not released.
2239 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2241 struct mlx5_priv *priv = dev->data->dev_private;
2242 struct mlx5_ind_table_obj *ind_tbl;
2245 rte_rwlock_read_lock(&priv->ind_tbls_lock);
2246 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2248 "port %u indirection table obj %p still referenced",
2249 dev->data->port_id, (void *)ind_tbl);
2252 rte_rwlock_read_unlock(&priv->ind_tbls_lock);
2257 * Setup an indirection table structure fields.
2260 * Pointer to Ethernet device.
2262 * Indirection table to modify.
2265 * 0 on success, a negative errno value otherwise and rte_errno is set.
2268 mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
2269 struct mlx5_ind_table_obj *ind_tbl)
2271 struct mlx5_priv *priv = dev->data->dev_private;
2272 uint32_t queues_n = ind_tbl->queues_n;
2273 uint16_t *queues = ind_tbl->queues;
2276 const unsigned int n = rte_is_power_of_2(queues_n) ?
2277 log2above(queues_n) :
2278 log2above(priv->config.ind_table_max_size);
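/*
 * The device requires a power-of-two indirection table size; when
 * queues_n is not a power of two the maximum supported size is used
 * and the entries presumably wrap around the queue list.
 */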
2280 for (i = 0; i != queues_n; ++i) {
2281 if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
2286 ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
2289 __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
2293 for (j = 0; j < i; j++)
2294 mlx5_rxq_deref(dev, ind_tbl->queues[j]);
2296 DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
2297 dev->data->port_id);
2302 * Create an indirection table.
2305 * Pointer to Ethernet device.
2307 * Queues entering the indirection table.
2309 * Number of queues in the array.
2311 * True if the indirection table is used by a standalone (shared RSS) queue.
2314 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2316 static struct mlx5_ind_table_obj *
2317 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2318 uint32_t queues_n, bool standalone)
2320 struct mlx5_priv *priv = dev->data->dev_private;
2321 struct mlx5_ind_table_obj *ind_tbl;
2324 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
2325 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
2330 ind_tbl->queues_n = queues_n;
2331 ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
2332 memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
2333 ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
2339 rte_rwlock_write_lock(&priv->ind_tbls_lock);
2340 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2341 rte_rwlock_write_unlock(&priv->ind_tbls_lock);
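/*
 * Note on the standalone flag handled above: non-standalone tables are
 * published in priv->ind_tbls so that mlx5_ind_table_obj_get() can find and
 * share them, while standalone tables used by shared RSS stay private to
 * their owning hrxq, which is why the release path above only unlinks a
 * table from the list when !standalone.
 */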
2347 mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused,
2348 struct mlx5_ind_table_obj *ind_tbl)
2352 refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
2356 * Modification of indirection tables having more than 1
2357 * reference is unsupported.
2360 "Port %u cannot modify indirection table %p (refcnt %u > 1).",
2361 dev->data->port_id, (void *)ind_tbl, refcnt);
2367 * Modify an indirection table.
2370 * Pointer to Ethernet device.
2372 * Indirection table to modify.
2374 * Replacement queues for the indirection table.
2376 * Number of queues in the array.
2378 * True if the indirection table is used by a standalone (shared RSS) queue.
2381 * 0 on success, a negative errno value otherwise and rte_errno is set.
2384 mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
2385 struct mlx5_ind_table_obj *ind_tbl,
2386 uint16_t *queues, const uint32_t queues_n,
2389 struct mlx5_priv *priv = dev->data->dev_private;
2392 const unsigned int n = rte_is_power_of_2(queues_n) ?
2393 log2above(queues_n) :
2394 log2above(priv->config.ind_table_max_size);
2396 MLX5_ASSERT(standalone);
2397 RTE_SET_USED(standalone);
2398 if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
2400 for (i = 0; i != queues_n; ++i) {
2401 if (!mlx5_rxq_get(dev, queues[i])) {
2406 MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2407 ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
2410 ind_tbl->queues_n = queues_n;
2411 ind_tbl->queues = queues;
2416 DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
2417 dev->data->port_id);
2422 * Attach an indirection table to its queues.
2425 * Pointer to Ethernet device.
2427 * Indirection table to attach.
2430 * 0 on success, a negative errno value otherwise and rte_errno is set.
2433 mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
2434 struct mlx5_ind_table_obj *ind_tbl)
2439 ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
2440 ind_tbl->queues_n, true);
2442 DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
2443 dev->data->port_id, (void *)ind_tbl);
2446 for (i = 0; i < ind_tbl->queues_n; i++)
2447 mlx5_rxq_get(dev, ind_tbl->queues[i]);
2452 * Detach an indirection table from its queues.
2455 * Pointer to Ethernet device.
2457 * Indirection table to detach.
2460 * 0 on success, a negative errno value otherwise and rte_errno is set.
2463 mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
2464 struct mlx5_ind_table_obj *ind_tbl)
2466 struct mlx5_priv *priv = dev->data->dev_private;
2467 const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ?
2468 log2above(ind_tbl->queues_n) :
2469 log2above(priv->config.ind_table_max_size);
2473 ret = mlx5_ind_table_obj_check_standalone(dev, ind_tbl);
2476 MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2477 ret = priv->obj_ops.ind_table_modify(dev, n, NULL, 0, ind_tbl);
2479 DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
2480 dev->data->port_id, (void *)ind_tbl);
2483 for (i = 0; i < ind_tbl->queues_n; i++)
2484 mlx5_rxq_release(dev, ind_tbl->queues[i]);
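/*
 * Sketch of how the detach/attach pair is meant to be used around an Rx
 * reconfiguration, assuming a standalone "ind_tbl" owned by the caller:
 * detach points the device object at zero queues and releases the Rx queue
 * references; attach re-applies the stored queue list and takes them back.
 *
 *   if (mlx5_ind_table_obj_detach(dev, ind_tbl) == 0) {
 *           ... stop, reconfigure and restart the Rx queues ...
 *           if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
 *                   DRV_LOG(ERR, "port %u failed to re-attach table %p",
 *                           dev->data->port_id, (void *)ind_tbl);
 *   }
 */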
2489 mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
2492 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2493 struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2494 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2496 return (hrxq->rss_key_len != rss_desc->key_len ||
2497 memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
2498 hrxq->hash_fields != rss_desc->hash_fields ||
2499 hrxq->ind_table->queues_n != rss_desc->queue_num ||
2500 memcmp(hrxq->ind_table->queues, rss_desc->queue,
2501 rss_desc->queue_num * sizeof(rss_desc->queue[0])));
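/*
 * The callback above is the hrxq cache equality test and, per the mlx5 list
 * convention, returns nonzero on mismatch. Written positively, two entries
 * are the same only when every component of the RSS description matches:
 *
 *   int same = hrxq->rss_key_len == rss_desc->key_len &&
 *              !memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) &&
 *              hrxq->hash_fields == rss_desc->hash_fields &&
 *              hrxq->ind_table->queues_n == rss_desc->queue_num &&
 *              !memcmp(hrxq->ind_table->queues, rss_desc->queue,
 *                      rss_desc->queue_num * sizeof(rss_desc->queue[0]));
 */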
2505 * Modify an Rx Hash queue configuration.
2508 * Pointer to Ethernet device.
2510 * Index to Hash Rx queue to modify.
2512 * RSS key for the Rx hash queue.
2513 * @param rss_key_len
2515 * @param hash_fields
2516 * Verbs protocol hash field to make the RSS on.
2518 * Queues entering the hash queue. When hash_fields is empty, only the
2519 * first queue index is used for the indirection table.
2524 * 0 on success, a negative errno value otherwise and rte_errno is set.
2527 mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
2528 const uint8_t *rss_key, uint32_t rss_key_len,
2529 uint64_t hash_fields,
2530 const uint16_t *queues, uint32_t queues_n)
2533 struct mlx5_ind_table_obj *ind_tbl = NULL;
2534 struct mlx5_priv *priv = dev->data->dev_private;
2535 struct mlx5_hrxq *hrxq =
2536 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2544 if (hrxq->rss_key_len != rss_key_len) {
2545 /* rss_key_len has a fixed size of 40 bytes and is not supposed to change. */
2549 queues_n = hash_fields ? queues_n : 1;
2550 if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
2551 queues, queues_n)) {
2552 ind_tbl = hrxq->ind_table;
2554 if (hrxq->standalone) {
2556 * Replacement of the indirection table is unsupported for
2557 * standalone hrxq objects (used by shared RSS).
2559 rte_errno = ENOTSUP;
2562 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2564 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
2571 MLX5_ASSERT(priv->obj_ops.hrxq_modify);
2572 ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
2573 hash_fields, ind_tbl);
2578 if (ind_tbl != hrxq->ind_table) {
2579 MLX5_ASSERT(!hrxq->standalone);
2580 mlx5_ind_table_obj_release(dev, hrxq->ind_table,
2582 hrxq->ind_table = ind_tbl;
2584 hrxq->hash_fields = hash_fields;
2585 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2589 if (ind_tbl != hrxq->ind_table) {
2590 MLX5_ASSERT(!hrxq->standalone);
2591 mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
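/*
 * Usage sketch for the modify path above, with hypothetical caller values
 * (hrxq_idx, key, hash_fields, queues, queues_n): the key length must stay
 * at MLX5_RSS_HASH_KEY_LEN, hash_fields is the Verbs hash-fields bitmask,
 * and a replacement indirection table is only looked up or created when the
 * queue list actually changes.
 *
 *   int ret = mlx5_hrxq_modify(dev, hrxq_idx, key, MLX5_RSS_HASH_KEY_LEN,
 *                              hash_fields, queues, queues_n);
 *
 *   if (ret != 0)
 *           DRV_LOG(ERR, "port %u hrxq modify failed (rte_errno %d)",
 *                   dev->data->port_id, rte_errno);
 */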
2598 __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2600 struct mlx5_priv *priv = dev->data->dev_private;
2602 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2603 mlx5_glue->destroy_flow_action(hrxq->action);
2605 priv->obj_ops.hrxq_destroy(hrxq);
2606 if (!hrxq->standalone) {
2607 mlx5_ind_table_obj_release(dev, hrxq->ind_table,
2610 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
2614 * Release the hash Rx queue.
2617 * Pointer to Ethernet device.
2619 * Index to Hash Rx queue to release.
2622 * mlx5 list pointer.
2624 * Hash queue entry pointer.
2627 mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
2629 struct rte_eth_dev *dev = tool_ctx;
2630 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2632 __mlx5_hrxq_remove(dev, hrxq);
2635 static struct mlx5_hrxq *
2636 __mlx5_hrxq_create(struct rte_eth_dev *dev,
2637 struct mlx5_flow_rss_desc *rss_desc)
2639 struct mlx5_priv *priv = dev->data->dev_private;
2640 const uint8_t *rss_key = rss_desc->key;
2641 uint32_t rss_key_len = rss_desc->key_len;
2642 bool standalone = !!rss_desc->shared_rss;
2643 const uint16_t *queues =
2644 standalone ? rss_desc->const_q : rss_desc->queue;
2645 uint32_t queues_n = rss_desc->queue_num;
2646 struct mlx5_hrxq *hrxq = NULL;
2647 uint32_t hrxq_idx = 0;
2648 struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
2651 queues_n = rss_desc->hash_fields ? queues_n : 1;
2653 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2655 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
2659 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2662 hrxq->standalone = standalone;
2663 hrxq->idx = hrxq_idx;
2664 hrxq->ind_table = ind_tbl;
2665 hrxq->rss_key_len = rss_key_len;
2666 hrxq->hash_fields = rss_desc->hash_fields;
2667 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2668 ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
2673 if (!rss_desc->ind_tbl)
2674 mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
2676 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2680 struct mlx5_list_entry *
2681 mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx)
2683 struct rte_eth_dev *dev = tool_ctx;
2684 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2685 struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2686 struct mlx5_hrxq *hrxq;
2688 hrxq = __mlx5_hrxq_create(dev, rss_desc);
2689 return hrxq ? &hrxq->entry : NULL;
2692 struct mlx5_list_entry *
2693 mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2694 void *cb_ctx __rte_unused)
2696 struct rte_eth_dev *dev = tool_ctx;
2697 struct mlx5_priv *priv = dev->data->dev_private;
2698 struct mlx5_hrxq *hrxq;
2699 uint32_t hrxq_idx = 0;
2701 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2704 memcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN);
2705 hrxq->idx = hrxq_idx;
2706 return &hrxq->entry;
2710 mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
2712 struct rte_eth_dev *dev = tool_ctx;
2713 struct mlx5_priv *priv = dev->data->dev_private;
2714 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2716 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
2720 * Get an Rx Hash queue.
2723 * Pointer to Ethernet device.
2725 * RSS configuration for the Rx hash queue.
2728 * A hash Rx queue index on success.
2730 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
2731 struct mlx5_flow_rss_desc *rss_desc)
2733 struct mlx5_priv *priv = dev->data->dev_private;
2734 struct mlx5_hrxq *hrxq;
2735 struct mlx5_list_entry *entry;
2736 struct mlx5_flow_cb_ctx ctx = {
2740 if (rss_desc->shared_rss) {
2741 hrxq = __mlx5_hrxq_create(dev, rss_desc);
2743 entry = mlx5_list_register(priv->hrxqs, &ctx);
2746 hrxq = container_of(entry, typeof(*hrxq), entry);
2754 * Release the hash Rx queue.
2757 * Pointer to Ethernet device.
2759 * Index to Hash Rx queue to release.
2762 * 1 while a reference on it exists, 0 when freed.
2764 int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2766 struct mlx5_priv *priv = dev->data->dev_private;
2767 struct mlx5_hrxq *hrxq;
2769 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2772 if (!hrxq->standalone)
2773 return mlx5_list_unregister(priv->hrxqs, &hrxq->entry);
2774 __mlx5_hrxq_remove(dev, hrxq);
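/*
 * Sketch of the get/release pairing for hash Rx queues, assuming a
 * descriptor filled by the flow layer: mlx5_hrxq_get() returns an index
 * into the hrxq ipool (0 meaning failure here), shared-RSS descriptors
 * bypass the cache and get a private hrxq, and every successful get is
 * balanced by mlx5_hrxq_release() on the returned index.
 *
 *   struct mlx5_flow_rss_desc rss_desc;
 *
 *   ... fill key, key_len, hash_fields, queue, queue_num ...
 *   uint32_t hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
 *
 *   if (hrxq_idx != 0) {
 *           ... reference the hrxq from flow rules ...
 *           mlx5_hrxq_release(dev, hrxq_idx);
 *   }
 */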
2779 * Create a drop Rx Hash queue.
2782 * Pointer to Ethernet device.
2785 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2788 mlx5_drop_action_create(struct rte_eth_dev *dev)
2790 struct mlx5_priv *priv = dev->data->dev_private;
2791 struct mlx5_hrxq *hrxq = NULL;
2794 if (priv->drop_queue.hrxq)
2795 return priv->drop_queue.hrxq;
2796 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2799 "Port %u cannot allocate memory for drop queue.",
2800 dev->data->port_id);
2804 priv->drop_queue.hrxq = hrxq;
2805 hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
2807 if (!hrxq->ind_table) {
2811 ret = priv->obj_ops.drop_action_create(dev);
2817 if (hrxq->ind_table)
2818 mlx5_free(hrxq->ind_table);
2819 priv->drop_queue.hrxq = NULL;
2826 * Release a drop hash Rx queue.
2829 * Pointer to Ethernet device.
2832 mlx5_drop_action_destroy(struct rte_eth_dev *dev)
2834 struct mlx5_priv *priv = dev->data->dev_private;
2835 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2837 if (!priv->drop_queue.hrxq)
2839 priv->obj_ops.drop_action_destroy(dev);
2840 mlx5_free(priv->drop_queue.rxq);
2841 mlx5_free(hrxq->ind_table);
2843 priv->drop_queue.rxq = NULL;
2844 priv->drop_queue.hrxq = NULL;
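/*
 * Sketch of the drop-queue lifecycle around the two functions above:
 * mlx5_drop_action_create() is idempotent (it returns the already-created
 * handle when priv->drop_queue.hrxq is set), and mlx5_drop_action_destroy()
 * releases the companion Rx queue, the indirection table and the hrxq.
 *
 *   struct mlx5_hrxq *drop = mlx5_drop_action_create(dev);
 *
 *   if (drop == NULL)
 *           return -rte_errno;
 *   ... use priv->drop_queue.hrxq for drop flow rules ...
 *   mlx5_drop_action_destroy(dev);
 */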
2848 * Verify the hash Rx queue list is empty.
2851 * Pointer to Ethernet device.
2854 * The number of objects not released.
2857 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2859 struct mlx5_priv *priv = dev->data->dev_private;
2861 return mlx5_list_get_entry_num(priv->hrxqs);
2865 * Set the Rx queue timestamp conversion parameters
2868 * Pointer to the Ethernet device structure.
2871 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2873 struct mlx5_priv *priv = dev->data->dev_private;
2874 struct mlx5_dev_ctx_shared *sh = priv->sh;
2877 for (i = 0; i != priv->rxqs_n; ++i) {
2878 struct mlx5_rxq_data *data = mlx5_rxq_data_get(dev, i);
2883 data->rt_timestamp = priv->config.rt_timestamp;