1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 #include <rte_malloc.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
20 #include <rte_eal_paging.h>
22 #include <mlx5_glue.h>
23 #include <mlx5_malloc.h>
25 #include "mlx5_defs.h"
27 #include "mlx5_rxtx.h"
28 #include "mlx5_utils.h"
29 #include "mlx5_autoconf.h"
32 /* Default RSS hash key also used for ConnectX-3. */
33 uint8_t rss_hash_default_key[] = {
34 0x2c, 0xc6, 0x81, 0xd1,
35 0x5b, 0xdb, 0xf4, 0xf7,
36 0xfc, 0xa2, 0x83, 0x19,
37 0xdb, 0x1a, 0x3e, 0x94,
38 0x6b, 0x9e, 0x38, 0xd9,
39 0x2c, 0x9c, 0x03, 0xd1,
40 0xad, 0x99, 0x44, 0xa7,
41 0xd9, 0x56, 0x3d, 0x59,
42 0x06, 0x3c, 0x25, 0xf3,
43 0xfc, 0x1f, 0xdc, 0x2a,
46 /* Length of the default RSS hash key. */
47 static_assert(MLX5_RSS_HASH_KEY_LEN ==
48 (unsigned int)sizeof(rss_hash_default_key),
49 "wrong RSS default key size.");
52 * Check whether Multi-Packet RQ can be enabled for the device.
55 * Pointer to Ethernet device.
58 * 1 if supported, negative errno value if not.
61 mlx5_check_mprq_support(struct rte_eth_dev *dev)
63 struct mlx5_priv *priv = dev->data->dev_private;
65 if (priv->config.mprq.enabled &&
66 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
72 * Check whether Multi-Packet RQ is enabled for the Rx queue.
75 * Pointer to receive queue structure.
78 * 0 if disabled, otherwise enabled.
81 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
83 return rxq->strd_num_n > 0;
87 * Check whether Multi-Packet RQ is enabled for the device.
90 * Pointer to Ethernet device.
93 * 0 if disabled, otherwise enabled.
96 mlx5_mprq_enabled(struct rte_eth_dev *dev)
98 struct mlx5_priv *priv = dev->data->dev_private;
103 if (mlx5_check_mprq_support(dev) < 0)
105 /* All the configured queues should be enabled. */
106 for (i = 0; i < priv->rxqs_n; ++i) {
107 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
108 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
109 (rxq, struct mlx5_rxq_ctrl, rxq);
111 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
114 if (mlx5_rxq_mprq_enabled(rxq))
117 /* Multi-Packet RQ can't be partially configured. */
118 MLX5_ASSERT(n == 0 || n == n_ibv);
123 * Calculate the number of CQEs in CQ for the Rx queue.
126 * Pointer to receive queue structure.
129 * Number of CQEs in CQ.
132 mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
135 unsigned int wqe_n = 1 << rxq_data->elts_n;
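	/*
	 * With Multi-Packet RQ every stride may produce its own CQE, so the
	 * CQ has to be sized for wqe_n * (1 << strd_num_n) completions;
	 * otherwise one CQE per WQE is sufficient (see the computation below).
	 */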
137 if (mlx5_rxq_mprq_enabled(rxq_data))
138 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
145 * Allocate RX queue elements for Multi-Packet RQ.
148 * Pointer to RX queue structure.
151 * 0 on success, a negative errno value otherwise and rte_errno is set.
154 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
156 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
157 unsigned int wqe_n = 1 << rxq->elts_n;
161 /* Iterate on segments. */
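	/*
	 * Note: the loop runs wqe_n + 1 times on purpose; the first wqe_n
	 * buffers populate the ring and the extra one is kept aside as the
	 * replacement buffer (mprq_repl).
	 */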
162 for (i = 0; i <= wqe_n; ++i) {
163 struct mlx5_mprq_buf *buf;
165 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
166 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
171 (*rxq->mprq_bufs)[i] = buf;
173 rxq->mprq_repl = buf;
176 "port %u MPRQ queue %u allocated and configured %u segments",
177 rxq->port_id, rxq->idx, wqe_n);
180 err = rte_errno; /* Save rte_errno before cleanup. */
182 for (i = 0; (i != wqe_n); ++i) {
183 if ((*rxq->mprq_bufs)[i] != NULL)
184 rte_mempool_put(rxq->mprq_mp,
185 (*rxq->mprq_bufs)[i]);
186 (*rxq->mprq_bufs)[i] = NULL;
188 DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
189 rxq->port_id, rxq->idx);
190 rte_errno = err; /* Restore rte_errno. */
195 * Allocate RX queue elements for Single-Packet RQ.
198 * Pointer to RX queue structure.
201 * 0 on success, errno value on failure.
204 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
206 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
207 unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
208 (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
209 (1 << rxq_ctrl->rxq.elts_n);
213 /* Iterate on segments. */
214 for (i = 0; (i != elts_n); ++i) {
215 struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
216 struct rte_mbuf *buf;
218 buf = rte_pktmbuf_alloc(seg->mp);
220 DRV_LOG(ERR, "port %u empty mbuf pool",
221 PORT_ID(rxq_ctrl->priv));
225 /* Headroom is reserved by rte_pktmbuf_alloc(). */
226 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
227 /* Buffer is supposed to be empty. */
228 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
229 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
230 MLX5_ASSERT(!buf->next);
231 SET_DATA_OFF(buf, seg->offset);
232 PORT(buf) = rxq_ctrl->rxq.port_id;
233 DATA_LEN(buf) = seg->length;
234 PKT_LEN(buf) = seg->length;
236 (*rxq_ctrl->rxq.elts)[i] = buf;
238 /* If Rx vector is activated. */
239 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
240 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
241 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
242 struct rte_pktmbuf_pool_private *priv =
243 (struct rte_pktmbuf_pool_private *)
244 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
247 /* Initialize default rearm_data for vPMD. */
248 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
249 rte_mbuf_refcnt_set(mbuf_init, 1);
250 mbuf_init->nb_segs = 1;
251 mbuf_init->port = rxq->port_id;
252 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
253 mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
255 * prevent compiler reordering:
256 * rearm_data covers previous fields.
258 rte_compiler_barrier();
259 rxq->mbuf_initializer =
260 *(rte_xmm_t *)&mbuf_init->rearm_data;
261 /* Padding with a fake mbuf for vectorized Rx. */
262 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
263 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
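		/*
		 * The extra MLX5_VPMD_DESCS_PER_LOOP entries are presumably
		 * read by the vectorized burst when it loads descriptors past
		 * the ring end; pointing them at the fake mbuf keeps such
		 * speculative accesses harmless.
		 */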
266 "port %u SPRQ queue %u allocated and configured %u segments"
268 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
269 elts_n / (1 << rxq_ctrl->rxq.sges_n));
272 err = rte_errno; /* Save rte_errno before cleanup. */
274 for (i = 0; (i != elts_n); ++i) {
275 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
276 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
277 (*rxq_ctrl->rxq.elts)[i] = NULL;
279 DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
280 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
281 rte_errno = err; /* Restore rte_errno. */
286 * Allocate RX queue elements.
289 * Pointer to RX queue structure.
292 * 0 on success, errno value on failure.
295 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
300 * For MPRQ we need to allocate both MPRQ buffers
301 * for WQEs and simple mbufs for vector processing.
303 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
304 ret = rxq_alloc_elts_mprq(rxq_ctrl);
305 return (ret || rxq_alloc_elts_sprq(rxq_ctrl));
309 * Free RX queue elements for Multi-Packet RQ.
312 * Pointer to RX queue structure.
315 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
317 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
320 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
321 rxq->port_id, rxq->idx, (1u << rxq->elts_n));
322 if (rxq->mprq_bufs == NULL)
324 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
325 if ((*rxq->mprq_bufs)[i] != NULL)
326 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
327 (*rxq->mprq_bufs)[i] = NULL;
329 if (rxq->mprq_repl != NULL) {
330 mlx5_mprq_buf_free(rxq->mprq_repl);
331 rxq->mprq_repl = NULL;
336 * Free RX queue elements for Single-Packet RQ.
339 * Pointer to RX queue structure.
342 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
344 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
345 const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
346 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
348 const uint16_t q_mask = q_n - 1;
349 uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
350 rxq->elts_ci : rxq->rq_ci;
351 uint16_t used = q_n - (elts_ci - rxq->rq_pi);
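	/*
	 * 'used' above counts the ring entries whose mbufs were already
	 * handed to the application when the vectorized Rx path is in use;
	 * those entries are cleared rather than freed.
	 */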
354 DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
355 PORT_ID(rxq_ctrl->priv), rxq->idx, q_n);
356 if (rxq->elts == NULL)
359	 * Some mbufs in the ring belong to the application;
360	 * they cannot be freed.
362 if (mlx5_rxq_check_vec_support(rxq) > 0) {
363 for (i = 0; i < used; ++i)
364 (*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
365 rxq->rq_pi = elts_ci;
367 for (i = 0; i != q_n; ++i) {
368 if ((*rxq->elts)[i] != NULL)
369 rte_pktmbuf_free_seg((*rxq->elts)[i]);
370 (*rxq->elts)[i] = NULL;
375 * Free RX queue elements.
378 * Pointer to RX queue structure.
381 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
384	 * For MPRQ we need to free both the MPRQ buffers
385	 * for WQEs and the simple mbufs for vector processing.
387 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
388 rxq_free_elts_mprq(rxq_ctrl);
389 rxq_free_elts_sprq(rxq_ctrl);
393 * Returns the per-queue supported offloads.
396 * Pointer to Ethernet device.
399 * Supported Rx offloads.
402 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
404 struct mlx5_priv *priv = dev->data->dev_private;
405 struct mlx5_dev_config *config = &priv->config;
406 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
407 DEV_RX_OFFLOAD_TIMESTAMP |
408 DEV_RX_OFFLOAD_JUMBO_FRAME |
409 DEV_RX_OFFLOAD_RSS_HASH);
411 if (!config->mprq.enabled)
412 offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
413 if (config->hw_fcs_strip)
414 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
416 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
417 DEV_RX_OFFLOAD_UDP_CKSUM |
418 DEV_RX_OFFLOAD_TCP_CKSUM);
419 if (config->hw_vlan_strip)
420 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
421 if (MLX5_LRO_SUPPORTED(dev))
422 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
428 * Returns the per-port supported offloads.
431 * Supported Rx offloads.
434 mlx5_get_rx_port_offloads(void)
436 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
442 * Verify if the queue can be released.
445 * Pointer to Ethernet device.
450  * 1 if the queue can be released.
451  * 0 if the queue cannot be released (there are references to it).
452  * Negative errno and rte_errno is set if the queue doesn't exist.
455 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
457 struct mlx5_priv *priv = dev->data->dev_private;
458 struct mlx5_rxq_ctrl *rxq_ctrl;
460 if (!(*priv->rxqs)[idx]) {
464 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
465 return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
468 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
470 rxq_sync_cq(struct mlx5_rxq_data *rxq)
472 const uint16_t cqe_n = 1 << rxq->cqe_n;
473 const uint16_t cqe_mask = cqe_n - 1;
474 volatile struct mlx5_cqe *cqe;
479 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
480 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
481 if (ret == MLX5_CQE_STATUS_HW_OWN)
483 if (ret == MLX5_CQE_STATUS_ERR) {
487 MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
488 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
492 /* Compute the next non compressed CQE. */
493 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
496 /* Move all CQEs to HW ownership, including possible MiniCQEs. */
497 for (i = 0; i < cqe_n; i++) {
498 cqe = &(*rxq->cqes)[i];
499 cqe->op_own = MLX5_CQE_INVALIDATE;
501 /* Resync CQE and WQE (WQ in RESET state). */
503 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
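	/*
	 * The RQ doorbell is reset to zero: the WQ is expected to be in RESET
	 * state here and will be repopulated from index 0 on restart.
	 */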
505 *rxq->rq_db = rte_cpu_to_be_32(0);
510 * Rx queue stop. Device queue goes to the RESET state,
511 * all involved mbufs are freed from WQ.
514 * Pointer to Ethernet device structure.
519 * 0 on success, a negative errno value otherwise and rte_errno is set.
522 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
524 struct mlx5_priv *priv = dev->data->dev_private;
525 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
526 struct mlx5_rxq_ctrl *rxq_ctrl =
527 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
530 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
531 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
533 DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
538	/* Remove all processed CQEs. */
540 /* Free all involved mbufs. */
541 rxq_free_elts(rxq_ctrl);
542 /* Set the actual queue state. */
543 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
548 * Rx queue stop. Device queue goes to the RESET state,
549 * all involved mbufs are freed from WQ.
552 * Pointer to Ethernet device structure.
557 * 0 on success, a negative errno value otherwise and rte_errno is set.
560 mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
562 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
565 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
566 DRV_LOG(ERR, "Hairpin queue can't be stopped");
570 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
573	 * Vectorized Rx burst requires the CQ and RQ indices to stay
574	 * synchronized; an RQ restart may break that and cause Rx
575	 * malfunction, so stopping the queue is not supported while
576	 * the vectorized Rx burst is engaged.
577	 * The burst routine pointer depends on the process type,
578	 * so the check must be performed here.
580 if (pkt_burst == mlx5_rx_burst_vec) {
581 DRV_LOG(ERR, "Rx queue stop is not supported "
582 "for vectorized Rx");
586 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
587 ret = mlx5_mp_os_req_queue_control(dev, idx,
588 MLX5_MP_REQ_QUEUE_RX_STOP);
590 ret = mlx5_rx_queue_stop_primary(dev, idx);
596 * Rx queue start. Device queue goes to the ready state,
597 * all required mbufs are allocated and WQ is replenished.
600 * Pointer to Ethernet device structure.
605 * 0 on success, a negative errno value otherwise and rte_errno is set.
608 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
610 struct mlx5_priv *priv = dev->data->dev_private;
611 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
612 struct mlx5_rxq_ctrl *rxq_ctrl =
613 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
616 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
617 /* Allocate needed buffers. */
618 ret = rxq_alloc_elts(rxq_ctrl);
620 DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
625 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
627 /* Reset RQ consumer before moving queue to READY state. */
628 *rxq->rq_db = rte_cpu_to_be_32(0);
630 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
632 DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
637 /* Reinitialize RQ - set WQEs. */
638 mlx5_rxq_initialize(rxq);
639 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
640 /* Set actual queue state. */
641 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
646 * Rx queue start. Device queue goes to the ready state,
647 * all required mbufs are allocated and WQ is replenished.
650 * Pointer to Ethernet device structure.
655 * 0 on success, a negative errno value otherwise and rte_errno is set.
658 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
662 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
663 DRV_LOG(ERR, "Hairpin queue can't be started");
667 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
669 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
670 ret = mlx5_mp_os_req_queue_control(dev, idx,
671 MLX5_MP_REQ_QUEUE_RX_START);
673 ret = mlx5_rx_queue_start_primary(dev, idx);
679 * Rx queue presetup checks.
682 * Pointer to Ethernet device structure.
686 * Number of descriptors to configure in queue.
689 * 0 on success, a negative errno value otherwise and rte_errno is set.
692 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
694 struct mlx5_priv *priv = dev->data->dev_private;
696 if (!rte_is_power_of_2(*desc)) {
697 *desc = 1 << log2above(*desc);
699 "port %u increased number of descriptors in Rx queue %u"
700 " to the next power of two (%d)",
701 dev->data->port_id, idx, *desc);
703 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
704 dev->data->port_id, idx, *desc);
705 if (idx >= priv->rxqs_n) {
706 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
707 dev->data->port_id, idx, priv->rxqs_n);
708 rte_errno = EOVERFLOW;
711 if (!mlx5_rxq_releasable(dev, idx)) {
712 DRV_LOG(ERR, "port %u unable to release queue index %u",
713 dev->data->port_id, idx);
717 mlx5_rxq_release(dev, idx);
724 * Pointer to Ethernet device structure.
728 * Number of descriptors to configure in queue.
730 * NUMA socket on which memory must be allocated.
732 * Thresholds parameters.
734 * Memory pool for buffer allocations.
737 * 0 on success, a negative errno value otherwise and rte_errno is set.
740 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
741 unsigned int socket, const struct rte_eth_rxconf *conf,
742 struct rte_mempool *mp)
744 struct mlx5_priv *priv = dev->data->dev_private;
745 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
746 struct mlx5_rxq_ctrl *rxq_ctrl =
747 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
748 struct rte_eth_rxseg_split *rx_seg =
749 (struct rte_eth_rxseg_split *)conf->rx_seg;
750 struct rte_eth_rxseg_split rx_single = {.mp = mp};
751 uint16_t n_seg = conf->rx_nseg;
756	 * The parameters should be checked on the rte_eth_dev layer.
757	 * If mp is specified, it means a compatible (single mempool,
758	 * no buffer split tuning) configuration is requested.
764 uint64_t offloads = conf->offloads |
765 dev->data->dev_conf.rxmode.offloads;
767 /* The offloads should be checked on rte_eth_dev layer. */
768 MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
769 if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
770 DRV_LOG(ERR, "port %u queue index %u split "
771 "offload not configured",
772 dev->data->port_id, idx);
776 MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
778 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
781 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
783 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
784 dev->data->port_id, idx);
788 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
789 dev->data->port_id, idx);
790 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
797 * Pointer to Ethernet device structure.
801 * Number of descriptors to configure in queue.
802 * @param hairpin_conf
803 * Hairpin configuration parameters.
806 * 0 on success, a negative errno value otherwise and rte_errno is set.
809 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
811 const struct rte_eth_hairpin_conf *hairpin_conf)
813 struct mlx5_priv *priv = dev->data->dev_private;
814 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
815 struct mlx5_rxq_ctrl *rxq_ctrl =
816 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
819 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
822 if (hairpin_conf->peer_count != 1) {
824 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue index %u"
825 " peer count is %u", dev->data->port_id,
826 idx, hairpin_conf->peer_count);
829 if (hairpin_conf->peers[0].port == dev->data->port_id) {
830 if (hairpin_conf->peers[0].queue >= priv->txqs_n) {
832 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
833 " index %u, Tx %u is larger than %u",
834 dev->data->port_id, idx,
835 hairpin_conf->peers[0].queue, priv->txqs_n);
839 if (hairpin_conf->manual_bind == 0 ||
840 hairpin_conf->tx_explicit == 0) {
842 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
843 " index %u peer port %u with attributes %u %u",
844 dev->data->port_id, idx,
845 hairpin_conf->peers[0].port,
846 hairpin_conf->manual_bind,
847 hairpin_conf->tx_explicit);
851 rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
853 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
854 dev->data->port_id, idx);
858 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
859 dev->data->port_id, idx);
860 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
865  * DPDK callback to release an Rx queue.
868 * Generic RX queue pointer.
871 mlx5_rx_queue_release(void *dpdk_rxq)
873 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
874 struct mlx5_rxq_ctrl *rxq_ctrl;
875 struct mlx5_priv *priv;
879 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
880 priv = rxq_ctrl->priv;
881 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
882 rte_panic("port %u Rx queue %u is still used by a flow and"
883 " cannot be removed\n",
884 PORT_ID(priv), rxq->idx);
885 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
889 * Allocate queue vector and fill epoll fd list for Rx interrupts.
892 * Pointer to Ethernet device.
895 * 0 on success, a negative errno value otherwise and rte_errno is set.
898 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
900 struct mlx5_priv *priv = dev->data->dev_private;
902 unsigned int rxqs_n = priv->rxqs_n;
903 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
904 unsigned int count = 0;
905 struct rte_intr_handle *intr_handle = dev->intr_handle;
907 /* Representor shares dev->intr_handle with PF. */
908 if (priv->representor)
910 if (!dev->data->dev_conf.intr_conf.rxq)
912 mlx5_rx_intr_vec_disable(dev);
913 intr_handle->intr_vec = mlx5_malloc(0,
914 n * sizeof(intr_handle->intr_vec[0]),
916 if (intr_handle->intr_vec == NULL) {
918 "port %u failed to allocate memory for interrupt"
919 " vector, Rx interrupts will not be supported",
924 intr_handle->type = RTE_INTR_HANDLE_EXT;
925 for (i = 0; i != n; ++i) {
926 /* This rxq obj must not be released in this function. */
927 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
928 struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
931 /* Skip queues that cannot request interrupts. */
932 if (!rxq_obj || (!rxq_obj->ibv_channel &&
933 !rxq_obj->devx_channel)) {
934 /* Use invalid intr_vec[] index to disable entry. */
935 intr_handle->intr_vec[i] =
936 RTE_INTR_VEC_RXTX_OFFSET +
937 RTE_MAX_RXTX_INTR_VEC_ID;
938 /* Decrease the rxq_ctrl's refcnt */
940 mlx5_rxq_release(dev, i);
943 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
945 "port %u too many Rx queues for interrupt"
946 " vector size (%d), Rx interrupts cannot be"
948 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
949 mlx5_rx_intr_vec_disable(dev);
953 rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
957 "port %u failed to make Rx interrupt file"
958 " descriptor %d non-blocking for queue index"
960 dev->data->port_id, rxq_obj->fd, i);
961 mlx5_rx_intr_vec_disable(dev);
964 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
965 intr_handle->efds[count] = rxq_obj->fd;
969 mlx5_rx_intr_vec_disable(dev);
971 intr_handle->nb_efd = count;
976 * Clean up Rx interrupts handler.
979 * Pointer to Ethernet device.
982 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
984 struct mlx5_priv *priv = dev->data->dev_private;
985 struct rte_intr_handle *intr_handle = dev->intr_handle;
987 unsigned int rxqs_n = priv->rxqs_n;
988 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
990 /* Representor shares dev->intr_handle with PF. */
991 if (priv->representor)
993 if (!dev->data->dev_conf.intr_conf.rxq)
995 if (!intr_handle->intr_vec)
997 for (i = 0; i != n; ++i) {
998 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
999 RTE_MAX_RXTX_INTR_VEC_ID)
1002		 * Need to access the queue directly to release the reference
1003		 * kept in mlx5_rx_intr_vec_enable().
1005 mlx5_rxq_release(dev, i);
1008 rte_intr_free_epoll_fd(intr_handle);
1009 if (intr_handle->intr_vec)
1010 mlx5_free(intr_handle->intr_vec);
1011 intr_handle->nb_efd = 0;
1012 intr_handle->intr_vec = NULL;
1016  * MLX5 CQ notification.
1019 * Pointer to receive queue structure.
1021  * Sequence number per receive queue.
1024 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
1027 uint32_t doorbell_hi;
1029 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
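	/*
	 * The 64-bit arm doorbell written below carries the arm sequence
	 * number and the current CQ consumer index in the upper 32 bits and
	 * the CQ number in the lower 32 bits.
	 */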
1031 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
1032 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
1033 doorbell = (uint64_t)doorbell_hi << 32;
1034 doorbell |= rxq->cqn;
1035 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
1036 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
1037 cq_db_reg, rxq->uar_lock_cq);
1041 * DPDK callback for Rx queue interrupt enable.
1044 * Pointer to Ethernet device structure.
1045 * @param rx_queue_id
1049 * 0 on success, a negative errno value otherwise and rte_errno is set.
1052 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1054 struct mlx5_rxq_ctrl *rxq_ctrl;
1056 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1059 if (rxq_ctrl->irq) {
1060 if (!rxq_ctrl->obj) {
1061 mlx5_rxq_release(dev, rx_queue_id);
1064 mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
1066 mlx5_rxq_release(dev, rx_queue_id);
1074 * DPDK callback for Rx queue interrupt disable.
1077 * Pointer to Ethernet device structure.
1078 * @param rx_queue_id
1082 * 0 on success, a negative errno value otherwise and rte_errno is set.
1085 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1087 struct mlx5_priv *priv = dev->data->dev_private;
1088 struct mlx5_rxq_ctrl *rxq_ctrl;
1091 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1098 if (rxq_ctrl->irq) {
1099 ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
1102 rxq_ctrl->rxq.cq_arm_sn++;
1104 mlx5_rxq_release(dev, rx_queue_id);
1108	 * The ret variable may be EAGAIN, which means the get_event function was
1109	 * called before any event was received.
1115 ret = rte_errno; /* Save rte_errno before cleanup. */
1116 mlx5_rxq_release(dev, rx_queue_id);
1118 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
1119 dev->data->port_id, rx_queue_id);
1120 rte_errno = ret; /* Restore rte_errno. */
1125 * Verify the Rx queue objects list is empty
1128 * Pointer to Ethernet device.
1131 * The number of objects not released.
1134 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1136 struct mlx5_priv *priv = dev->data->dev_private;
1138 struct mlx5_rxq_obj *rxq_obj;
1140 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1141 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1142 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1149 * Callback function to initialize mbufs for Multi-Packet RQ.
1152 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1153 void *_m, unsigned int i __rte_unused)
1155 struct mlx5_mprq_buf *buf = _m;
1156 struct rte_mbuf_ext_shared_info *shinfo;
1157 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1160 memset(_m, 0, sizeof(*buf));
1162 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1163 for (j = 0; j != strd_n; ++j) {
1164 shinfo = &buf->shinfos[j];
1165 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1166 shinfo->fcb_opaque = buf;
1171 * Free mempool of Multi-Packet RQ.
1174 * Pointer to Ethernet device.
1177 * 0 on success, negative errno value on failure.
1180 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1182 struct mlx5_priv *priv = dev->data->dev_private;
1183 struct rte_mempool *mp = priv->mprq_mp;
1188 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1189 dev->data->port_id, mp->name);
1191	 * If a buffer in the pool has been externally attached to a mbuf and it
1192	 * is still in use by the application, destroying the Rx queue can spoil
1193	 * the packet. It is unlikely to happen, but it can if the application
1194	 * dynamically creates and destroys queues while holding Rx packets.
1196	 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1197	 * RQ isn't provided by the application but managed by the PMD.
1199 if (!rte_mempool_full(mp)) {
1201 "port %u mempool for Multi-Packet RQ is still in use",
1202 dev->data->port_id);
1206 rte_mempool_free(mp);
1207 /* Unset mempool for each Rx queue. */
1208 for (i = 0; i != priv->rxqs_n; ++i) {
1209 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1213 rxq->mprq_mp = NULL;
1215 priv->mprq_mp = NULL;
1220 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1221  * mempool. If already allocated, reuse it if there are enough elements.
1222 * Otherwise, resize it.
1225 * Pointer to Ethernet device.
1228 * 0 on success, negative errno value on failure.
1231 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1233 struct mlx5_priv *priv = dev->data->dev_private;
1234 struct rte_mempool *mp = priv->mprq_mp;
1235 char name[RTE_MEMPOOL_NAMESIZE];
1236 unsigned int desc = 0;
1237 unsigned int buf_len;
1238 unsigned int obj_num;
1239 unsigned int obj_size;
1240 unsigned int strd_num_n = 0;
1241 unsigned int strd_sz_n = 0;
1243 unsigned int n_ibv = 0;
1245 if (!mlx5_mprq_enabled(dev))
1247 /* Count the total number of descriptors configured. */
1248 for (i = 0; i != priv->rxqs_n; ++i) {
1249 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1250 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1251 (rxq, struct mlx5_rxq_ctrl, rxq);
1253 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1256 desc += 1 << rxq->elts_n;
1257 /* Get the max number of strides. */
1258 if (strd_num_n < rxq->strd_num_n)
1259 strd_num_n = rxq->strd_num_n;
1260 /* Get the max size of a stride. */
1261 if (strd_sz_n < rxq->strd_sz_n)
1262 strd_sz_n = rxq->strd_sz_n;
1264 MLX5_ASSERT(strd_num_n && strd_sz_n);
1265 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1266 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1267 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
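	/*
	 * Each mempool object therefore consists of the mlx5_mprq_buf header,
	 * the stride data area, one rte_mbuf_ext_shared_info per stride and
	 * the mbuf headroom.
	 */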
1269	 * Received packets can be either memcpy'd or externally referenced.
1270	 * When a packet is attached to an mbuf as an external buffer, it is
1271	 * not possible to predict how the buffers will be queued by the
1272	 * application, so the needed buffers cannot be pre-allocated exactly
1273	 * in advance; enough buffers have to be prepared speculatively.
1275	 * In the data path, if this mempool is depleted, the PMD will try to
1276	 * memcpy received packets into buffers provided by the application
1277	 * (rxq->mp) until this mempool becomes available again.
1280 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1282	 * rte_mempool_create_empty() has a sanity check that refuses a large
1283	 * cache size compared to the number of elements.
1284	 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the
1285	 * constant number 2 is used instead.
1287 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1288	/* Check whether a mempool is already allocated and if it can be reused. */
1289 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1290 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1291 dev->data->port_id, mp->name);
1294 } else if (mp != NULL) {
1295 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1296 dev->data->port_id, mp->name);
1298		 * If freeing fails, the mempool may still be in use and there is
1299		 * no way but to keep using the existing one. On buffer underrun,
1300		 * packets will be memcpy'd instead of using the external buffer
1303 if (mlx5_mprq_free_mp(dev)) {
1304 if (mp->elt_size >= obj_size)
1310 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1311 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1312 0, NULL, NULL, mlx5_mprq_buf_init,
1313 (void *)(uintptr_t)(1 << strd_num_n),
1314 dev->device->numa_node, 0);
1317 "port %u failed to allocate a mempool for"
1318 " Multi-Packet RQ, count=%u, size=%u",
1319 dev->data->port_id, obj_num, obj_size);
1325 /* Set mempool for each Rx queue. */
1326 for (i = 0; i != priv->rxqs_n; ++i) {
1327 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1328 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1329 (rxq, struct mlx5_rxq_ctrl, rxq);
1331 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1335 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1336 dev->data->port_id);
1340 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1341 sizeof(struct rte_vlan_hdr) * 2 + \
1342 sizeof(struct rte_ipv6_hdr)))
1343 #define MAX_TCP_OPTION_SIZE 40u
1344 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1345 sizeof(struct rte_tcp_hdr) + \
1346 MAX_TCP_OPTION_SIZE))
1349  * Adjust the maximum LRO message size.
1352 * Pointer to Ethernet device.
1355 * @param max_lro_size
1356 * The maximum size for LRO packet.
1359 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1360 uint32_t max_lro_size)
1362 struct mlx5_priv *priv = dev->data->dev_private;
1364 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1365 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1366 MLX5_MAX_TCP_HDR_OFFSET)
1367 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1368 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1369 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1370 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
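	/*
	 * From this point max_lro_size is expressed in units of
	 * MLX5_LRO_SEG_CHUNK_SIZE, which appears to be the granularity the
	 * device expects; the log message below converts it back to bytes.
	 */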
1371 if (priv->max_lro_msg_size)
1372 priv->max_lro_msg_size =
1373 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1375 priv->max_lro_msg_size = max_lro_size;
1377 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1378 dev->data->port_id, idx,
1379 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1383 * Create a DPDK Rx queue.
1386 * Pointer to Ethernet device.
1390 * Number of descriptors to configure in queue.
1392 * NUMA socket on which memory must be allocated.
1395 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1397 struct mlx5_rxq_ctrl *
1398 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1399 unsigned int socket, const struct rte_eth_rxconf *conf,
1400 const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
1402 struct mlx5_priv *priv = dev->data->dev_private;
1403 struct mlx5_rxq_ctrl *tmpl;
1404 unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
1405 struct mlx5_dev_config *config = &priv->config;
1406 uint64_t offloads = conf->offloads |
1407 dev->data->dev_conf.rxmode.offloads;
1408 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1409 unsigned int max_rx_pkt_len = lro_on_queue ?
1410 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1411 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1412 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1413 RTE_PKTMBUF_HEADROOM;
1414 unsigned int max_lro_size = 0;
1415 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1416 const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
1417 !rx_seg[0].offset && !rx_seg[0].length;
1418 unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
1419 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1420 unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
1421 (1U << config->mprq.max_stride_size_n) ?
1422 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1423 unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
1424 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1425 (config->mprq.stride_size_n ?
1426 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
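	/*
	 * mprq_stride_cap is the number of bytes a single MPRQ WQE can hold
	 * (strides per WQE times stride size); it is compared against the
	 * maximum packet size when deciding whether MPRQ can be enabled.
	 */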
1428 * Always allocate extra slots, even if eventually
1429 * the vector Rx will not be used.
1431 uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1432 const struct rte_eth_rxseg_split *qs_seg = rx_seg;
1433 unsigned int tail_len;
1435 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1436 sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
1438 (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
1444 MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
1446 * Build the array of actual buffer offsets and lengths.
1447 * Pad with the buffers from the last memory pool if
1448 * needed to handle max size packets, replace zero length
1449 * with the buffer length from the pool.
1451 tail_len = max_rx_pkt_len;
1453 struct mlx5_eth_rxseg *hw_seg =
1454 &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
1455 uint32_t buf_len, offset, seg_len;
1458		 * For the buffers beyond the provided descriptions the offset
1459		 * is zero; only the first buffer contains the headroom.
1461 buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
1462 offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
1463 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
1465		 * For the buffers beyond the provided descriptions the length
1466		 * is the pool buffer length; zero lengths are replaced with
1467		 * the pool buffer length as well.
1469 seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
1473		/* Check is done in long int, no overflows. */
1474 if (buf_len < seg_len + offset) {
1475 DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
1476 "%u/%u can't be satisfied",
1477 dev->data->port_id, idx,
1478 qs_seg->length, qs_seg->offset);
1482 if (seg_len > tail_len)
1483 seg_len = buf_len - offset;
1484 if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
1486 "port %u too many SGEs (%u) needed to handle"
1487 " requested maximum packet size %u, the maximum"
1488 " supported are %u", dev->data->port_id,
1489 tmpl->rxq.rxseg_n, max_rx_pkt_len,
1491 rte_errno = ENOTSUP;
1494 /* Build the actual scattering element in the queue object. */
1495 hw_seg->mp = qs_seg->mp;
1496 MLX5_ASSERT(offset <= UINT16_MAX);
1497 MLX5_ASSERT(seg_len <= UINT16_MAX);
1498 hw_seg->offset = (uint16_t)offset;
1499 hw_seg->length = (uint16_t)seg_len;
1501		 * Advance the segment descriptor; the padding is based
1502		 * on the attributes of the last descriptor.
1504 if (tmpl->rxq.rxseg_n < n_seg)
1506 tail_len -= RTE_MIN(tail_len, seg_len);
1507 } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
1508 MLX5_ASSERT(tmpl->rxq.rxseg_n &&
1509 tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
1510 if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
1511 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1512			" configured and not enough mbuf space (%u) to contain "
1513			"the maximum RX packet length (%u) with headroom (%u)",
1514 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1515 RTE_PKTMBUF_HEADROOM);
1519 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1520 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1521 MLX5_MR_BTREE_CACHE_N, socket)) {
1522 /* rte_errno is already set. */
1525 tmpl->socket = socket;
1526 if (dev->data->dev_conf.intr_conf.rxq)
1529 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1530 * following conditions are met:
1531 * - MPRQ is enabled.
1532 * - The number of descs is more than the number of strides.
1533 * - max_rx_pkt_len plus overhead is less than the max size
1534 * of a stride or mprq_stride_size is specified by a user.
1535	 *    Need to make sure that there are enough strides to encapsulate
1536	 *    the maximum packet size in case mprq_stride_size is set.
1537 * Otherwise, enable Rx scatter if necessary.
1539 if (mprq_en && desc > (1U << mprq_stride_nums) &&
1540 (non_scatter_min_mbuf_size <=
1541 (1U << config->mprq.max_stride_size_n) ||
1542 (config->mprq.stride_size_n &&
1543 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1544 /* TODO: Rx scatter isn't supported yet. */
1545 tmpl->rxq.sges_n = 0;
1546 /* Trim the number of descs needed. */
1547 desc >>= mprq_stride_nums;
1548 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1549 config->mprq.stride_num_n : mprq_stride_nums;
1550 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1551 config->mprq.stride_size_n : mprq_stride_size;
1552 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1553 tmpl->rxq.strd_scatter_en =
1554 !!(offloads & DEV_RX_OFFLOAD_SCATTER);
1555 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1556 config->mprq.max_memcpy_len);
1557 max_lro_size = RTE_MIN(max_rx_pkt_len,
1558 (1u << tmpl->rxq.strd_num_n) *
1559 (1u << tmpl->rxq.strd_sz_n));
1561 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1562 " strd_num_n = %u, strd_sz_n = %u",
1563 dev->data->port_id, idx,
1564 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1565 } else if (tmpl->rxq.rxseg_n == 1) {
1566 MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
1567 tmpl->rxq.sges_n = 0;
1568 max_lro_size = max_rx_pkt_len;
1569 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1570 unsigned int sges_n;
1572 if (lro_on_queue && first_mb_free_size <
1573 MLX5_MAX_LRO_HEADER_FIX) {
1574 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1575 " to include the max header size(%u) for LRO",
1576 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1577 rte_errno = ENOTSUP;
1581 * Determine the number of SGEs needed for a full packet
1582 * and round it to the next power of two.
1584 sges_n = log2above(tmpl->rxq.rxseg_n);
1585 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1587 "port %u too many SGEs (%u) needed to handle"
1588 " requested maximum packet size %u, the maximum"
1589 " supported are %u", dev->data->port_id,
1590 1 << sges_n, max_rx_pkt_len,
1591 1u << MLX5_MAX_LOG_RQ_SEGS);
1592 rte_errno = ENOTSUP;
1595 tmpl->rxq.sges_n = sges_n;
1596 max_lro_size = max_rx_pkt_len;
1598 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1600 "port %u MPRQ is requested but cannot be enabled\n"
1601 " (requested: pkt_sz = %u, desc_num = %u,"
1602 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1603 " supported: min_rxqs_num = %u,"
1604 " min_stride_sz = %u, max_stride_sz = %u).",
1605 dev->data->port_id, non_scatter_min_mbuf_size,
1607 config->mprq.stride_size_n ?
1608 (1U << config->mprq.stride_size_n) :
1609 (1U << mprq_stride_size),
1610 config->mprq.stride_num_n ?
1611 (1U << config->mprq.stride_num_n) :
1612 (1U << mprq_stride_nums),
1613 config->mprq.min_rxqs_num,
1614 (1U << config->mprq.min_stride_size_n),
1615 (1U << config->mprq.max_stride_size_n));
1616 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1617 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1618 if (desc % (1 << tmpl->rxq.sges_n)) {
1620 "port %u number of Rx queue descriptors (%u) is not a"
1621 " multiple of SGEs per packet (%u)",
1624 1 << tmpl->rxq.sges_n);
1628 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1629 /* Toggle RX checksum offload if hardware supports it. */
1630 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1631 /* Configure Rx timestamp. */
1632 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1633 tmpl->rxq.timestamp_rx_flag = 0;
1634 if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
1635 &tmpl->rxq.timestamp_offset,
1636 &tmpl->rxq.timestamp_rx_flag) != 0) {
1637 DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
1640 /* Configure VLAN stripping. */
1641 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1642 /* By default, FCS (CRC) is stripped by hardware. */
1643 tmpl->rxq.crc_present = 0;
1644 tmpl->rxq.lro = lro_on_queue;
1645 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1646 if (config->hw_fcs_strip) {
1648 * RQs used for LRO-enabled TIRs should not be
1649 * configured to scatter the FCS.
1653 "port %u CRC stripping has been "
1654 "disabled but will still be performed "
1655 "by hardware, because LRO is enabled",
1656 dev->data->port_id);
1658 tmpl->rxq.crc_present = 1;
1661 "port %u CRC stripping has been disabled but will"
1662 " still be performed by hardware, make sure MLNX_OFED"
1663 " and firmware are up to date",
1664 dev->data->port_id);
1668 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1669 " incoming frames to hide it",
1671 tmpl->rxq.crc_present ? "disabled" : "enabled",
1672 tmpl->rxq.crc_present << 2);
1674 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1675 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1676 tmpl->rxq.port_id = dev->data->port_id;
1678 tmpl->rxq.mp = rx_seg[0].mp;
1679 tmpl->rxq.elts_n = log2above(desc);
1680 tmpl->rxq.rq_repl_thresh =
1681 MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
1683 (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
1684 tmpl->rxq.mprq_bufs =
1685 (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
1687 tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
1689 tmpl->rxq.idx = idx;
1690 __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1691 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1699 * Create a DPDK Rx hairpin queue.
1702 * Pointer to Ethernet device.
1706 * Number of descriptors to configure in queue.
1707 * @param hairpin_conf
1708 * The hairpin binding configuration.
1711 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1713 struct mlx5_rxq_ctrl *
1714 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1715 const struct rte_eth_hairpin_conf *hairpin_conf)
1717 struct mlx5_priv *priv = dev->data->dev_private;
1718 struct mlx5_rxq_ctrl *tmpl;
1720 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1726 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1727 tmpl->socket = SOCKET_ID_ANY;
1728 tmpl->rxq.rss_hash = 0;
1729 tmpl->rxq.port_id = dev->data->port_id;
1731 tmpl->rxq.mp = NULL;
1732 tmpl->rxq.elts_n = log2above(desc);
1733 tmpl->rxq.elts = NULL;
1734 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
1735 tmpl->hairpin_conf = *hairpin_conf;
1736 tmpl->rxq.idx = idx;
1737 __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1738 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1746 * Pointer to Ethernet device.
1751 * A pointer to the queue if it exists, NULL otherwise.
1753 struct mlx5_rxq_ctrl *
1754 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1756 struct mlx5_priv *priv = dev->data->dev_private;
1757 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1758 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1761 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1762 __atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
1768 * Release a Rx queue.
1771 * Pointer to Ethernet device.
1776 * 1 while a reference on it exists, 0 when freed.
1779 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1781 struct mlx5_priv *priv = dev->data->dev_private;
1782 struct mlx5_rxq_ctrl *rxq_ctrl;
1784 if (!(*priv->rxqs)[idx])
1786 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1787 if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1789 if (rxq_ctrl->obj) {
1790 priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
1791 LIST_REMOVE(rxq_ctrl->obj, next);
1792 mlx5_free(rxq_ctrl->obj);
1793 rxq_ctrl->obj = NULL;
1795 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
1796 rxq_free_elts(rxq_ctrl);
1797 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1799 if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1800 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
1801 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1802 LIST_REMOVE(rxq_ctrl, next);
1803 mlx5_free(rxq_ctrl);
1804 (*priv->rxqs)[idx] = NULL;
1810 * Verify the Rx Queue list is empty
1813 * Pointer to Ethernet device.
1816  * The number of objects not released.
1819 mlx5_rxq_verify(struct rte_eth_dev *dev)
1821 struct mlx5_priv *priv = dev->data->dev_private;
1822 struct mlx5_rxq_ctrl *rxq_ctrl;
1825 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1826 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1827 dev->data->port_id, rxq_ctrl->rxq.idx);
1834 * Get a Rx queue type.
1837 * Pointer to Ethernet device.
1842 * The Rx queue type.
1845 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
1847 struct mlx5_priv *priv = dev->data->dev_private;
1848 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1850 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1851 rxq_ctrl = container_of((*priv->rxqs)[idx],
1852 struct mlx5_rxq_ctrl,
1854 return rxq_ctrl->type;
1856 return MLX5_RXQ_TYPE_UNDEFINED;
1860 * Get a Rx hairpin queue configuration.
1863 * Pointer to Ethernet device.
1868 * Pointer to the configuration if a hairpin RX queue, otherwise NULL.
1870 const struct rte_eth_hairpin_conf *
1871 mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
1873 struct mlx5_priv *priv = dev->data->dev_private;
1874 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1876 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1877 rxq_ctrl = container_of((*priv->rxqs)[idx],
1878 struct mlx5_rxq_ctrl,
1880 if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
1881 return &rxq_ctrl->hairpin_conf;
1887 * Match queues listed in arguments to queues contained in indirection table
1891 * Pointer to indirection table to match.
1893  * Queues to match to the queues in the indirection table.
1895 * Number of queues in the array.
1898  * 1 if all queues in the indirection table match, 0 otherwise.
1901 mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
1902 const uint16_t *queues, uint32_t queues_n)
1904 return (ind_tbl->queues_n == queues_n) &&
1905 (!memcmp(ind_tbl->queues, queues,
1906 ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
1910 * Get an indirection table.
1913 * Pointer to Ethernet device.
1915 * Queues entering in the indirection table.
1917 * Number of queues in the array.
1920 * An indirection table if found.
1922 struct mlx5_ind_table_obj *
1923 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1926 struct mlx5_priv *priv = dev->data->dev_private;
1927 struct mlx5_ind_table_obj *ind_tbl;
1929 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1930 if ((ind_tbl->queues_n == queues_n) &&
1931 (memcmp(ind_tbl->queues, queues,
1932 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1939 __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
1940 for (i = 0; i != ind_tbl->queues_n; ++i)
1941 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1947 * Release an indirection table.
1950 * Pointer to Ethernet device.
1952 * Indirection table to release.
1954 * Indirection table for Standalone queue.
1957 * 1 while a reference on it exists, 0 when freed.
1960 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
1961 struct mlx5_ind_table_obj *ind_tbl,
1964 struct mlx5_priv *priv = dev->data->dev_private;
1967 if (__atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) == 0)
1968 priv->obj_ops.ind_table_destroy(ind_tbl);
1969 for (i = 0; i != ind_tbl->queues_n; ++i)
1970 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1971 if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
1973 LIST_REMOVE(ind_tbl, next);
1981  * Verify the indirection table object list is empty.
1984 * Pointer to Ethernet device.
1987  * The number of objects not released.
1990 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
1992 struct mlx5_priv *priv = dev->data->dev_private;
1993 struct mlx5_ind_table_obj *ind_tbl;
1996 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1998 "port %u indirection table obj %p still referenced",
1999 dev->data->port_id, (void *)ind_tbl);
2006  * Set up the fields of an indirection table structure.
2009 * Pointer to Ethernet device.
2011 * Indirection table to modify.
2014 * 0 on success, a negative errno value otherwise and rte_errno is set.
2017 mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
2018 struct mlx5_ind_table_obj *ind_tbl)
2020 struct mlx5_priv *priv = dev->data->dev_private;
2021 uint32_t queues_n = ind_tbl->queues_n;
2022 uint16_t *queues = ind_tbl->queues;
2025 const unsigned int n = rte_is_power_of_2(queues_n) ?
2026 log2above(queues_n) :
2027 log2above(priv->config.ind_table_max_size);
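	/*
	 * The table is created with a log2 size: a power-of-two queue count
	 * maps directly, otherwise the maximum supported size is used and
	 * the queue entries are presumably replicated to fill it.
	 */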
2029 for (i = 0; i != queues_n; ++i) {
2030 if (!mlx5_rxq_get(dev, queues[i])) {
2035 ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
2038 __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
2042 for (j = 0; j < i; j++)
2043 mlx5_rxq_release(dev, ind_tbl->queues[j]);
2045 DEBUG("Port %u cannot setup indirection table.", dev->data->port_id);
2050 * Create an indirection table.
2053 * Pointer to Ethernet device.
2055 * Queues entering in the indirection table.
2057 * Number of queues in the array.
2059 * Indirection table for Standalone queue.
2062 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2064 static struct mlx5_ind_table_obj *
2065 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2066 uint32_t queues_n, bool standalone)
2068 struct mlx5_priv *priv = dev->data->dev_private;
2069 struct mlx5_ind_table_obj *ind_tbl;
2072 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
2073 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
2078 ind_tbl->queues_n = queues_n;
2079 ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
2080 memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
2081 ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
2087 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2092 * Modify an indirection table.
2095 * Pointer to Ethernet device.
2097 * Indirection table to modify.
2099 * Queues replacement for the indirection table.
2101 * Number of queues in the array.
2103 * Indirection table for Standalone queue.
2106 * 0 on success, a negative errno value otherwise and rte_errno is set.
2109 mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
2110 struct mlx5_ind_table_obj *ind_tbl,
2111 uint16_t *queues, const uint32_t queues_n,
2114 struct mlx5_priv *priv = dev->data->dev_private;
2117 const unsigned int n = rte_is_power_of_2(queues_n) ?
2118 log2above(queues_n) :
2119 log2above(priv->config.ind_table_max_size);
2121 MLX5_ASSERT(standalone);
2122 RTE_SET_USED(standalone);
2123 if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) > 1) {
2125		 * Modification of indirection tables having more than one
2126		 * reference is not supported. Intended for standalone indirection
2129		DEBUG("Port %u cannot modify indirection table (refcnt > 1).",
2130 dev->data->port_id);
2134 for (i = 0; i != queues_n; ++i) {
2135 if (!mlx5_rxq_get(dev, queues[i])) {
2140 MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2141 ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
2144 for (j = 0; j < ind_tbl->queues_n; j++)
2145 mlx5_rxq_release(dev, ind_tbl->queues[j]);
2146 ind_tbl->queues_n = queues_n;
2147 ind_tbl->queues = queues;
2151 for (j = 0; j < i; j++)
2152 mlx5_rxq_release(dev, ind_tbl->queues[j]);
2154 DEBUG("Port %u cannot setup indirection table.", dev->data->port_id);
2159 * Match an Rx Hash queue.
2162 * Cache list pointer.
2164 * Hash queue entry pointer.
2166 * Context of the callback function.
2169  * 0 if match, nonzero otherwise.
2172 mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
2173 struct mlx5_cache_entry *entry,
2176 struct rte_eth_dev *dev = list->ctx;
2177 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2178 struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2179 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2180 struct mlx5_ind_table_obj *ind_tbl;
2182 if (hrxq->rss_key_len != rss_desc->key_len ||
2183 memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
2184 hrxq->hash_fields != rss_desc->hash_fields)
2186 ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue,
2187 rss_desc->queue_num);
2189 mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
2190 return ind_tbl != hrxq->ind_table;
2194 * Modify an Rx Hash queue configuration.
2197 * Pointer to Ethernet device.
2199 * Index to Hash Rx queue to modify.
2201 * RSS key for the Rx hash queue.
2202 * @param rss_key_len
2204 * @param hash_fields
2205 * Verbs protocol hash field to make the RSS on.
2207 * Queues entering in hash queue. In case of empty hash_fields only the
2208 * first queue index will be taken for the indirection table.
2213 * 0 on success, a negative errno value otherwise and rte_errno is set.
2216 mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
2217 const uint8_t *rss_key, uint32_t rss_key_len,
2218 uint64_t hash_fields,
2219 const uint16_t *queues, uint32_t queues_n)
2222 struct mlx5_ind_table_obj *ind_tbl = NULL;
2223 struct mlx5_priv *priv = dev->data->dev_private;
2224 struct mlx5_hrxq *hrxq =
2225 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2233 if (hrxq->rss_key_len != rss_key_len) {
2234	/* rss_key_len is a fixed 40 bytes and is not supposed to change. */
2238 queues_n = hash_fields ? queues_n : 1;
2239 if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
2240 queues, queues_n)) {
2241 ind_tbl = hrxq->ind_table;
2243 if (hrxq->standalone) {
2245			 * Replacement of the indirection table is unsupported for
2246			 * standalone hrxq objects (used by shared RSS).
2248 rte_errno = ENOTSUP;
2251 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2253 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
2260 MLX5_ASSERT(priv->obj_ops.hrxq_modify);
2261 ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
2262 hash_fields, ind_tbl);
2267 if (ind_tbl != hrxq->ind_table) {
2268 MLX5_ASSERT(!hrxq->standalone);
2269 mlx5_ind_table_obj_release(dev, hrxq->ind_table,
2271 hrxq->ind_table = ind_tbl;
2273 hrxq->hash_fields = hash_fields;
2274 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2278 if (ind_tbl != hrxq->ind_table) {
2279 MLX5_ASSERT(!hrxq->standalone);
2280 mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
2287 __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2289 struct mlx5_priv *priv = dev->data->dev_private;
2291 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2292 mlx5_glue->destroy_flow_action(hrxq->action);
2294 priv->obj_ops.hrxq_destroy(hrxq);
2295 if (!hrxq->standalone) {
2296 mlx5_ind_table_obj_release(dev, hrxq->ind_table,
2299 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
2303 * Release the hash Rx queue.
2306 * Pointer to Ethernet device.
2308 * Index to Hash Rx queue to release.
2311 * Cache list pointer.
2313 * Hash queue entry pointer.
2316 mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
2317 struct mlx5_cache_entry *entry)
2319 struct rte_eth_dev *dev = list->ctx;
2320 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2322 __mlx5_hrxq_remove(dev, hrxq);
2325 static struct mlx5_hrxq *
2326 __mlx5_hrxq_create(struct rte_eth_dev *dev,
2327 struct mlx5_flow_rss_desc *rss_desc)
2329 struct mlx5_priv *priv = dev->data->dev_private;
2330 const uint8_t *rss_key = rss_desc->key;
2331 uint32_t rss_key_len = rss_desc->key_len;
2332 bool standalone = !!rss_desc->shared_rss;
2333 const uint16_t *queues =
2334 standalone ? rss_desc->const_q : rss_desc->queue;
2335 uint32_t queues_n = rss_desc->queue_num;
2336 struct mlx5_hrxq *hrxq = NULL;
2337 uint32_t hrxq_idx = 0;
2338 struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
2341 queues_n = rss_desc->hash_fields ? queues_n : 1;
2343 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2345 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
2349 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2352 hrxq->standalone = standalone;
2353 hrxq->idx = hrxq_idx;
2354 hrxq->ind_table = ind_tbl;
2355 hrxq->rss_key_len = rss_key_len;
2356 hrxq->hash_fields = rss_desc->hash_fields;
2357 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2358 ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
2363 if (!rss_desc->ind_tbl)
2364 mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
2366 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2371 * Create an Rx Hash queue.
2374 * Cache list pointer.
2376 * Hash queue entry pointer.
2378 * Context of the callback function.
2381 * queue entry on success, NULL otherwise.
2383 struct mlx5_cache_entry *
2384 mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
2385 struct mlx5_cache_entry *entry __rte_unused,
2388 struct rte_eth_dev *dev = list->ctx;
2389 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2390 struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2391 struct mlx5_hrxq *hrxq;
2393 hrxq = __mlx5_hrxq_create(dev, rss_desc);
2394 return hrxq ? &hrxq->entry : NULL;
2398 * Get an Rx Hash queue.
2401 * Pointer to Ethernet device.
2403 * RSS configuration for the Rx hash queue.
2406  * A hash Rx queue index on success.
2408 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
2409 struct mlx5_flow_rss_desc *rss_desc)
2411 struct mlx5_priv *priv = dev->data->dev_private;
2412 struct mlx5_hrxq *hrxq;
2413 struct mlx5_cache_entry *entry;
2414 struct mlx5_flow_cb_ctx ctx = {
2418 if (rss_desc->shared_rss) {
2419 hrxq = __mlx5_hrxq_create(dev, rss_desc);
2421 entry = mlx5_cache_register(&priv->hrxqs, &ctx);
2424 hrxq = container_of(entry, typeof(*hrxq), entry);
2430 * Release the hash Rx queue.
2433 * Pointer to Ethernet device.
2435 * Index to Hash Rx queue to release.
2438 * 1 while a reference on it exists, 0 when freed.
2440 int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2442 struct mlx5_priv *priv = dev->data->dev_private;
2443 struct mlx5_hrxq *hrxq;
2445 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2448 if (!hrxq->standalone)
2449 return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
2450 __mlx5_hrxq_remove(dev, hrxq);
2455 * Create a drop Rx Hash queue.
2458 * Pointer to Ethernet device.
2461 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2464 mlx5_drop_action_create(struct rte_eth_dev *dev)
2466 struct mlx5_priv *priv = dev->data->dev_private;
2467 struct mlx5_hrxq *hrxq = NULL;
2470 if (priv->drop_queue.hrxq)
2471 return priv->drop_queue.hrxq;
2472 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2475 "Port %u cannot allocate memory for drop queue.",
2476 dev->data->port_id);
2480 priv->drop_queue.hrxq = hrxq;
2481 hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
2483 if (!hrxq->ind_table) {
2487 ret = priv->obj_ops.drop_action_create(dev);
2493 if (hrxq->ind_table)
2494 mlx5_free(hrxq->ind_table);
2495 priv->drop_queue.hrxq = NULL;
2502 * Release a drop hash Rx queue.
2505 * Pointer to Ethernet device.
2508 mlx5_drop_action_destroy(struct rte_eth_dev *dev)
2510 struct mlx5_priv *priv = dev->data->dev_private;
2511 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2513 if (!priv->drop_queue.hrxq)
2515 priv->obj_ops.drop_action_destroy(dev);
2516 mlx5_free(priv->drop_queue.rxq);
2517 mlx5_free(hrxq->ind_table);
2519 priv->drop_queue.rxq = NULL;
2520 priv->drop_queue.hrxq = NULL;
2524  * Verify the hash Rx queue list is empty.
2527 * Pointer to Ethernet device.
2530  * The number of objects not released.
2533 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2535 struct mlx5_priv *priv = dev->data->dev_private;
2537 return mlx5_cache_list_get_entry_num(&priv->hrxqs);
2541 * Set the Rx queue timestamp conversion parameters
2544 * Pointer to the Ethernet device structure.
2547 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2549 struct mlx5_priv *priv = dev->data->dev_private;
2550 struct mlx5_dev_ctx_shared *sh = priv->sh;
2551 struct mlx5_rxq_data *data;
2554 for (i = 0; i != priv->rxqs_n; ++i) {
2555 if (!(*priv->rxqs)[i])
2557 data = (*priv->rxqs)[i];
2559 data->rt_timestamp = priv->config.rt_timestamp;