1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 #include <rte_malloc.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
20 #include <rte_eal_paging.h>
22 #include <mlx5_glue.h>
23 #include <mlx5_malloc.h>
25 #include "mlx5_defs.h"
27 #include "mlx5_rxtx.h"
28 #include "mlx5_utils.h"
29 #include "mlx5_autoconf.h"
32 /* Default RSS hash key also used for ConnectX-3. */
33 uint8_t rss_hash_default_key[] = {
34 0x2c, 0xc6, 0x81, 0xd1,
35 0x5b, 0xdb, 0xf4, 0xf7,
36 0xfc, 0xa2, 0x83, 0x19,
37 0xdb, 0x1a, 0x3e, 0x94,
38 0x6b, 0x9e, 0x38, 0xd9,
39 0x2c, 0x9c, 0x03, 0xd1,
40 0xad, 0x99, 0x44, 0xa7,
41 0xd9, 0x56, 0x3d, 0x59,
42 0x06, 0x3c, 0x25, 0xf3,
43 0xfc, 0x1f, 0xdc, 0x2a,
46 /* Length of the default RSS hash key. */
47 static_assert(MLX5_RSS_HASH_KEY_LEN ==
48 (unsigned int)sizeof(rss_hash_default_key),
49 "wrong RSS default key size.");
52 * Check whether Multi-Packet RQ can be enabled for the device.
55 * Pointer to Ethernet device.
58 * 1 if supported, negative errno value if not.
61 mlx5_check_mprq_support(struct rte_eth_dev *dev)
63 struct mlx5_priv *priv = dev->data->dev_private;
65 if (priv->config.mprq.enabled &&
66 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
72 * Check whether Multi-Packet RQ is enabled for the Rx queue.
75 * Pointer to receive queue structure.
78 * 0 if disabled, otherwise enabled.
81 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
83 return rxq->strd_num_n > 0;
87 * Check whether Multi-Packet RQ is enabled for the device.
90 * Pointer to Ethernet device.
93 * 0 if disabled, otherwise enabled.
96 mlx5_mprq_enabled(struct rte_eth_dev *dev)
98 struct mlx5_priv *priv = dev->data->dev_private;
103 if (mlx5_check_mprq_support(dev) < 0)
105 /* All the configured queues should be enabled. */
106 for (i = 0; i < priv->rxqs_n; ++i) {
107 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
108 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
109 (rxq, struct mlx5_rxq_ctrl, rxq);
111 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
114 if (mlx5_rxq_mprq_enabled(rxq))
117 /* Multi-Packet RQ can't be partially configured. */
118 MLX5_ASSERT(n == 0 || n == n_ibv);
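/*
 * Configuration sketch (assumption; see the mlx5 guide for the exact devarg
 * names): the mprq.enabled and mprq.min_rxqs_num fields tested by
 * mlx5_check_mprq_support() above are derived from probe-time device
 * arguments, e.g.
 *
 *   dpdk-testpmd -a 0000:03:00.0,mprq_en=1,rxqs_min_mprq=2 -- --rxq=4
 *
 * so MPRQ is only considered when explicitly requested and enough Rx queues
 * are configured.
 */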
123 * Calculate the number of CQEs in CQ for the Rx queue.
126 * Pointer to receive queue structure.
129 * Number of CQEs in CQ.
132 mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
135 unsigned int wqe_n = 1 << rxq_data->elts_n;
137 if (mlx5_rxq_mprq_enabled(rxq_data))
138 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
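/*
 * Worked example (hypothetical values): with elts_n = 4 (wqe_n = 16) and
 * strd_num_n = 6 (64 strides per WQE), an MPRQ queue needs
 * 16 * 64 - 1 = 1023 CQEs, since every stride may complete as a separate
 * packet and therefore as a separate CQE.
 */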
145 * Allocate RX queue elements for Multi-Packet RQ.
148 * Pointer to RX queue structure.
151 * 0 on success, a negative errno value otherwise and rte_errno is set.
154 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
156 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
157 unsigned int wqe_n = 1 << rxq->elts_n;
161 /* Iterate on segments. */
162 for (i = 0; i <= wqe_n; ++i) {
163 struct mlx5_mprq_buf *buf;
165 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
166 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
171 (*rxq->mprq_bufs)[i] = buf;
173 rxq->mprq_repl = buf;
176 "port %u MPRQ queue %u allocated and configured %u segments",
177 rxq->port_id, rxq->idx, wqe_n);
180 err = rte_errno; /* Save rte_errno before cleanup. */
182 for (i = 0; (i != wqe_n); ++i) {
183 if ((*rxq->mprq_bufs)[i] != NULL)
184 rte_mempool_put(rxq->mprq_mp,
185 (*rxq->mprq_bufs)[i]);
186 (*rxq->mprq_bufs)[i] = NULL;
188 DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
189 rxq->port_id, rxq->idx);
190 rte_errno = err; /* Restore rte_errno. */
195 * Allocate RX queue elements for Single-Packet RQ.
198 * Pointer to RX queue structure.
201 * 0 on success, errno value on failure.
204 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
206 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
207 unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
208 (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
209 (1 << rxq_ctrl->rxq.elts_n);
213 /* Iterate on segments. */
214 for (i = 0; (i != elts_n); ++i) {
215 struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
216 struct rte_mbuf *buf;
218 buf = rte_pktmbuf_alloc(seg->mp);
220 DRV_LOG(ERR, "port %u empty mbuf pool",
221 PORT_ID(rxq_ctrl->priv));
225 /* Headroom is reserved by rte_pktmbuf_alloc(). */
226 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
227 /* Buffer is supposed to be empty. */
228 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
229 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
230 MLX5_ASSERT(!buf->next);
231 SET_DATA_OFF(buf, seg->offset);
232 PORT(buf) = rxq_ctrl->rxq.port_id;
233 DATA_LEN(buf) = seg->length;
234 PKT_LEN(buf) = seg->length;
236 (*rxq_ctrl->rxq.elts)[i] = buf;
238 /* If Rx vector is activated. */
239 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
240 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
241 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
242 struct rte_pktmbuf_pool_private *priv =
243 (struct rte_pktmbuf_pool_private *)
244 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
247 /* Initialize default rearm_data for vPMD. */
248 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
249 rte_mbuf_refcnt_set(mbuf_init, 1);
250 mbuf_init->nb_segs = 1;
251 mbuf_init->port = rxq->port_id;
252 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
253 mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
255 * prevent compiler reordering:
256 * rearm_data covers previous fields.
258 rte_compiler_barrier();
259 rxq->mbuf_initializer =
260 *(rte_xmm_t *)&mbuf_init->rearm_data;
261 /* Padding with a fake mbuf for vectorized Rx. */
262 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
263 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
266 "port %u SPRQ queue %u allocated and configured %u segments"
268 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
269 elts_n / (1 << rxq_ctrl->rxq.sges_n));
272 err = rte_errno; /* Save rte_errno before cleanup. */
274 for (i = 0; (i != elts_n); ++i) {
275 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
276 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
277 (*rxq_ctrl->rxq.elts)[i] = NULL;
279 DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
280 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
281 rte_errno = err; /* Restore rte_errno. */
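/*
 * Note on the vPMD initialization earlier in this function (layout
 * assumption for this DPDK generation): mbuf_initializer is read as a
 * 16-byte vector starting at rearm_data, i.e. it snapshots data_off,
 * refcnt, nb_segs and port plus the adjacent ol_flags word, so the
 * vectorized Rx burst can re-initialize these fields for several mbufs
 * with plain vector stores instead of field-by-field writes.
 */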
286 * Allocate RX queue elements.
289 * Pointer to RX queue structure.
292 * 0 on success, errno value on failure.
295 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
300 * For MPRQ we need to allocate both MPRQ buffers
301 * for WQEs and simple mbufs for vector processing.
303 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
304 ret = rxq_alloc_elts_mprq(rxq_ctrl);
305 return (ret || rxq_alloc_elts_sprq(rxq_ctrl));
309 * Free RX queue elements for Multi-Packet RQ.
312 * Pointer to RX queue structure.
315 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
317 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
320 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
321 rxq->port_id, rxq->idx, (1u << rxq->elts_n));
322 if (rxq->mprq_bufs == NULL)
324 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
325 if ((*rxq->mprq_bufs)[i] != NULL)
326 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
327 (*rxq->mprq_bufs)[i] = NULL;
329 if (rxq->mprq_repl != NULL) {
330 mlx5_mprq_buf_free(rxq->mprq_repl);
331 rxq->mprq_repl = NULL;
336 * Free RX queue elements for Single-Packet RQ.
339 * Pointer to RX queue structure.
342 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
344 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
345 const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
346 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
348 const uint16_t q_mask = q_n - 1;
349 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
352 DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
353 PORT_ID(rxq_ctrl->priv), rxq->idx, q_n);
354 if (rxq->elts == NULL)
357 * Some mbufs in the ring belong to the application;
358 * they cannot be freed.
360 if (mlx5_rxq_check_vec_support(rxq) > 0) {
361 for (i = 0; i < used; ++i)
362 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
363 rxq->rq_pi = rxq->rq_ci;
365 for (i = 0; i != q_n; ++i) {
366 if ((*rxq->elts)[i] != NULL)
367 rte_pktmbuf_free_seg((*rxq->elts)[i]);
368 (*rxq->elts)[i] = NULL;
373 * Free RX queue elements.
376 * Pointer to RX queue structure.
379 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
382 * For MPRQ we need to free both the MPRQ buffers
383 * used for WQEs and the simple mbufs used for vector processing.
385 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
386 rxq_free_elts_mprq(rxq_ctrl);
387 rxq_free_elts_sprq(rxq_ctrl);
391 * Returns the per-queue supported offloads.
394 * Pointer to Ethernet device.
397 * Supported Rx offloads.
400 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
402 struct mlx5_priv *priv = dev->data->dev_private;
403 struct mlx5_dev_config *config = &priv->config;
404 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
405 DEV_RX_OFFLOAD_TIMESTAMP |
406 DEV_RX_OFFLOAD_JUMBO_FRAME |
407 DEV_RX_OFFLOAD_RSS_HASH);
409 if (config->hw_fcs_strip)
410 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
413 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
414 DEV_RX_OFFLOAD_UDP_CKSUM |
415 DEV_RX_OFFLOAD_TCP_CKSUM);
416 if (config->hw_vlan_strip)
417 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
418 if (MLX5_LRO_SUPPORTED(dev))
419 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
425 * Returns the per-port supported offloads.
428 * Supported Rx offloads.
431 mlx5_get_rx_port_offloads(void)
433 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
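/*
 * Usage sketch (illustrative): applications discover these capabilities
 * through the generic ethdev API before configuring the port and queues.
 *
 *   struct rte_eth_dev_info dev_info;
 *
 *   rte_eth_dev_info_get(port_id, &dev_info);
 *   if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 *       port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 *
 * Per-queue capabilities are reported in dev_info.rx_queue_offload_capa.
 */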
439 * Verify if the queue can be released.
442 * Pointer to Ethernet device.
447 * 1 if the queue can be released
448 * 0 if the queue cannot be released because there are references to it.
449 * A negative errno value is returned and rte_errno is set if the queue doesn't exist.
452 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
454 struct mlx5_priv *priv = dev->data->dev_private;
455 struct mlx5_rxq_ctrl *rxq_ctrl;
457 if (!(*priv->rxqs)[idx]) {
461 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
462 return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
466 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
468 rxq_sync_cq(struct mlx5_rxq_data *rxq)
470 const uint16_t cqe_n = 1 << rxq->cqe_n;
471 const uint16_t cqe_mask = cqe_n - 1;
472 volatile struct mlx5_cqe *cqe;
477 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
478 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
479 if (ret == MLX5_CQE_STATUS_HW_OWN)
481 if (ret == MLX5_CQE_STATUS_ERR) {
485 MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
486 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
490 /* Compute the next non-compressed CQE. */
491 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
494 /* Move all CQEs to HW ownership, including possible MiniCQEs. */
495 for (i = 0; i < cqe_n; i++) {
496 cqe = &(*rxq->cqes)[i];
497 cqe->op_own = MLX5_CQE_INVALIDATE;
499 /* Resync CQE and WQE (WQ in RESET state). */
501 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
503 *rxq->rq_db = rte_cpu_to_be_32(0);
508 * Rx queue stop. Device queue goes to the RESET state,
509 * all involved mbufs are freed from WQ.
512 * Pointer to Ethernet device structure.
517 * 0 on success, a negative errno value otherwise and rte_errno is set.
520 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
522 struct mlx5_priv *priv = dev->data->dev_private;
523 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
524 struct mlx5_rxq_ctrl *rxq_ctrl =
525 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
528 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
529 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
531 DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
536 /* Remove all processed CQEs. */
538 /* Free all involved mbufs. */
539 rxq_free_elts(rxq_ctrl);
540 /* Set the actual queue state. */
541 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
546 * Rx queue stop. Device queue goes to the RESET state,
547 * all involved mbufs are freed from WQ.
550 * Pointer to Ethernet device structure.
555 * 0 on success, a negative errno value otherwise and rte_errno is set.
558 mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
560 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
563 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
564 DRV_LOG(ERR, "Hairpin queue can't be stopped");
568 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
571 * Vectorized Rx burst requires the CQ and RQ indices to be
572 * synchronized; that synchronization might be broken on RQ
573 * restart and cause Rx malfunction, so queue stopping is
574 * not supported if vectorized Rx burst is engaged.
575 * The burst routine pointer depends on the process type,
576 * so the check is performed here.
578 if (pkt_burst == mlx5_rx_burst_vec) {
579 DRV_LOG(ERR, "Rx queue stop is not supported "
580 "for vectorized Rx");
584 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
585 ret = mlx5_mp_os_req_queue_control(dev, idx,
586 MLX5_MP_REQ_QUEUE_RX_STOP);
588 ret = mlx5_rx_queue_stop_primary(dev, idx);
594 * Rx queue start. Device queue goes to the ready state,
595 * all required mbufs are allocated and WQ is replenished.
598 * Pointer to Ethernet device structure.
603 * 0 on success, a negative errno value otherwise and rte_errno is set.
606 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
608 struct mlx5_priv *priv = dev->data->dev_private;
609 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
610 struct mlx5_rxq_ctrl *rxq_ctrl =
611 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
614 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
615 /* Allocate needed buffers. */
616 ret = rxq_alloc_elts(rxq_ctrl);
618 DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
623 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
625 /* Reset RQ consumer before moving queue to READY state. */
626 *rxq->rq_db = rte_cpu_to_be_32(0);
628 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
630 DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
635 /* Reinitialize RQ - set WQEs. */
636 mlx5_rxq_initialize(rxq);
637 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
638 /* Set actual queue state. */
639 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
644 * Rx queue start. Device queue goes to the ready state,
645 * all required mbufs are allocated and WQ is replenished.
648 * Pointer to Ethernet device structure.
653 * 0 on success, a negative errno value otherwise and rte_errno is set.
656 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
660 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
661 DRV_LOG(ERR, "Hairpin queue can't be started");
665 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
667 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
668 ret = mlx5_mp_os_req_queue_control(dev, idx,
669 MLX5_MP_REQ_QUEUE_RX_START);
671 ret = mlx5_rx_queue_start_primary(dev, idx);
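/*
 * Usage sketch (illustrative): these handlers back the generic ethdev queue
 * state calls, so an application restarts an Rx queue as follows (port_id
 * and queue are example values):
 *
 *   ret = rte_eth_dev_rx_queue_stop(port_id, queue);
 *   ... drain or reconfigure ...
 *   ret = rte_eth_dev_rx_queue_start(port_id, queue);
 *
 * Both return 0 on success or a negative errno value, matching the
 * primary/secondary process dispatch above.
 */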
677 * Rx queue presetup checks.
680 * Pointer to Ethernet device structure.
684 * Number of descriptors to configure in queue.
687 * 0 on success, a negative errno value otherwise and rte_errno is set.
690 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
692 struct mlx5_priv *priv = dev->data->dev_private;
694 if (!rte_is_power_of_2(*desc)) {
695 *desc = 1 << log2above(*desc);
697 "port %u increased number of descriptors in Rx queue %u"
698 " to the next power of two (%d)",
699 dev->data->port_id, idx, *desc);
701 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
702 dev->data->port_id, idx, *desc);
703 if (idx >= priv->rxqs_n) {
704 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
705 dev->data->port_id, idx, priv->rxqs_n);
706 rte_errno = EOVERFLOW;
709 if (!mlx5_rxq_releasable(dev, idx)) {
710 DRV_LOG(ERR, "port %u unable to release queue index %u",
711 dev->data->port_id, idx);
715 mlx5_rxq_release(dev, idx);
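/*
 * Worked example (hypothetical value): a request for 1000 descriptors is
 * rounded up to 1 << log2above(1000) = 1024 and *desc is updated, so the
 * rest of the setup path only ever sees a power-of-two ring size.
 */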
722 * Pointer to Ethernet device structure.
726 * Number of descriptors to configure in queue.
728 * NUMA socket on which memory must be allocated.
730 * Thresholds parameters.
732 * Memory pool for buffer allocations.
735 * 0 on success, a negative errno value otherwise and rte_errno is set.
738 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
739 unsigned int socket, const struct rte_eth_rxconf *conf,
740 struct rte_mempool *mp)
742 struct mlx5_priv *priv = dev->data->dev_private;
743 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
744 struct mlx5_rxq_ctrl *rxq_ctrl =
745 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
746 struct rte_eth_rxseg_split *rx_seg =
747 (struct rte_eth_rxseg_split *)conf->rx_seg;
748 struct rte_eth_rxseg_split rx_single = {.mp = mp};
749 uint16_t n_seg = conf->rx_nseg;
754 * The parameters should be checked on rte_eth_dev layer.
755 * If mp is specified it means the compatible configuration
756 * without buffer split feature tuning.
762 uint64_t offloads = conf->offloads |
763 dev->data->dev_conf.rxmode.offloads;
765 /* The offloads should be checked on rte_eth_dev layer. */
766 MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
767 if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
768 DRV_LOG(ERR, "port %u queue index %u split "
769 "offload not configured",
770 dev->data->port_id, idx);
774 MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
776 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
779 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
781 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
782 dev->data->port_id, idx);
786 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
787 dev->data->port_id, idx);
788 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
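/*
 * Usage sketch (illustrative): this function is reached through the generic
 * setup call; mbuf_pool and the sizes are example values, and passing a
 * NULL rte_eth_rxconf selects the defaults.
 *
 *   ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *                                NULL, mbuf_pool);
 */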
795 * Pointer to Ethernet device structure.
799 * Number of descriptors to configure in queue.
800 * @param hairpin_conf
801 * Hairpin configuration parameters.
804 * 0 on success, a negative errno value otherwise and rte_errno is set.
807 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
809 const struct rte_eth_hairpin_conf *hairpin_conf)
811 struct mlx5_priv *priv = dev->data->dev_private;
812 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
813 struct mlx5_rxq_ctrl *rxq_ctrl =
814 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
817 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
820 if (hairpin_conf->peer_count != 1 ||
821 hairpin_conf->peers[0].port != dev->data->port_id ||
822 hairpin_conf->peers[0].queue >= priv->txqs_n) {
823 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
824 " invalid hairpin configuration", dev->data->port_id,
829 rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
831 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
832 dev->data->port_id, idx);
836 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
837 dev->data->port_id, idx);
838 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
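/*
 * Usage sketch (illustrative): a single-peer hairpin queue bound to Tx
 * queue txq_id of the same port, matching the peer checks above (names are
 * example values).
 *
 *   struct rte_eth_hairpin_conf hp_conf = {
 *       .peer_count = 1,
 *       .peers[0] = { .port = port_id, .queue = txq_id },
 *   };
 *   ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq_id, 512, &hp_conf);
 */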
843 * DPDK callback to release a RX queue.
846 * Generic RX queue pointer.
849 mlx5_rx_queue_release(void *dpdk_rxq)
851 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
852 struct mlx5_rxq_ctrl *rxq_ctrl;
853 struct mlx5_priv *priv;
857 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
858 priv = rxq_ctrl->priv;
859 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
860 rte_panic("port %u Rx queue %u is still used by a flow and"
861 " cannot be removed\n",
862 PORT_ID(priv), rxq->idx);
863 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
867 * Allocate queue vector and fill epoll fd list for Rx interrupts.
870 * Pointer to Ethernet device.
873 * 0 on success, a negative errno value otherwise and rte_errno is set.
876 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
878 struct mlx5_priv *priv = dev->data->dev_private;
880 unsigned int rxqs_n = priv->rxqs_n;
881 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
882 unsigned int count = 0;
883 struct rte_intr_handle *intr_handle = dev->intr_handle;
885 if (!dev->data->dev_conf.intr_conf.rxq)
887 mlx5_rx_intr_vec_disable(dev);
888 intr_handle->intr_vec = mlx5_malloc(0,
889 n * sizeof(intr_handle->intr_vec[0]),
891 if (intr_handle->intr_vec == NULL) {
893 "port %u failed to allocate memory for interrupt"
894 " vector, Rx interrupts will not be supported",
899 intr_handle->type = RTE_INTR_HANDLE_EXT;
900 for (i = 0; i != n; ++i) {
901 /* This rxq obj must not be released in this function. */
902 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
903 struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
906 /* Skip queues that cannot request interrupts. */
907 if (!rxq_obj || (!rxq_obj->ibv_channel &&
908 !rxq_obj->devx_channel)) {
909 /* Use invalid intr_vec[] index to disable entry. */
910 intr_handle->intr_vec[i] =
911 RTE_INTR_VEC_RXTX_OFFSET +
912 RTE_MAX_RXTX_INTR_VEC_ID;
913 /* Decrease the rxq_ctrl's refcnt */
915 mlx5_rxq_release(dev, i);
918 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
920 "port %u too many Rx queues for interrupt"
921 " vector size (%d), Rx interrupts cannot be"
923 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
924 mlx5_rx_intr_vec_disable(dev);
928 rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
932 "port %u failed to make Rx interrupt file"
933 " descriptor %d non-blocking for queue index"
935 dev->data->port_id, rxq_obj->fd, i);
936 mlx5_rx_intr_vec_disable(dev);
939 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
940 intr_handle->efds[count] = rxq_obj->fd;
944 mlx5_rx_intr_vec_disable(dev);
946 intr_handle->nb_efd = count;
951 * Clean up Rx interrupts handler.
954 * Pointer to Ethernet device.
957 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
959 struct mlx5_priv *priv = dev->data->dev_private;
960 struct rte_intr_handle *intr_handle = dev->intr_handle;
962 unsigned int rxqs_n = priv->rxqs_n;
963 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
965 if (!dev->data->dev_conf.intr_conf.rxq)
967 if (!intr_handle->intr_vec)
969 for (i = 0; i != n; ++i) {
970 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
971 RTE_MAX_RXTX_INTR_VEC_ID)
974 * Need to access the queue directly to release the reference
975 * kept in mlx5_rx_intr_vec_enable().
977 mlx5_rxq_release(dev, i);
980 rte_intr_free_epoll_fd(intr_handle);
981 if (intr_handle->intr_vec)
982 mlx5_free(intr_handle->intr_vec);
983 intr_handle->nb_efd = 0;
984 intr_handle->intr_vec = NULL;
988 * MLX5 CQ notification.
991 * Pointer to receive queue structure.
993 * Sequence number per receive queue.
996 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
999 uint32_t doorbell_hi;
1001 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
1003 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
1004 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
1005 doorbell = (uint64_t)doorbell_hi << 32;
1006 doorbell |= rxq->cqn;
1007 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
1008 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
1009 cq_db_reg, rxq->uar_lock_cq);
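/*
 * Worked example (assuming the usual mlx5 values MLX5_CQ_SQN_OFFSET = 28
 * and MLX5_CI_MASK = 0xffffff): arming CQ number 0x18 with cq_ci = 0x1234
 * and sq_n_rxq = 2 yields doorbell_hi = 0x20001234 and a 64-bit UAR write
 * of 0x2000123400000018, while the same high word is first mirrored into
 * cq_db[MLX5_CQ_ARM_DB].
 */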
1013 * DPDK callback for Rx queue interrupt enable.
1016 * Pointer to Ethernet device structure.
1017 * @param rx_queue_id
1021 * 0 on success, a negative errno value otherwise and rte_errno is set.
1024 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1026 struct mlx5_rxq_ctrl *rxq_ctrl;
1028 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1031 if (rxq_ctrl->irq) {
1032 if (!rxq_ctrl->obj) {
1033 mlx5_rxq_release(dev, rx_queue_id);
1036 mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
1038 mlx5_rxq_release(dev, rx_queue_id);
1046 * DPDK callback for Rx queue interrupt disable.
1049 * Pointer to Ethernet device structure.
1050 * @param rx_queue_id
1054 * 0 on success, a negative errno value otherwise and rte_errno is set.
1057 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1059 struct mlx5_priv *priv = dev->data->dev_private;
1060 struct mlx5_rxq_ctrl *rxq_ctrl;
1063 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1070 if (rxq_ctrl->irq) {
1071 ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
1074 rxq_ctrl->rxq.cq_arm_sn++;
1076 mlx5_rxq_release(dev, rx_queue_id);
1080 * The ret variable may be EAGAIN, which means the get_event function was
1081 * called before an event was received.
1087 ret = rte_errno; /* Save rte_errno before cleanup. */
1088 mlx5_rxq_release(dev, rx_queue_id);
1090 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
1091 dev->data->port_id, rx_queue_id);
1092 rte_errno = ret; /* Restore rte_errno. */
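/*
 * Usage sketch (illustrative): the enable/disable pair is driven from an
 * application event loop via the generic ethdev interrupt API (port_id,
 * queue and timeout_ms are example values).
 *
 *   struct rte_epoll_event event;
 *
 *   rte_eth_dev_rx_intr_ctl_q(port_id, queue, RTE_EPOLL_PER_THREAD,
 *                             RTE_INTR_EVENT_ADD, NULL);
 *   rte_eth_dev_rx_intr_enable(port_id, queue);
 *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, timeout_ms);
 *   rte_eth_dev_rx_intr_disable(port_id, queue);
 */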
1097 * Verify the Rx queue objects list is empty.
1100 * Pointer to Ethernet device.
1103 * The number of objects not released.
1106 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1108 struct mlx5_priv *priv = dev->data->dev_private;
1110 struct mlx5_rxq_obj *rxq_obj;
1112 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1113 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1114 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1121 * Callback function to initialize mbufs for Multi-Packet RQ.
1124 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1125 void *_m, unsigned int i __rte_unused)
1127 struct mlx5_mprq_buf *buf = _m;
1128 struct rte_mbuf_ext_shared_info *shinfo;
1129 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1132 memset(_m, 0, sizeof(*buf));
1134 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1135 for (j = 0; j != strd_n; ++j) {
1136 shinfo = &buf->shinfos[j];
1137 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1138 shinfo->fcb_opaque = buf;
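/*
 * Data-path note (conceptual sketch; the attach happens in the MPRQ Rx
 * burst, not here): each per-stride shinfo initialized above lets a stride
 * be handed to the application as an external buffer, roughly
 *
 *   rte_pktmbuf_attach_extbuf(pkt, stride_addr, stride_iova, stride_len,
 *                             &buf->shinfos[strd_idx]);
 *
 * and mlx5_mprq_buf_free_cb() releases the mlx5_mprq_buf once the last such
 * mbuf is freed.
 */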
1143 * Free mempool of Multi-Packet RQ.
1146 * Pointer to Ethernet device.
1149 * 0 on success, negative errno value on failure.
1152 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1154 struct mlx5_priv *priv = dev->data->dev_private;
1155 struct rte_mempool *mp = priv->mprq_mp;
1160 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1161 dev->data->port_id, mp->name);
1163 * If a buffer in the pool has been externally attached to an mbuf and it
1164 * is still in use by the application, destroying the Rx queue can spoil
1165 * the packet. It is unlikely to happen, but if the application dynamically
1166 * creates and destroys queues while holding Rx packets, this can happen.
1168 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1169 * RQ isn't provided by the application but managed by the PMD.
1171 if (!rte_mempool_full(mp)) {
1173 "port %u mempool for Multi-Packet RQ is still in use",
1174 dev->data->port_id);
1178 rte_mempool_free(mp);
1179 /* Unset mempool for each Rx queue. */
1180 for (i = 0; i != priv->rxqs_n; ++i) {
1181 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1185 rxq->mprq_mp = NULL;
1187 priv->mprq_mp = NULL;
1192 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1193 * mempool. If already allocated, reuse it if there are enough elements.
1194 * Otherwise, resize it.
1197 * Pointer to Ethernet device.
1200 * 0 on success, negative errno value on failure.
1203 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1205 struct mlx5_priv *priv = dev->data->dev_private;
1206 struct rte_mempool *mp = priv->mprq_mp;
1207 char name[RTE_MEMPOOL_NAMESIZE];
1208 unsigned int desc = 0;
1209 unsigned int buf_len;
1210 unsigned int obj_num;
1211 unsigned int obj_size;
1212 unsigned int strd_num_n = 0;
1213 unsigned int strd_sz_n = 0;
1215 unsigned int n_ibv = 0;
1217 if (!mlx5_mprq_enabled(dev))
1219 /* Count the total number of descriptors configured. */
1220 for (i = 0; i != priv->rxqs_n; ++i) {
1221 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1222 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1223 (rxq, struct mlx5_rxq_ctrl, rxq);
1225 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1228 desc += 1 << rxq->elts_n;
1229 /* Get the max number of strides. */
1230 if (strd_num_n < rxq->strd_num_n)
1231 strd_num_n = rxq->strd_num_n;
1232 /* Get the max size of a stride. */
1233 if (strd_sz_n < rxq->strd_sz_n)
1234 strd_sz_n = rxq->strd_sz_n;
1236 MLX5_ASSERT(strd_num_n && strd_sz_n);
1237 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1238 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1239 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
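/*
 * Worked example (hypothetical values): with strd_num_n = 6 (64 strides)
 * and strd_sz_n = 11 (2048-byte strides), buf_len is 64 * 2048 = 128 KiB
 * and obj_size adds the mlx5_mprq_buf header, 64 shared-info records and
 * the mbuf headroom on top of that.
 */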
1241 * Received packets can be either memcpy'd or externally referenced. When
1242 * a packet is attached to an mbuf as an external buffer, it is not
1243 * possible to predict how the buffers will be queued by the application,
1244 * so there is no way to pre-allocate the exact number of buffers needed;
1245 * instead, enough buffers are prepared speculatively.
1247 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1248 * received packets to buffers provided by application (rxq->mp) until
1249 * this Mempool gets available again.
1252 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1254 * rte_mempool_create_empty() has a sanity check to refuse a large cache
1255 * size compared to the number of elements.
1256 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so a
1257 * constant of 2 is used instead.
1259 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1260 /* Check whether a mempool is already allocated and whether it can be reused. */
1261 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1262 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1263 dev->data->port_id, mp->name);
1266 } else if (mp != NULL) {
1267 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1268 dev->data->port_id, mp->name);
1270 * If freeing fails, the mempool may still be in use; there is no choice
1271 * but to keep using the existing one. On buffer underrun,
1272 * packets will be memcpy'd instead of using an external buffer
1275 if (mlx5_mprq_free_mp(dev)) {
1276 if (mp->elt_size >= obj_size)
1282 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1283 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1284 0, NULL, NULL, mlx5_mprq_buf_init,
1285 (void *)(uintptr_t)(1 << strd_num_n),
1286 dev->device->numa_node, 0);
1289 "port %u failed to allocate a mempool for"
1290 " Multi-Packet RQ, count=%u, size=%u",
1291 dev->data->port_id, obj_num, obj_size);
1297 /* Set mempool for each Rx queue. */
1298 for (i = 0; i != priv->rxqs_n; ++i) {
1299 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1300 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1301 (rxq, struct mlx5_rxq_ctrl, rxq);
1303 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1307 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1308 dev->data->port_id);
1312 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1313 sizeof(struct rte_vlan_hdr) * 2 + \
1314 sizeof(struct rte_ipv6_hdr)))
1315 #define MAX_TCP_OPTION_SIZE 40u
1316 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1317 sizeof(struct rte_tcp_hdr) + \
1318 MAX_TCP_OPTION_SIZE))
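/*
 * Worked numbers: 14 (Ethernet) + 2 * 4 (two VLAN tags) + 40 (IPv6) gives
 * MLX5_MAX_TCP_HDR_OFFSET = 62 bytes; adding the 20-byte TCP header and up
 * to 40 bytes of TCP options gives MLX5_MAX_LRO_HEADER_FIX = 122 bytes.
 */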
1321 * Adjust the maximum LRO message size.
1324 * Pointer to Ethernet device.
1327 * @param max_lro_size
1328 * The maximum size for LRO packet.
1331 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1332 uint32_t max_lro_size)
1334 struct mlx5_priv *priv = dev->data->dev_private;
1336 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1337 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1338 MLX5_MAX_TCP_HDR_OFFSET)
1339 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1340 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1341 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1342 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
1343 if (priv->max_lro_msg_size)
1344 priv->max_lro_msg_size =
1345 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1347 priv->max_lro_msg_size = max_lro_size;
1349 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1350 dev->data->port_id, idx,
1351 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1355 * Create a DPDK Rx queue.
1358 * Pointer to Ethernet device.
1362 * Number of descriptors to configure in queue.
1364 * NUMA socket on which memory must be allocated.
1367 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1369 struct mlx5_rxq_ctrl *
1370 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1371 unsigned int socket, const struct rte_eth_rxconf *conf,
1372 const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
1374 struct mlx5_priv *priv = dev->data->dev_private;
1375 struct mlx5_rxq_ctrl *tmpl;
1376 unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
1377 struct mlx5_dev_config *config = &priv->config;
1378 uint64_t offloads = conf->offloads |
1379 dev->data->dev_conf.rxmode.offloads;
1380 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1381 unsigned int max_rx_pkt_len = lro_on_queue ?
1382 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1383 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1384 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1385 RTE_PKTMBUF_HEADROOM;
1386 unsigned int max_lro_size = 0;
1387 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1388 const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
1389 !rx_seg[0].offset && !rx_seg[0].length;
1390 unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
1391 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1392 unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
1393 (1U << config->mprq.max_stride_size_n) ?
1394 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1395 unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
1396 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1397 (config->mprq.stride_size_n ?
1398 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
1400 * Always allocate extra slots, even if eventually
1401 * the vector Rx will not be used.
1403 uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1404 const struct rte_eth_rxseg_split *qs_seg = rx_seg;
1405 unsigned int tail_len;
1407 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1408 desc_n * sizeof(struct rte_mbuf *), 0, socket);
1413 MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
1415 * Build the array of actual buffer offsets and lengths.
1416 * Pad with the buffers from the last memory pool if
1417 * needed to handle max size packets, replace zero length
1418 * with the buffer length from the pool.
1420 tail_len = max_rx_pkt_len;
1422 struct mlx5_eth_rxseg *hw_seg =
1423 &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
1424 uint32_t buf_len, offset, seg_len;
1427 * For the buffers beyond the described segments the offset is zero;
1428 * the first buffer contains the headroom.
1430 buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
1431 offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
1432 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
1434 * For the buffers beyond the described segments the length is the
1435 * pool buffer length; zero lengths are also replaced with the
1436 * pool buffer length.
1438 seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
1442 /* Check is done in long int, no overflows. */
1443 if (buf_len < seg_len + offset) {
1444 DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
1445 "%u/%u can't be satisfied",
1446 dev->data->port_id, idx,
1447 qs_seg->length, qs_seg->offset);
1451 if (seg_len > tail_len)
1452 seg_len = buf_len - offset;
1453 if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
1455 "port %u too many SGEs (%u) needed to handle"
1456 " requested maximum packet size %u, the maximum"
1457 " supported are %u", dev->data->port_id,
1458 tmpl->rxq.rxseg_n, max_rx_pkt_len,
1460 rte_errno = ENOTSUP;
1463 /* Build the actual scattering element in the queue object. */
1464 hw_seg->mp = qs_seg->mp;
1465 MLX5_ASSERT(offset <= UINT16_MAX);
1466 MLX5_ASSERT(seg_len <= UINT16_MAX);
1467 hw_seg->offset = (uint16_t)offset;
1468 hw_seg->length = (uint16_t)seg_len;
1470 * Advance the segment descriptor; the padding is based
1471 * on the attributes of the last descriptor.
1473 if (tmpl->rxq.rxseg_n < n_seg)
1475 tail_len -= RTE_MIN(tail_len, seg_len);
1476 } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
1477 MLX5_ASSERT(tmpl->rxq.rxseg_n &&
1478 tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
1479 if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
1480 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1481 " configured and not enough mbuf space (%u) to contain "
1482 "the maximum Rx packet length (%u) with headroom (%u)",
1483 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1484 RTE_PKTMBUF_HEADROOM);
1488 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1489 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1490 MLX5_MR_BTREE_CACHE_N, socket)) {
1491 /* rte_errno is already set. */
1494 tmpl->socket = socket;
1495 if (dev->data->dev_conf.intr_conf.rxq)
1498 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1499 * following conditions are met:
1500 * - MPRQ is enabled.
1501 * - The number of descs is more than the number of strides.
1502 * - max_rx_pkt_len plus overhead is less than the max size
1503 * of a stride or mprq_stride_size is specified by a user.
1504 * Need to make sure that there are enough strides to encap
1505 * the maximum packet size in case mprq_stride_size is set.
1506 * Otherwise, enable Rx scatter if necessary.
1508 if (mprq_en && desc > (1U << mprq_stride_nums) &&
1509 (non_scatter_min_mbuf_size <=
1510 (1U << config->mprq.max_stride_size_n) ||
1511 (config->mprq.stride_size_n &&
1512 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1513 /* TODO: Rx scatter isn't supported yet. */
1514 tmpl->rxq.sges_n = 0;
1515 /* Trim the number of descs needed. */
1516 desc >>= mprq_stride_nums;
1517 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1518 config->mprq.stride_num_n : mprq_stride_nums;
1519 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1520 config->mprq.stride_size_n : mprq_stride_size;
1521 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1522 tmpl->rxq.strd_scatter_en =
1523 !!(offloads & DEV_RX_OFFLOAD_SCATTER);
1524 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1525 config->mprq.max_memcpy_len);
1526 max_lro_size = RTE_MIN(max_rx_pkt_len,
1527 (1u << tmpl->rxq.strd_num_n) *
1528 (1u << tmpl->rxq.strd_sz_n));
1530 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1531 " strd_num_n = %u, strd_sz_n = %u",
1532 dev->data->port_id, idx,
1533 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1534 } else if (tmpl->rxq.rxseg_n == 1) {
1535 MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
1536 tmpl->rxq.sges_n = 0;
1537 max_lro_size = max_rx_pkt_len;
1538 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1539 unsigned int sges_n;
1541 if (lro_on_queue && first_mb_free_size <
1542 MLX5_MAX_LRO_HEADER_FIX) {
1543 DRV_LOG(ERR, "Not enough space in the first segment (%u)"
1544 " to include the max header size (%u) for LRO",
1545 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1546 rte_errno = ENOTSUP;
1550 * Determine the number of SGEs needed for a full packet
1551 * and round it to the next power of two.
1553 sges_n = log2above(tmpl->rxq.rxseg_n);
1554 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1556 "port %u too many SGEs (%u) needed to handle"
1557 " requested maximum packet size %u, the maximum"
1558 " supported are %u", dev->data->port_id,
1559 1 << sges_n, max_rx_pkt_len,
1560 1u << MLX5_MAX_LOG_RQ_SEGS);
1561 rte_errno = ENOTSUP;
1564 tmpl->rxq.sges_n = sges_n;
1565 max_lro_size = max_rx_pkt_len;
1567 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1569 "port %u MPRQ is requested but cannot be enabled\n"
1570 " (requested: pkt_sz = %u, desc_num = %u,"
1571 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1572 " supported: min_rxqs_num = %u,"
1573 " min_stride_sz = %u, max_stride_sz = %u).",
1574 dev->data->port_id, non_scatter_min_mbuf_size,
1576 config->mprq.stride_size_n ?
1577 (1U << config->mprq.stride_size_n) :
1578 (1U << mprq_stride_size),
1579 config->mprq.stride_num_n ?
1580 (1U << config->mprq.stride_num_n) :
1581 (1U << mprq_stride_nums),
1582 config->mprq.min_rxqs_num,
1583 (1U << config->mprq.min_stride_size_n),
1584 (1U << config->mprq.max_stride_size_n));
1585 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1586 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1587 if (desc % (1 << tmpl->rxq.sges_n)) {
1589 "port %u number of Rx queue descriptors (%u) is not a"
1590 " multiple of SGEs per packet (%u)",
1593 1 << tmpl->rxq.sges_n);
1597 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1598 /* Toggle RX checksum offload if hardware supports it. */
1599 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1600 /* Configure Rx timestamp. */
1601 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1602 tmpl->rxq.timestamp_rx_flag = 0;
1603 if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
1604 &tmpl->rxq.timestamp_offset,
1605 &tmpl->rxq.timestamp_rx_flag) != 0) {
1606 DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
1609 /* Configure VLAN stripping. */
1610 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1611 /* By default, FCS (CRC) is stripped by hardware. */
1612 tmpl->rxq.crc_present = 0;
1613 tmpl->rxq.lro = lro_on_queue;
1614 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1615 if (config->hw_fcs_strip) {
1617 * RQs used for LRO-enabled TIRs should not be
1618 * configured to scatter the FCS.
1622 "port %u CRC stripping has been "
1623 "disabled but will still be performed "
1624 "by hardware, because LRO is enabled",
1625 dev->data->port_id);
1627 tmpl->rxq.crc_present = 1;
1630 "port %u CRC stripping has been disabled but will"
1631 " still be performed by hardware, make sure MLNX_OFED"
1632 " and firmware are up to date",
1633 dev->data->port_id);
1637 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1638 " incoming frames to hide it",
1640 tmpl->rxq.crc_present ? "disabled" : "enabled",
1641 tmpl->rxq.crc_present << 2);
1643 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1644 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1645 tmpl->rxq.port_id = dev->data->port_id;
1647 tmpl->rxq.mp = rx_seg[0].mp;
1648 tmpl->rxq.elts_n = log2above(desc);
1649 tmpl->rxq.rq_repl_thresh =
1650 MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
1652 (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
1653 tmpl->rxq.mprq_bufs =
1654 (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
1656 tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
1658 tmpl->rxq.idx = idx;
1659 __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1660 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1668 * Create a DPDK Rx hairpin queue.
1671 * Pointer to Ethernet device.
1675 * Number of descriptors to configure in queue.
1676 * @param hairpin_conf
1677 * The hairpin binding configuration.
1680 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1682 struct mlx5_rxq_ctrl *
1683 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1684 const struct rte_eth_hairpin_conf *hairpin_conf)
1686 struct mlx5_priv *priv = dev->data->dev_private;
1687 struct mlx5_rxq_ctrl *tmpl;
1689 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1695 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1696 tmpl->socket = SOCKET_ID_ANY;
1697 tmpl->rxq.rss_hash = 0;
1698 tmpl->rxq.port_id = dev->data->port_id;
1700 tmpl->rxq.mp = NULL;
1701 tmpl->rxq.elts_n = log2above(desc);
1702 tmpl->rxq.elts = NULL;
1703 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
1704 tmpl->hairpin_conf = *hairpin_conf;
1705 tmpl->rxq.idx = idx;
1706 __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1707 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1715 * Pointer to Ethernet device.
1720 * A pointer to the queue if it exists, NULL otherwise.
1722 struct mlx5_rxq_ctrl *
1723 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1725 struct mlx5_priv *priv = dev->data->dev_private;
1726 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1727 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1730 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1731 __atomic_add_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
1737 * Release a Rx queue.
1740 * Pointer to Ethernet device.
1745 * 1 while a reference on it exists, 0 when freed.
1748 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1750 struct mlx5_priv *priv = dev->data->dev_private;
1751 struct mlx5_rxq_ctrl *rxq_ctrl;
1753 if (!(*priv->rxqs)[idx])
1755 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1756 if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1758 if (rxq_ctrl->obj) {
1759 priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
1760 LIST_REMOVE(rxq_ctrl->obj, next);
1761 mlx5_free(rxq_ctrl->obj);
1762 rxq_ctrl->obj = NULL;
1764 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
1765 rxq_free_elts(rxq_ctrl);
1766 if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1767 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
1768 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1769 LIST_REMOVE(rxq_ctrl, next);
1770 mlx5_free(rxq_ctrl);
1771 (*priv->rxqs)[idx] = NULL;
1777 * Verify the Rx queue list is empty.
1780 * Pointer to Ethernet device.
1783 * The number of objects not released.
1786 mlx5_rxq_verify(struct rte_eth_dev *dev)
1788 struct mlx5_priv *priv = dev->data->dev_private;
1789 struct mlx5_rxq_ctrl *rxq_ctrl;
1792 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1793 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1794 dev->data->port_id, rxq_ctrl->rxq.idx);
1801 * Get a Rx queue type.
1804 * Pointer to Ethernet device.
1809 * The Rx queue type.
1812 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
1814 struct mlx5_priv *priv = dev->data->dev_private;
1815 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1817 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1818 rxq_ctrl = container_of((*priv->rxqs)[idx],
1819 struct mlx5_rxq_ctrl,
1821 return rxq_ctrl->type;
1823 return MLX5_RXQ_TYPE_UNDEFINED;
1827 * Match queues listed in arguments to queues contained in indirection table
1831 * Pointer to indirection table to match.
1833 * Queues to match against the queues in the indirection table.
1835 * Number of queues in the array.
1838 * 1 if all queues in the indirection table match, 0 otherwise.
1841 mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
1842 const uint16_t *queues, uint32_t queues_n)
1844 return (ind_tbl->queues_n == queues_n) &&
1845 (!memcmp(ind_tbl->queues, queues,
1846 ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
1850 * Get an indirection table.
1853 * Pointer to Ethernet device.
1855 * Queues entering in the indirection table.
1857 * Number of queues in the array.
1860 * An indirection table if found.
1862 struct mlx5_ind_table_obj *
1863 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1866 struct mlx5_priv *priv = dev->data->dev_private;
1867 struct mlx5_ind_table_obj *ind_tbl;
1869 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1870 if ((ind_tbl->queues_n == queues_n) &&
1871 (memcmp(ind_tbl->queues, queues,
1872 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1879 rte_atomic32_inc(&ind_tbl->refcnt);
1880 for (i = 0; i != ind_tbl->queues_n; ++i)
1881 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1887 * Release an indirection table.
1890 * Pointer to Ethernet device.
1892 * Indirection table to release.
1895 * 1 while a reference on it exists, 0 when freed.
1898 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
1899 struct mlx5_ind_table_obj *ind_tbl)
1901 struct mlx5_priv *priv = dev->data->dev_private;
1904 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1905 priv->obj_ops.ind_table_destroy(ind_tbl);
1906 for (i = 0; i != ind_tbl->queues_n; ++i)
1907 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1908 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1909 LIST_REMOVE(ind_tbl, next);
1917 * Verify the indirection table list is empty.
1920 * Pointer to Ethernet device.
1923 * The number of objects not released.
1926 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
1928 struct mlx5_priv *priv = dev->data->dev_private;
1929 struct mlx5_ind_table_obj *ind_tbl;
1932 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1934 "port %u indirection table obj %p still referenced",
1935 dev->data->port_id, (void *)ind_tbl);
1942 * Create an indirection table.
1945 * Pointer to Ethernet device.
1947 * Queues entering in the indirection table.
1949 * Number of queues in the array.
1952 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
1954 static struct mlx5_ind_table_obj *
1955 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
1958 struct mlx5_priv *priv = dev->data->dev_private;
1959 struct mlx5_ind_table_obj *ind_tbl;
1960 const unsigned int n = rte_is_power_of_2(queues_n) ?
1961 log2above(queues_n) :
1962 log2above(priv->config.ind_table_max_size);
1966 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
1967 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
1972 ind_tbl->queues_n = queues_n;
1973 for (i = 0; i != queues_n; ++i) {
1974 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1977 ind_tbl->queues[i] = queues[i];
1979 ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
1982 rte_atomic32_inc(&ind_tbl->refcnt);
1983 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1987 for (j = 0; j < i; j++)
1988 mlx5_rxq_release(dev, ind_tbl->queues[j]);
1991 DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
1996 * Get an Rx Hash queue.
1999 * Pointer to Ethernet device.
2001 * RSS configuration for the Rx hash queue.
2003 * Queues entering the hash queue. In case of empty hash_fields only the
2004 * first queue index will be taken for the indirection table.
2009 * A hash Rx queue index on success.
2012 mlx5_hrxq_get(struct rte_eth_dev *dev,
2013 const uint8_t *rss_key, uint32_t rss_key_len,
2014 uint64_t hash_fields,
2015 const uint16_t *queues, uint32_t queues_n)
2017 struct mlx5_priv *priv = dev->data->dev_private;
2018 struct mlx5_hrxq *hrxq;
2021 queues_n = hash_fields ? queues_n : 1;
2022 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2024 struct mlx5_ind_table_obj *ind_tbl;
2028 if (hrxq->rss_key_len != rss_key_len)
2030 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2032 if (hrxq->hash_fields != hash_fields)
2034 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2037 if (ind_tbl != hrxq->ind_table) {
2038 mlx5_ind_table_obj_release(dev, ind_tbl);
2041 rte_atomic32_inc(&hrxq->refcnt);
2048 * Modify an Rx Hash queue configuration.
2051 * Pointer to Ethernet device.
2053 * Index to Hash Rx queue to modify.
2055 * RSS key for the Rx hash queue.
2056 * @param rss_key_len
2058 * @param hash_fields
2059 * Verbs protocol hash fields on which to apply RSS.
2061 * Queues entering the hash queue. In case of empty hash_fields only the
2062 * first queue index will be taken for the indirection table.
2067 * 0 on success, a negative errno value otherwise and rte_errno is set.
2070 mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
2071 const uint8_t *rss_key, uint32_t rss_key_len,
2072 uint64_t hash_fields,
2073 const uint16_t *queues, uint32_t queues_n)
2076 struct mlx5_ind_table_obj *ind_tbl = NULL;
2077 struct mlx5_priv *priv = dev->data->dev_private;
2078 struct mlx5_hrxq *hrxq =
2079 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2087 if (hrxq->rss_key_len != rss_key_len) {
2088 /* rss_key_len is a fixed size of 40 bytes and is not supposed to change. */
2092 queues_n = hash_fields ? queues_n : 1;
2093 if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
2094 queues, queues_n)) {
2095 ind_tbl = hrxq->ind_table;
2097 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2099 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
2105 MLX5_ASSERT(priv->obj_ops.hrxq_modify);
2106 ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
2107 hash_fields, ind_tbl);
2112 if (ind_tbl != hrxq->ind_table) {
2113 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2114 hrxq->ind_table = ind_tbl;
2116 hrxq->hash_fields = hash_fields;
2117 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2121 if (ind_tbl != hrxq->ind_table)
2122 mlx5_ind_table_obj_release(dev, ind_tbl);
2128 * Release the hash Rx queue.
2131 * Pointer to Ethernet device.
2133 * Index to Hash Rx queue to release.
2136 * 1 while a reference on it exists, 0 when freed.
2139 mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2141 struct mlx5_priv *priv = dev->data->dev_private;
2142 struct mlx5_hrxq *hrxq;
2144 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2147 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2148 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2149 mlx5_glue->destroy_flow_action(hrxq->action);
2151 priv->obj_ops.hrxq_destroy(hrxq);
2152 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2153 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
2154 hrxq_idx, hrxq, next);
2155 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2158 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
2163 * Create an Rx Hash queue.
2166 * Pointer to Ethernet device.
2168 * RSS key for the Rx hash queue.
2169 * @param rss_key_len
2171 * @param hash_fields
2172 * Verbs protocol hash fields on which to apply RSS.
2174 * Queues entering the hash queue. In case of empty hash_fields only the
2175 * first queue index will be taken for the indirection table.
2181 * If true, the new hash Rx queue object will be used in a shared action.
2184 * The index of the created hash Rx queue on success, 0 otherwise and rte_errno is set.
2187 mlx5_hrxq_new(struct rte_eth_dev *dev,
2188 const uint8_t *rss_key, uint32_t rss_key_len,
2189 uint64_t hash_fields,
2190 const uint16_t *queues, uint32_t queues_n,
2191 int tunnel, bool shared)
2193 struct mlx5_priv *priv = dev->data->dev_private;
2194 struct mlx5_hrxq *hrxq = NULL;
2195 uint32_t hrxq_idx = 0;
2196 struct mlx5_ind_table_obj *ind_tbl;
2199 queues_n = hash_fields ? queues_n : 1;
2200 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2202 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
2207 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2210 hrxq->shared = !!shared;
2211 hrxq->ind_table = ind_tbl;
2212 hrxq->rss_key_len = rss_key_len;
2213 hrxq->hash_fields = hash_fields;
2214 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2215 ret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel);
2220 rte_atomic32_inc(&hrxq->refcnt);
2221 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
2225 ret = rte_errno; /* Save rte_errno before cleanup. */
2226 mlx5_ind_table_obj_release(dev, ind_tbl);
2228 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2229 rte_errno = ret; /* Restore rte_errno. */
2234 * Create a drop Rx Hash queue.
2237 * Pointer to Ethernet device.
2240 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2243 mlx5_drop_action_create(struct rte_eth_dev *dev)
2245 struct mlx5_priv *priv = dev->data->dev_private;
2246 struct mlx5_hrxq *hrxq = NULL;
2249 if (priv->drop_queue.hrxq) {
2250 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2251 return priv->drop_queue.hrxq;
2253 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2256 "Port %u cannot allocate memory for drop queue.",
2257 dev->data->port_id);
2261 priv->drop_queue.hrxq = hrxq;
2262 hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
2264 if (!hrxq->ind_table) {
2268 ret = priv->obj_ops.drop_action_create(dev);
2271 rte_atomic32_set(&hrxq->refcnt, 1);
2275 if (hrxq->ind_table)
2276 mlx5_free(hrxq->ind_table);
2277 priv->drop_queue.hrxq = NULL;
2284 * Release a drop hash Rx queue.
2287 * Pointer to Ethernet device.
2290 mlx5_drop_action_destroy(struct rte_eth_dev *dev)
2292 struct mlx5_priv *priv = dev->data->dev_private;
2293 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2295 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2296 priv->obj_ops.drop_action_destroy(dev);
2297 mlx5_free(priv->drop_queue.rxq);
2298 mlx5_free(hrxq->ind_table);
2300 priv->drop_queue.rxq = NULL;
2301 priv->drop_queue.hrxq = NULL;
2306 * Verify the hash Rx queue list is empty.
2309 * Pointer to Ethernet device.
2312 * The number of objects not released.
2315 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2317 struct mlx5_priv *priv = dev->data->dev_private;
2318 struct mlx5_hrxq *hrxq;
2322 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2325 "port %u hash Rx queue %p still referenced",
2326 dev->data->port_id, (void *)hrxq);
2333 * Set the Rx queue timestamp conversion parameters.
2336 * Pointer to the Ethernet device structure.
2339 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2341 struct mlx5_priv *priv = dev->data->dev_private;
2342 struct mlx5_dev_ctx_shared *sh = priv->sh;
2343 struct mlx5_rxq_data *data;
2346 for (i = 0; i != priv->rxqs_n; ++i) {
2347 if (!(*priv->rxqs)[i])
2349 data = (*priv->rxqs)[i];
2351 data->rt_timestamp = priv->config.rt_timestamp;