1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
20 #include <rte_eal_paging.h>
22 #include <mlx5_glue.h>
23 #include <mlx5_malloc.h>
24 #include <mlx5_common.h>
25 #include <mlx5_common_mr.h>
27 #include "mlx5_defs.h"
30 #include "mlx5_utils.h"
31 #include "mlx5_autoconf.h"
32 #include "mlx5_devx.h"
35 /* Default RSS hash key also used for ConnectX-3. */
36 uint8_t rss_hash_default_key[] = {
37 0x2c, 0xc6, 0x81, 0xd1,
38 0x5b, 0xdb, 0xf4, 0xf7,
39 0xfc, 0xa2, 0x83, 0x19,
40 0xdb, 0x1a, 0x3e, 0x94,
41 0x6b, 0x9e, 0x38, 0xd9,
42 0x2c, 0x9c, 0x03, 0xd1,
43 0xad, 0x99, 0x44, 0xa7,
44 0xd9, 0x56, 0x3d, 0x59,
45 0x06, 0x3c, 0x25, 0xf3,
46 0xfc, 0x1f, 0xdc, 0x2a,
49 /* Length of the default RSS hash key. */
50 static_assert(MLX5_RSS_HASH_KEY_LEN ==
51 (unsigned int)sizeof(rss_hash_default_key),
52 "wrong RSS default key size.");
55 * Calculate the number of CQEs in CQ for the Rx queue.
58 * Pointer to receive queue structure.
61 * Number of CQEs in CQ.
64 mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
67 unsigned int wqe_n = 1 << rxq_data->elts_n;
69 if (mlx5_rxq_mprq_enabled(rxq_data))
70 cqe_n = wqe_n * RTE_BIT32(rxq_data->log_strd_num) - 1;
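/*
 * Worked example (hypothetical values): with elts_n = 4 the RQ holds
 * wqe_n = 1 << 4 = 16 WQEs; with MPRQ and log_strd_num = 5 every WQE
 * carries 32 strides, each of which may produce its own completion, so
 * the CQ must hold up to 16 * 32 - 1 = 511 CQEs. Without MPRQ the
 * count simply follows the WQE number.
 */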
77 * Allocate RX queue elements for Multi-Packet RQ.
80 * Pointer to RX queue structure.
83 * 0 on success, a negative errno value otherwise and rte_errno is set.
86 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
88 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
89 unsigned int wqe_n = 1 << rxq->elts_n;
93 /* Iterate on segments. */
94 for (i = 0; i <= wqe_n; ++i) {
95 struct mlx5_mprq_buf *buf;
97 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
98 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
103 (*rxq->mprq_bufs)[i] = buf;
105 rxq->mprq_repl = buf;
108 "port %u MPRQ queue %u allocated and configured %u segments",
109 rxq->port_id, rxq->idx, wqe_n);
112 err = rte_errno; /* Save rte_errno before cleanup. */
114 for (i = 0; (i != wqe_n); ++i) {
115 if ((*rxq->mprq_bufs)[i] != NULL)
116 rte_mempool_put(rxq->mprq_mp,
117 (*rxq->mprq_bufs)[i]);
118 (*rxq->mprq_bufs)[i] = NULL;
120 DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
121 rxq->port_id, rxq->idx);
122 rte_errno = err; /* Restore rte_errno. */
127 * Allocate RX queue elements for Single-Packet RQ.
130 * Pointer to RX queue structure.
133 * 0 on success, negative errno value on failure.
136 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
138 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
139 unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
140 RTE_BIT32(rxq_ctrl->rxq.elts_n) *
141 RTE_BIT32(rxq_ctrl->rxq.log_strd_num) :
142 RTE_BIT32(rxq_ctrl->rxq.elts_n);
143 bool has_vec_support = mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0;
147 /* Iterate on segments. */
148 for (i = 0; (i != elts_n); ++i) {
149 struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
150 struct rte_mbuf *buf;
152 buf = rte_pktmbuf_alloc(seg->mp);
154 if (rxq_ctrl->share_group == 0)
155 DRV_LOG(ERR, "port %u queue %u empty mbuf pool",
156 RXQ_PORT_ID(rxq_ctrl),
159 DRV_LOG(ERR, "share group %u queue %u empty mbuf pool",
160 rxq_ctrl->share_group,
161 rxq_ctrl->share_qid);
165 /* Only vectored Rx routines rely on headroom size. */
166 MLX5_ASSERT(!has_vec_support ||
167 DATA_OFF(buf) >= RTE_PKTMBUF_HEADROOM);
168 /* Buffer is supposed to be empty. */
169 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
170 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
171 MLX5_ASSERT(!buf->next);
172 SET_DATA_OFF(buf, seg->offset);
173 PORT(buf) = rxq_ctrl->rxq.port_id;
174 DATA_LEN(buf) = seg->length;
175 PKT_LEN(buf) = seg->length;
177 (*rxq_ctrl->rxq.elts)[i] = buf;
179 /* If Rx vector is activated. */
180 if (has_vec_support) {
181 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
182 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
183 struct rte_pktmbuf_pool_private *priv =
184 (struct rte_pktmbuf_pool_private *)
185 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
188 /* Initialize default rearm_data for vPMD. */
189 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
190 rte_mbuf_refcnt_set(mbuf_init, 1);
191 mbuf_init->nb_segs = 1;
192 /* For shared queues, the port is provided in the CQE. */
193 mbuf_init->port = rxq->shared ? 0 : rxq->port_id;
194 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
195 mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
197 * prevent compiler reordering:
198 * rearm_data covers previous fields.
200 rte_compiler_barrier();
201 rxq->mbuf_initializer =
202 *(rte_xmm_t *)&mbuf_init->rearm_data;
203 /* Padding with a fake mbuf for vectorized Rx. */
204 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
205 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
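/*
 * A minimal sketch of what the mbuf_initializer snapshot above enables
 * (not driver code): rearm_data aliases the rte_mbuf region covering
 * data_off, refcnt, nb_segs and port, so the vectorized burst can
 * re-initialize a received mbuf with a single wide store instead of
 * several scalar writes, roughly:
 *
 *     *(rte_xmm_t *)&mbuf->rearm_data = rxq->mbuf_initializer;
 */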
207 if (rxq_ctrl->share_group == 0)
209 "port %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
210 RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx, elts_n,
211 elts_n / (1 << rxq_ctrl->rxq.sges_n));
214 "share group %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
215 rxq_ctrl->share_group, rxq_ctrl->share_qid, elts_n,
216 elts_n / (1 << rxq_ctrl->rxq.sges_n));
219 err = rte_errno; /* Save rte_errno before cleanup. */
221 for (i = 0; (i != elts_n); ++i) {
222 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
223 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
224 (*rxq_ctrl->rxq.elts)[i] = NULL;
226 if (rxq_ctrl->share_group == 0)
227 DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
228 RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx);
230 DRV_LOG(DEBUG, "share group %u SPRQ queue %u failed, freed everything",
231 rxq_ctrl->share_group, rxq_ctrl->share_qid);
232 rte_errno = err; /* Restore rte_errno. */
237 * Allocate RX queue elements.
240 * Pointer to RX queue structure.
243 * 0 on success, negative errno value on failure.
246 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
251 * For MPRQ we need to allocate both MPRQ buffers
252 * for WQEs and simple mbufs for vector processing.
254 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
255 ret = rxq_alloc_elts_mprq(rxq_ctrl);
257 ret = rxq_alloc_elts_sprq(rxq_ctrl);
262 * Free RX queue elements for Multi-Packet RQ.
265 * Pointer to RX queue structure.
268 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
270 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
273 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
274 rxq->port_id, rxq->idx, (1u << rxq->elts_n));
275 if (rxq->mprq_bufs == NULL)
277 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
278 if ((*rxq->mprq_bufs)[i] != NULL)
279 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
280 (*rxq->mprq_bufs)[i] = NULL;
282 if (rxq->mprq_repl != NULL) {
283 mlx5_mprq_buf_free(rxq->mprq_repl);
284 rxq->mprq_repl = NULL;
289 * Free RX queue elements for Single-Packet RQ.
292 * Pointer to RX queue structure.
295 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
297 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
298 const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
299 RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
300 RTE_BIT32(rxq->elts_n);
301 const uint16_t q_mask = q_n - 1;
302 uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
303 rxq->elts_ci : rxq->rq_ci;
304 uint16_t used = q_n - (elts_ci - rxq->rq_pi);
307 if (rxq_ctrl->share_group == 0)
308 DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
309 RXQ_PORT_ID(rxq_ctrl), rxq->idx, q_n);
311 DRV_LOG(DEBUG, "share group %u Rx queue %u freeing %d WRs",
312 rxq_ctrl->share_group, rxq_ctrl->share_qid, q_n);
313 if (rxq->elts == NULL)
316 * Some mbufs in the ring belong to the application.
317 * They cannot be freed.
319 if (mlx5_rxq_check_vec_support(rxq) > 0) {
320 for (i = 0; i < used; ++i)
321 (*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
322 rxq->rq_pi = elts_ci;
324 for (i = 0; i != q_n; ++i) {
325 if ((*rxq->elts)[i] != NULL)
326 rte_pktmbuf_free_seg((*rxq->elts)[i]);
327 (*rxq->elts)[i] = NULL;
332 * Free RX queue elements.
335 * Pointer to RX queue structure.
338 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
341 * For MPRQ we need to free both the MPRQ buffers
342 * for WQEs and the simple mbufs used for vector processing.
344 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
345 rxq_free_elts_mprq(rxq_ctrl);
346 rxq_free_elts_sprq(rxq_ctrl);
350 * Returns the per-queue supported offloads.
353 * Pointer to Ethernet device.
356 * Supported Rx offloads.
359 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
361 struct mlx5_priv *priv = dev->data->dev_private;
362 uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
363 RTE_ETH_RX_OFFLOAD_TIMESTAMP |
364 RTE_ETH_RX_OFFLOAD_RSS_HASH);
366 if (!priv->config.mprq.enabled)
367 offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
368 if (priv->sh->config.hw_fcs_strip)
369 offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
370 if (priv->sh->dev_cap.hw_csum)
371 offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
372 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
373 RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
374 if (priv->sh->dev_cap.hw_vlan_strip)
375 offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
376 if (priv->sh->dev_cap.lro_supported)
377 offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
383 * Returns the per-port supported offloads.
386 * Supported Rx offloads.
389 mlx5_get_rx_port_offloads(void)
391 uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
397 * Verify if the queue can be released.
400 * Pointer to Ethernet device.
405 * 1 if the queue can be released
406 * 0 if the queue cannot be released because there are still references to it.
407 * Negative errno, with rte_errno set, if the queue doesn't exist.
410 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
412 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
418 return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
421 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
423 rxq_sync_cq(struct mlx5_rxq_data *rxq)
425 const uint16_t cqe_n = 1 << rxq->cqe_n;
426 const uint16_t cqe_mask = cqe_n - 1;
427 volatile struct mlx5_cqe *cqe;
432 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
433 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
434 if (ret == MLX5_CQE_STATUS_HW_OWN)
436 if (ret == MLX5_CQE_STATUS_ERR) {
440 MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
441 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
445 /* Compute the next non-compressed CQE. */
446 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
449 /* Move all CQEs to HW ownership, including possible MiniCQEs. */
450 for (i = 0; i < cqe_n; i++) {
451 cqe = &(*rxq->cqes)[i];
452 cqe->op_own = MLX5_CQE_INVALIDATE;
454 /* Resync CQE and WQE (WQ in RESET state). */
456 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
458 *rxq->rq_db = rte_cpu_to_be_32(0);
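/*
 * Background sketch (an assumption about the CQE layout, not driver
 * code): a CQE belongs to software when its owner bit matches the wrap
 * parity of the consumer index, roughly
 *
 *     sw_own = MLX5_CQE_OWNER(cqe->op_own) == !!(cq_ci & cqe_n);
 *
 * which is what check_cqe() evaluates before the code above drops
 * SW-owned and error CQEs and invalidates the ring.
 */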
463 * Rx queue stop. Device queue goes to the RESET state,
464 * all involved mbufs are freed from WQ.
467 * Pointer to Ethernet device structure.
472 * 0 on success, a negative errno value otherwise and rte_errno is set.
475 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
477 struct mlx5_priv *priv = dev->data->dev_private;
478 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
479 struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
482 MLX5_ASSERT(rxq != NULL && rxq_ctrl != NULL);
483 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
484 ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);
486 DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
491 /* Remove all processed CQEs. */
492 rxq_sync_cq(&rxq_ctrl->rxq);
493 /* Free all involved mbufs. */
494 rxq_free_elts(rxq_ctrl);
495 /* Set the actual queue state. */
496 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
501 * Rx queue stop. Device queue goes to the RESET state,
502 * all involved mbufs are freed from WQ.
505 * Pointer to Ethernet device structure.
510 * 0 on success, a negative errno value otherwise and rte_errno is set.
513 mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
515 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
518 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
519 DRV_LOG(ERR, "Hairpin queue can't be stopped");
523 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
526 * Vectorized Rx burst requires the CQ and RQ indices to stay
527 * synchronized, which might be broken on RQ restart
528 * and cause Rx malfunction, so queue stopping is
529 * not supported if vectorized Rx burst is engaged.
530 * The burst routine pointer depends on the process
531 * type, so the check must be performed in each process.
533 if (pkt_burst == mlx5_rx_burst_vec) {
534 DRV_LOG(ERR, "Rx queue stop is not supported "
535 "for vectorized Rx");
539 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
540 ret = mlx5_mp_os_req_queue_control(dev, idx,
541 MLX5_MP_REQ_QUEUE_RX_STOP);
543 ret = mlx5_rx_queue_stop_primary(dev, idx);
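/*
 * Illustrative application-side usage (hypothetical port and queue
 * ids): this callback is reached through the generic ethdev API, e.g.
 *
 *     int rc = rte_eth_dev_rx_queue_stop(port_id, 0);
 *     if (rc == 0)
 *         rc = rte_eth_dev_rx_queue_start(port_id, 0);
 *
 * and fails here when the vectorized Rx burst is engaged, as explained
 * in the comment above.
 */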
549 * Rx queue start. Device queue goes to the ready state,
550 * all required mbufs are allocated and WQ is replenished.
553 * Pointer to Ethernet device structure.
558 * 0 on success, a negative errno value otherwise and rte_errno is set.
561 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
563 struct mlx5_priv *priv = dev->data->dev_private;
564 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
565 struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
568 MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
569 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
570 /* Allocate needed buffers. */
571 ret = rxq_alloc_elts(rxq->ctrl);
573 DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
578 *rxq_data->cq_db = rte_cpu_to_be_32(rxq_data->cq_ci);
580 /* Reset RQ consumer before moving queue to READY state. */
581 *rxq_data->rq_db = rte_cpu_to_be_32(0);
583 ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RST2RDY);
585 DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
590 /* Reinitialize RQ - set WQEs. */
591 mlx5_rxq_initialize(rxq_data);
592 rxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
593 /* Set actual queue state. */
594 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
599 * Rx queue start. Device queue goes to the ready state,
600 * all required mbufs are allocated and WQ is replenished.
603 * Pointer to Ethernet device structure.
608 * 0 on success, a negative errno value otherwise and rte_errno is set.
611 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
615 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
616 DRV_LOG(ERR, "Hairpin queue can't be started");
620 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
622 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
623 ret = mlx5_mp_os_req_queue_control(dev, idx,
624 MLX5_MP_REQ_QUEUE_RX_START);
626 ret = mlx5_rx_queue_start_primary(dev, idx);
632 * Rx queue presetup checks.
635 * Pointer to Ethernet device structure.
639 * Number of descriptors to configure in queue.
640 * @param[out] rxq_ctrl
641 * Address of pointer to shared Rx queue control.
644 * 0 on success, a negative errno value otherwise and rte_errno is set.
647 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc,
648 struct mlx5_rxq_ctrl **rxq_ctrl)
650 struct mlx5_priv *priv = dev->data->dev_private;
651 struct mlx5_rxq_priv *rxq;
654 if (!rte_is_power_of_2(*desc)) {
655 *desc = 1 << log2above(*desc);
657 "port %u increased number of descriptors in Rx queue %u"
658 " to the next power of two (%d)",
659 dev->data->port_id, idx, *desc);
661 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
662 dev->data->port_id, idx, *desc);
663 if (idx >= priv->rxqs_n) {
664 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
665 dev->data->port_id, idx, priv->rxqs_n);
666 rte_errno = EOVERFLOW;
669 if (rxq_ctrl == NULL || *rxq_ctrl == NULL)
671 if (!(*rxq_ctrl)->rxq.shared) {
672 if (!mlx5_rxq_releasable(dev, idx)) {
673 DRV_LOG(ERR, "port %u unable to release queue index %u",
674 dev->data->port_id, idx);
678 mlx5_rxq_release(dev, idx);
680 if ((*rxq_ctrl)->obj != NULL)
681 /* Some port using shared Rx queue has been started. */
683 /* Release all owner RxQs to reconfigure the shared RxQ. */
685 rxq = LIST_FIRST(&(*rxq_ctrl)->owners);
686 LIST_REMOVE(rxq, owner_entry);
687 empty = LIST_EMPTY(&(*rxq_ctrl)->owners);
688 mlx5_rxq_release(ETH_DEV(rxq->priv), rxq->idx);
696 * Get the shared Rx queue object that matches group and queue index.
699 * Pointer to Ethernet device structure.
703 * Shared RX queue index.
706 * Matching shared RXQ object, or NULL if not found.
708 static struct mlx5_rxq_ctrl *
709 mlx5_shared_rxq_get(struct rte_eth_dev *dev, uint32_t group, uint16_t share_qid)
711 struct mlx5_rxq_ctrl *rxq_ctrl;
712 struct mlx5_priv *priv = dev->data->dev_private;
714 LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
715 if (rxq_ctrl->share_group == group &&
716 rxq_ctrl->share_qid == share_qid)
723 * Check whether requested Rx queue configuration matches shared RXQ.
726 * Pointer to shared RXQ.
728 * Pointer to Ethernet device structure.
732 * Number of descriptors to configure in queue.
734 * NUMA socket on which memory must be allocated.
736 * Thresholds parameters.
738 * Memory pool for buffer allocations.
741 * 0 on success, a negative errno value otherwise and rte_errno is set.
744 mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,
745 uint16_t idx, uint16_t desc, unsigned int socket,
746 const struct rte_eth_rxconf *conf,
747 struct rte_mempool *mp)
749 struct mlx5_priv *spriv = LIST_FIRST(&rxq_ctrl->owners)->priv;
750 struct mlx5_priv *priv = dev->data->dev_private;
754 if (rxq_ctrl->socket != socket) {
755 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: socket mismatch",
756 dev->data->port_id, idx);
759 if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
760 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: descriptor number mismatch",
761 dev->data->port_id, idx);
764 if (priv->mtu != spriv->mtu) {
765 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mtu mismatch",
766 dev->data->port_id, idx);
769 if (priv->dev_data->dev_conf.intr_conf.rxq !=
770 spriv->dev_data->dev_conf.intr_conf.rxq) {
771 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: interrupt mismatch",
772 dev->data->port_id, idx);
775 if (mp != NULL && rxq_ctrl->rxq.mp != mp) {
776 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mempool mismatch",
777 dev->data->port_id, idx);
779 } else if (mp == NULL) {
780 if (conf->rx_nseg != rxq_ctrl->rxseg_n) {
781 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment number mismatch",
782 dev->data->port_id, idx);
785 for (i = 0; i < conf->rx_nseg; i++) {
786 if (memcmp(&conf->rx_seg[i].split, &rxq_ctrl->rxseg[i],
787 sizeof(struct rte_eth_rxseg_split))) {
788 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch",
789 dev->data->port_id, idx, i);
794 if (priv->config.hw_padding != spriv->config.hw_padding) {
795 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: padding mismatch",
796 dev->data->port_id, idx);
799 if (priv->config.cqe_comp != spriv->config.cqe_comp ||
800 (priv->config.cqe_comp &&
801 priv->config.cqe_comp_fmt != spriv->config.cqe_comp_fmt)) {
802 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: CQE compression mismatch",
803 dev->data->port_id, idx);
812 * Pointer to Ethernet device structure.
816 * Number of descriptors to configure in queue.
818 * NUMA socket on which memory must be allocated.
820 * Thresholds parameters.
822 * Memory pool for buffer allocations.
825 * 0 on success, a negative errno value otherwise and rte_errno is set.
828 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
829 unsigned int socket, const struct rte_eth_rxconf *conf,
830 struct rte_mempool *mp)
832 struct mlx5_priv *priv = dev->data->dev_private;
833 struct mlx5_rxq_priv *rxq;
834 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
835 struct rte_eth_rxseg_split *rx_seg =
836 (struct rte_eth_rxseg_split *)conf->rx_seg;
837 struct rte_eth_rxseg_split rx_single = {.mp = mp};
838 uint16_t n_seg = conf->rx_nseg;
840 uint64_t offloads = conf->offloads |
841 dev->data->dev_conf.rxmode.offloads;
845 * The parameters should be checked on the rte_eth_dev layer.
846 * If mp is specified, it means a compatible configuration
847 * without the buffer split feature tuning.
853 /* The offloads should be checked on rte_eth_dev layer. */
854 MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
855 if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
856 DRV_LOG(ERR, "port %u queue index %u split "
857 "offload not configured",
858 dev->data->port_id, idx);
862 MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
864 if (conf->share_group > 0) {
865 if (!priv->sh->cdev->config.hca_attr.mem_rq_rmp) {
866 DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
867 dev->data->port_id, idx);
871 if (priv->obj_ops.rxq_obj_new != devx_obj_ops.rxq_obj_new) {
872 DRV_LOG(ERR, "port %u queue index %u shared Rx queue needs DevX api",
873 dev->data->port_id, idx);
877 if (conf->share_qid >= priv->rxqs_n) {
878 DRV_LOG(ERR, "port %u shared Rx queue index %u > number of Rx queues %u",
879 dev->data->port_id, conf->share_qid,
884 if (priv->config.mprq.enabled) {
885 DRV_LOG(ERR, "port %u shared Rx queue index %u: not supported when MPRQ enabled",
886 dev->data->port_id, conf->share_qid);
890 /* Try to reuse shared RXQ. */
891 rxq_ctrl = mlx5_shared_rxq_get(dev, conf->share_group,
893 if (rxq_ctrl != NULL &&
894 !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket,
900 res = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl);
904 rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
907 DRV_LOG(ERR, "port %u unable to allocate rx queue index %u private data",
908 dev->data->port_id, idx);
914 (*priv->rxq_privs)[idx] = rxq;
915 if (rxq_ctrl != NULL) {
916 /* Join owner list. */
917 LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
918 rxq->ctrl = rxq_ctrl;
920 rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg,
922 if (rxq_ctrl == NULL) {
923 DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
924 dev->data->port_id, idx);
926 (*priv->rxq_privs)[idx] = NULL;
931 mlx5_rxq_ref(dev, idx);
932 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
933 dev->data->port_id, idx);
934 dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
941 * Pointer to Ethernet device structure.
945 * Number of descriptors to configure in queue.
946 * @param hairpin_conf
947 * Hairpin configuration parameters.
950 * 0 on success, a negative errno value otherwise and rte_errno is set.
953 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
955 const struct rte_eth_hairpin_conf *hairpin_conf)
957 struct mlx5_priv *priv = dev->data->dev_private;
958 struct mlx5_rxq_priv *rxq;
959 struct mlx5_rxq_ctrl *rxq_ctrl;
962 res = mlx5_rx_queue_pre_setup(dev, idx, &desc, NULL);
965 if (hairpin_conf->peer_count != 1) {
967 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue index %u"
968 " peer count is %u", dev->data->port_id,
969 idx, hairpin_conf->peer_count);
972 if (hairpin_conf->peers[0].port == dev->data->port_id) {
973 if (hairpin_conf->peers[0].queue >= priv->txqs_n) {
975 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
976 " index %u, Tx %u is larger than %u",
977 dev->data->port_id, idx,
978 hairpin_conf->peers[0].queue, priv->txqs_n);
982 if (hairpin_conf->manual_bind == 0 ||
983 hairpin_conf->tx_explicit == 0) {
985 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
986 " index %u peer port %u with attributes %u %u",
987 dev->data->port_id, idx,
988 hairpin_conf->peers[0].port,
989 hairpin_conf->manual_bind,
990 hairpin_conf->tx_explicit);
994 rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
997 DRV_LOG(ERR, "port %u unable to allocate hairpin rx queue index %u private data",
998 dev->data->port_id, idx);
1004 (*priv->rxq_privs)[idx] = rxq;
1005 rxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf);
1007 DRV_LOG(ERR, "port %u unable to allocate hairpin queue index %u",
1008 dev->data->port_id, idx);
1010 (*priv->rxq_privs)[idx] = NULL;
1014 DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
1015 dev->data->port_id, idx);
1016 dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
1021 * DPDK callback to release a RX queue.
1024 * Pointer to Ethernet device structure.
1026 * Receive queue index.
1029 mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1031 struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];
1035 if (!mlx5_rxq_releasable(dev, qid))
1036 rte_panic("port %u Rx queue %u is still used by a flow and"
1037 " cannot be removed\n", dev->data->port_id, qid);
1038 mlx5_rxq_release(dev, qid);
1042 * Allocate queue vector and fill epoll fd list for Rx interrupts.
1045 * Pointer to Ethernet device.
1048 * 0 on success, a negative errno value otherwise and rte_errno is set.
1051 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
1053 struct mlx5_priv *priv = dev->data->dev_private;
1055 unsigned int rxqs_n = priv->rxqs_n;
1056 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
1057 unsigned int count = 0;
1058 struct rte_intr_handle *intr_handle = dev->intr_handle;
1060 if (!dev->data->dev_conf.intr_conf.rxq)
1062 mlx5_rx_intr_vec_disable(dev);
1063 if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) {
1065 "port %u failed to allocate memory for interrupt"
1066 " vector, Rx interrupts will not be supported",
1067 dev->data->port_id);
1072 if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_EXT))
1075 for (i = 0; i != n; ++i) {
1076 /* This rxq obj must not be released in this function. */
1077 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
1078 struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;
1081 /* Skip queues that cannot request interrupts. */
1082 if (!rxq_obj || (!rxq_obj->ibv_channel &&
1083 !rxq_obj->devx_channel)) {
1084 /* Use invalid intr_vec[] index to disable entry. */
1085 if (rte_intr_vec_list_index_set(intr_handle, i,
1086 RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
1090 mlx5_rxq_ref(dev, i);
1091 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
1093 "port %u too many Rx queues for interrupt"
1094 " vector size (%d), Rx interrupts cannot be"
1096 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
1097 mlx5_rx_intr_vec_disable(dev);
1101 rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
1105 "port %u failed to make Rx interrupt file"
1106 " descriptor %d non-blocking for queue index"
1108 dev->data->port_id, rxq_obj->fd, i);
1109 mlx5_rx_intr_vec_disable(dev);
1113 if (rte_intr_vec_list_index_set(intr_handle, i,
1114 RTE_INTR_VEC_RXTX_OFFSET + count))
1116 if (rte_intr_efds_index_set(intr_handle, count,
1122 mlx5_rx_intr_vec_disable(dev);
1123 else if (rte_intr_nb_efd_set(intr_handle, count))
1129 * Clean up Rx interrupts handler.
1132 * Pointer to Ethernet device.
1135 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
1137 struct mlx5_priv *priv = dev->data->dev_private;
1138 struct rte_intr_handle *intr_handle = dev->intr_handle;
1140 unsigned int rxqs_n = priv->rxqs_n;
1141 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
1143 if (!dev->data->dev_conf.intr_conf.rxq)
1145 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0)
1147 for (i = 0; i != n; ++i) {
1148 if (rte_intr_vec_list_index_get(intr_handle, i) ==
1149 RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID)
1152 * Need to access the queue directly to release the reference
1153 * kept in mlx5_rx_intr_vec_enable().
1155 mlx5_rxq_deref(dev, i);
1158 rte_intr_free_epoll_fd(intr_handle);
1160 rte_intr_vec_list_free(intr_handle);
1162 rte_intr_nb_efd_set(intr_handle, 0);
1166 * MLX5 CQ notification.
1169 * Pointer to receive queue structure.
1171 * Sequence number per receive queue.
1174 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
1177 uint32_t doorbell_hi;
1180 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
1181 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
1182 doorbell = (uint64_t)doorbell_hi << 32;
1183 doorbell |= rxq->cqn;
1184 mlx5_doorbell_ring(&rxq->uar_data, rte_cpu_to_be_64(doorbell),
1185 doorbell_hi, &rxq->cq_db[MLX5_CQ_ARM_DB], 0);
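/*
 * Worked example of the composition above (symbolic, hypothetical
 * values): with sq_n_rxq = 3 the masked sequence number sq_n lands in
 * the bits above MLX5_CQ_SQN_OFFSET of doorbell_hi, the masked cq_ci
 * fills the low bits, and the 64-bit word written to the UAR is
 *
 *     ((uint64_t)doorbell_hi << 32) | rxq->cqn;
 *
 * i.e. the arm sequence plus consumer index in the high word and the
 * CQ number in the low word.
 */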
1189 * DPDK callback for Rx queue interrupt enable.
1192 * Pointer to Ethernet device structure.
1193 * @param rx_queue_id
1197 * 0 on success, a negative errno value otherwise and rte_errno is set.
1200 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1202 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
1205 if (rxq->ctrl->irq) {
1206 if (!rxq->ctrl->obj)
1208 mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);
1217 * DPDK callback for Rx queue interrupt disable.
1220 * Pointer to Ethernet device structure.
1221 * @param rx_queue_id
1225 * 0 on success, a negative errno value otherwise and rte_errno is set.
1228 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1230 struct mlx5_priv *priv = dev->data->dev_private;
1231 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
1238 if (!rxq->ctrl->obj)
1240 if (rxq->ctrl->irq) {
1241 ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);
1244 rxq->ctrl->rxq.cq_arm_sn++;
1249 * The ret variable may be EAGAIN, which means the get_event function
1250 * was called before an event was received.
1256 if (rte_errno != EAGAIN)
1257 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
1258 dev->data->port_id, rx_queue_id);
1263 * Verify the Rx queue objects list is empty
1266 * Pointer to Ethernet device.
1269 * The number of objects not released.
1272 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1274 struct mlx5_priv *priv = dev->data->dev_private;
1276 struct mlx5_rxq_obj *rxq_obj;
1278 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1279 if (rxq_obj->rxq_ctrl == NULL)
1281 if (rxq_obj->rxq_ctrl->rxq.shared &&
1282 !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
1284 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1285 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1292 * Callback function to initialize mbufs for Multi-Packet RQ.
1295 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1296 void *_m, unsigned int i __rte_unused)
1298 struct mlx5_mprq_buf *buf = _m;
1299 struct rte_mbuf_ext_shared_info *shinfo;
1300 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1303 memset(_m, 0, sizeof(*buf));
1305 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1306 for (j = 0; j != strd_n; ++j) {
1307 shinfo = &buf->shinfos[j];
1308 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1309 shinfo->fcb_opaque = buf;
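/*
 * Sketch of how these shared infos are consumed on the Rx path
 * (assumption based on the mbuf external-buffer API; the stride_*
 * names are illustrative): stride j of an MPRQ buffer is attached to
 * an mbuf so that mlx5_mprq_buf_free_cb() runs when the last
 * reference is gone:
 *
 *     rte_pktmbuf_attach_extbuf(pkt, stride_addr, stride_iova,
 *                               stride_len, &buf->shinfos[j]);
 */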
1314 * Free mempool of Multi-Packet RQ.
1317 * Pointer to Ethernet device.
1320 * 0 on success, negative errno value on failure.
1323 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1325 struct mlx5_priv *priv = dev->data->dev_private;
1326 struct rte_mempool *mp = priv->mprq_mp;
1331 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1332 dev->data->port_id, mp->name);
1334 * If a buffer in the pool has been externally attached to a mbuf and it
1335 * is still in use by the application, destroying the Rx queue can spoil
1336 * the packet. It is unlikely to happen but if the application dynamically
1337 * creates and destroys queues while holding Rx packets, this can happen.
1339 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1340 * RQ isn't provided by the application but managed by the PMD.
1342 if (!rte_mempool_full(mp)) {
1344 "port %u mempool for Multi-Packet RQ is still in use",
1345 dev->data->port_id);
1349 rte_mempool_free(mp);
1350 /* Unset mempool for each Rx queue. */
1351 for (i = 0; i != priv->rxqs_n; ++i) {
1352 struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, i);
1356 rxq->mprq_mp = NULL;
1358 priv->mprq_mp = NULL;
1363 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1364 * mempool. If already allocated, reuse it if there are enough elements.
1365 * Otherwise, resize it.
1368 * Pointer to Ethernet device.
1371 * 0 on success, negative errno value on failure.
1374 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1376 struct mlx5_priv *priv = dev->data->dev_private;
1377 struct rte_mempool *mp = priv->mprq_mp;
1378 char name[RTE_MEMPOOL_NAMESIZE];
1379 unsigned int desc = 0;
1380 unsigned int buf_len;
1381 unsigned int obj_num;
1382 unsigned int obj_size;
1383 unsigned int log_strd_num = 0;
1384 unsigned int log_strd_sz = 0;
1386 unsigned int n_ibv = 0;
1389 if (!mlx5_mprq_enabled(dev))
1391 /* Count the total number of descriptors configured. */
1392 for (i = 0; i != priv->rxqs_n; ++i) {
1393 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
1394 struct mlx5_rxq_data *rxq;
1396 if (rxq_ctrl == NULL ||
1397 rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1399 rxq = &rxq_ctrl->rxq;
1401 desc += 1 << rxq->elts_n;
1402 /* Get the max number of strides. */
1403 if (log_strd_num < rxq->log_strd_num)
1404 log_strd_num = rxq->log_strd_num;
1405 /* Get the max size of a stride. */
1406 if (log_strd_sz < rxq->log_strd_sz)
1407 log_strd_sz = rxq->log_strd_sz;
1409 MLX5_ASSERT(log_strd_num && log_strd_sz);
1410 buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);
1411 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len +
1412 RTE_BIT32(log_strd_num) *
1413 sizeof(struct rte_mbuf_ext_shared_info) +
1414 RTE_PKTMBUF_HEADROOM;
1416 * Received packets can be either memcpy'd or externally referenced. In
1417 * case a packet is attached to an mbuf as an external buffer, it isn't
1418 * possible to predict how the buffers will be queued by the application,
1419 * so there is no way to pre-allocate exactly the needed buffers in
1420 * advance; enough buffers must be prepared speculatively.
1422 * In the data path, if this mempool is depleted, the PMD will try to memcpy
1423 * received packets into buffers provided by the application (rxq->mp) until
1424 * this mempool becomes available again.
1427 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1429 * rte_mempool_create_empty() has a sanity check refusing a cache size
1430 * that is large compared to the number of elements.
1431 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the
1432 * constant 2 is used here instead.
1434 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
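/*
 * Worked example (hypothetical configuration): two MPRQ-capable queues
 * with 512 descriptors each give desc = 1024 and n_ibv = 2, so
 * obj_num = 1024 + MLX5_MPRQ_MP_CACHE_SZ * 2; the RTE_MAX() above only
 * kicks in for tiny configurations where desc is smaller than twice
 * the cache size.
 */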
1435 /* Check if a mempool is already allocated and if it can be reused. */
1436 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1437 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1438 dev->data->port_id, mp->name);
1441 } else if (mp != NULL) {
1442 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1443 dev->data->port_id, mp->name);
1445 * If freeing fails, the mempool may still be in use; there is no way
1446 * but to keep using the existing one. On buffer underrun, packets
1447 * will be memcpy'd instead of attached as external buffers.
1450 if (mlx5_mprq_free_mp(dev)) {
1451 if (mp->elt_size >= obj_size)
1457 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1458 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1459 0, NULL, NULL, mlx5_mprq_buf_init,
1460 (void *)((uintptr_t)1 << log_strd_num),
1461 dev->device->numa_node, 0);
1464 "port %u failed to allocate a mempool for"
1465 " Multi-Packet RQ, count=%u, size=%u",
1466 dev->data->port_id, obj_num, obj_size);
1470 ret = mlx5_mr_mempool_register(priv->sh->cdev, mp, false);
1471 if (ret < 0 && rte_errno != EEXIST) {
1473 DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
1474 dev->data->port_id);
1475 rte_mempool_free(mp);
1481 /* Set mempool for each Rx queue. */
1482 for (i = 0; i != priv->rxqs_n; ++i) {
1483 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
1485 if (rxq_ctrl == NULL ||
1486 rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1488 rxq_ctrl->rxq.mprq_mp = mp;
1490 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1491 dev->data->port_id);
1495 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1496 sizeof(struct rte_vlan_hdr) * 2 + \
1497 sizeof(struct rte_ipv6_hdr)))
1498 #define MAX_TCP_OPTION_SIZE 40u
1499 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1500 sizeof(struct rte_tcp_hdr) + \
1501 MAX_TCP_OPTION_SIZE))
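/*
 * Worked sizes for the macros above: MLX5_MAX_TCP_HDR_OFFSET is
 * 14 (Ethernet) + 2 * 4 (two VLAN tags) + 40 (IPv6) = 62 bytes, and
 * MLX5_MAX_LRO_HEADER_FIX adds 20 (TCP) + 40 (maximum TCP options)
 * for 122 bytes in total.
 */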
1504 * Adjust the maximum LRO message size.
1507 * Pointer to Ethernet device.
1510 * @param max_lro_size
1511 * The maximum size for LRO packet.
1514 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1515 uint32_t max_lro_size)
1517 struct mlx5_priv *priv = dev->data->dev_private;
1519 if (priv->sh->cdev->config.hca_attr.lro_max_msg_sz_mode ==
1520 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1521 MLX5_MAX_TCP_HDR_OFFSET)
1522 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1523 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1524 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1525 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
1526 if (priv->max_lro_msg_size)
1527 priv->max_lro_msg_size =
1528 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1530 priv->max_lro_msg_size = max_lro_size;
1532 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1533 dev->data->port_id, idx,
1534 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1538 * Prepare both the size and the number of strides for Multi-Packet RQ.
1541 * Pointer to Ethernet device.
1545 * Number of descriptors to configure in queue.
1547 * Indicates whether Rx segmentation is enabled; if so, Multi-Packet RQ is not used.
1548 * @param min_mbuf_size
1549 * Non-scatter minimum mbuf size: max_rx_pktlen plus overhead.
1550 * @param actual_log_stride_num
1551 * Log number of strides to configure for this queue.
1552 * @param actual_log_stride_size
1553 * Log stride size to configure for this queue.
1556 * 0 if Multi-Packet RQ is supported, otherwise -1.
1559 mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1560 bool rx_seg_en, uint32_t min_mbuf_size,
1561 uint32_t *actual_log_stride_num,
1562 uint32_t *actual_log_stride_size)
1564 struct mlx5_priv *priv = dev->data->dev_private;
1565 struct mlx5_port_config *config = &priv->config;
1566 struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
1567 uint32_t log_min_stride_num = dev_cap->mprq.log_min_stride_num;
1568 uint32_t log_max_stride_num = dev_cap->mprq.log_max_stride_num;
1569 uint32_t log_def_stride_num =
1570 RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM,
1571 log_min_stride_num),
1572 log_max_stride_num);
1573 uint32_t log_min_stride_size = dev_cap->mprq.log_min_stride_size;
1574 uint32_t log_max_stride_size = dev_cap->mprq.log_max_stride_size;
1575 uint32_t log_def_stride_size =
1576 RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE,
1577 log_min_stride_size),
1578 log_max_stride_size);
1579 uint32_t log_stride_wqe_size;
1581 if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
1583 /* Checks if chosen number of strides is in supported range. */
1584 if (config->mprq.log_stride_num > log_max_stride_num ||
1585 config->mprq.log_stride_num < log_min_stride_num) {
1586 *actual_log_stride_num = log_def_stride_num;
1588 "Port %u Rx queue %u number of strides for Multi-Packet RQ is out of range, setting default value (%u)",
1589 dev->data->port_id, idx, RTE_BIT32(log_def_stride_num));
1591 *actual_log_stride_num = config->mprq.log_stride_num;
1593 if (config->mprq.log_stride_size) {
1594 /* Checks if chosen size of stride is in supported range. */
1595 if (config->mprq.log_stride_size > log_max_stride_size ||
1596 config->mprq.log_stride_size < log_min_stride_size) {
1597 *actual_log_stride_size = log_def_stride_size;
1599 "Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)",
1600 dev->data->port_id, idx,
1601 RTE_BIT32(log_def_stride_size));
1603 *actual_log_stride_size = config->mprq.log_stride_size;
1606 if (min_mbuf_size <= RTE_BIT32(log_max_stride_size))
1607 *actual_log_stride_size = log2above(min_mbuf_size);
1611 log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size;
1612 /* Check if WQE buffer size is supported by hardware. */
1613 if (log_stride_wqe_size < dev_cap->mprq.log_min_stride_wqe_size) {
1614 *actual_log_stride_num = log_def_stride_num;
1615 *actual_log_stride_size = log_def_stride_size;
1617 "Port %u Rx queue %u size of WQE buffer for Multi-Packet RQ is too small, setting default values (stride_num_n=%u, stride_size_n=%u)",
1618 dev->data->port_id, idx, RTE_BIT32(log_def_stride_num),
1619 RTE_BIT32(log_def_stride_size));
1620 log_stride_wqe_size = log_def_stride_num + log_def_stride_size;
1622 MLX5_ASSERT(log_stride_wqe_size >=
1623 dev_cap->mprq.log_min_stride_wqe_size);
1624 if (desc <= RTE_BIT32(*actual_log_stride_num))
1626 if (min_mbuf_size > RTE_BIT32(log_stride_wqe_size)) {
1627 DRV_LOG(WARNING, "Port %u Rx queue %u "
1628 "Multi-Packet RQ is unsupported, WQE buffer size (%u) "
1629 "is smaller than min mbuf size (%u)",
1630 dev->data->port_id, idx, RTE_BIT32(log_stride_wqe_size),
1634 DRV_LOG(DEBUG, "Port %u Rx queue %u "
1635 "Multi-Packet RQ is enabled strd_num_n = %u, strd_sz_n = %u",
1636 dev->data->port_id, idx, RTE_BIT32(*actual_log_stride_num),
1637 RTE_BIT32(*actual_log_stride_size));
1640 if (config->mprq.enabled)
1642 "Port %u MPRQ is requested but cannot be enabled\n"
1643 " (requested: pkt_sz = %u, desc_num = %u,"
1644 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1645 " supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
1646 " min_stride_sz = %u, max_stride_sz = %u).\n"
1647 "Rx segment is %senable.",
1648 dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
1649 RTE_BIT32(config->mprq.log_stride_size),
1650 RTE_BIT32(config->mprq.log_stride_num),
1651 config->mprq.min_rxqs_num,
1652 RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
1653 RTE_BIT32(dev_cap->mprq.log_min_stride_size),
1654 RTE_BIT32(dev_cap->mprq.log_max_stride_size),
1655 rx_seg_en ? "" : "not ");
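/*
 * Worked example of the stride math above (hypothetical capabilities
 * and devargs): with log_stride_num = 6 and log_stride_size = 11 each
 * WQE buffer spans 64 strides of 2 KiB, so log_stride_wqe_size = 17
 * (128 KiB); a 9000-byte min_mbuf_size then fits in one WQE buffer
 * and consumes at most ceil(9000 / 2048) = 5 strides per packet.
 */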
1660 * Create a DPDK Rx queue.
1663 * Pointer to Ethernet device.
1665 * RX queue private data.
1667 * Number of descriptors to configure in queue.
1669 * NUMA socket on which memory must be allocated.
1672 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1674 struct mlx5_rxq_ctrl *
1675 mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
1677 unsigned int socket, const struct rte_eth_rxconf *conf,
1678 const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
1680 uint16_t idx = rxq->idx;
1681 struct mlx5_priv *priv = dev->data->dev_private;
1682 struct mlx5_rxq_ctrl *tmpl;
1683 unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
1684 struct mlx5_port_config *config = &priv->config;
1685 uint64_t offloads = conf->offloads |
1686 dev->data->dev_conf.rxmode.offloads;
1687 unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
1688 unsigned int max_rx_pktlen = lro_on_queue ?
1689 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1690 dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
1692 unsigned int non_scatter_min_mbuf_size = max_rx_pktlen +
1693 RTE_PKTMBUF_HEADROOM;
1694 unsigned int max_lro_size = 0;
1695 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1696 uint32_t mprq_log_actual_stride_num = 0;
1697 uint32_t mprq_log_actual_stride_size = 0;
1698 bool rx_seg_en = n_seg != 1 || rx_seg[0].offset || rx_seg[0].length;
1699 const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
1700 non_scatter_min_mbuf_size,
1701 &mprq_log_actual_stride_num,
1702 &mprq_log_actual_stride_size);
1704 * Always allocate extra slots, even if eventually
1705 * the vector Rx will not be used.
1707 uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1708 size_t alloc_size = sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *);
1709 const struct rte_eth_rxseg_split *qs_seg = rx_seg;
1710 unsigned int tail_len;
1713 /* Trim the number of descs needed. */
1714 desc >>= mprq_log_actual_stride_num;
1715 alloc_size += desc * sizeof(struct mlx5_mprq_buf *);
1717 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, alloc_size, 0, socket);
1722 LIST_INIT(&tmpl->owners);
1724 LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
1725 MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
1727 * Save the original segment configuration in the shared queue
1728 * descriptor for the later check on the sibling queue creation.
1730 tmpl->rxseg_n = n_seg;
1731 rte_memcpy(tmpl->rxseg, qs_seg,
1732 sizeof(struct rte_eth_rxseg_split) * n_seg);
1734 * Build the array of actual buffer offsets and lengths.
1735 * Pad with buffers from the last memory pool if
1736 * needed to handle max size packets; replace zero lengths
1737 * with the buffer length from the pool.
1739 tail_len = max_rx_pktlen;
1741 struct mlx5_eth_rxseg *hw_seg =
1742 &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
1743 uint32_t buf_len, offset, seg_len;
1746 * For the buffers beyond the described segments the offset is zero;
1747 * the first buffer contains the headroom.
1749 buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
1750 offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
1751 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
1753 * For the buffers beyond the described segments the length is the
1754 * pool buffer length; zero lengths are replaced with the
1755 * pool buffer length as well.
1757 seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
1761 /* Check is done in long int, no overflows. */
1762 if (buf_len < seg_len + offset) {
1763 DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
1764 "%u/%u can't be satisfied",
1765 dev->data->port_id, idx,
1766 qs_seg->length, qs_seg->offset);
1770 if (seg_len > tail_len)
1771 seg_len = buf_len - offset;
1772 if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
1774 "port %u too many SGEs (%u) needed to handle"
1775 " requested maximum packet size %u, the maximum"
1776 " supported are %u", dev->data->port_id,
1777 tmpl->rxq.rxseg_n, max_rx_pktlen,
1779 rte_errno = ENOTSUP;
1782 /* Build the actual scattering element in the queue object. */
1783 hw_seg->mp = qs_seg->mp;
1784 MLX5_ASSERT(offset <= UINT16_MAX);
1785 MLX5_ASSERT(seg_len <= UINT16_MAX);
1786 hw_seg->offset = (uint16_t)offset;
1787 hw_seg->length = (uint16_t)seg_len;
1789 * Advance the segment descriptor; the padding is based
1790 * on the attributes of the last descriptor.
1792 if (tmpl->rxq.rxseg_n < n_seg)
1794 tail_len -= RTE_MIN(tail_len, seg_len);
1795 } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
1796 MLX5_ASSERT(tmpl->rxq.rxseg_n &&
1797 tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
1798 if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
1799 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1800 " configured and there is not enough mbuf space (%u) to contain "
1801 "the maximum Rx packet length (%u) with headroom (%u)",
1802 dev->data->port_id, idx, mb_len, max_rx_pktlen,
1803 RTE_PKTMBUF_HEADROOM);
1807 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1808 if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
1809 &priv->sh->cdev->mr_scache.dev_gen, socket)) {
1810 /* rte_errno is already set. */
1813 tmpl->socket = socket;
1814 if (dev->data->dev_conf.intr_conf.rxq)
1817 /* TODO: Rx scatter isn't supported yet. */
1818 tmpl->rxq.sges_n = 0;
1819 tmpl->rxq.log_strd_num = mprq_log_actual_stride_num;
1820 tmpl->rxq.log_strd_sz = mprq_log_actual_stride_size;
1821 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1822 tmpl->rxq.strd_scatter_en =
1823 !!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
1824 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1825 config->mprq.max_memcpy_len);
1826 max_lro_size = RTE_MIN(max_rx_pktlen,
1827 RTE_BIT32(tmpl->rxq.log_strd_num) *
1828 RTE_BIT32(tmpl->rxq.log_strd_sz));
1829 } else if (tmpl->rxq.rxseg_n == 1) {
1830 MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
1831 tmpl->rxq.sges_n = 0;
1832 max_lro_size = max_rx_pktlen;
1833 } else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
1834 unsigned int sges_n;
1836 if (lro_on_queue && first_mb_free_size <
1837 MLX5_MAX_LRO_HEADER_FIX) {
1838 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1839 " to include the max header size(%u) for LRO",
1840 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1841 rte_errno = ENOTSUP;
1845 * Determine the number of SGEs needed for a full packet
1846 * and round it to the next power of two.
1848 sges_n = log2above(tmpl->rxq.rxseg_n);
1849 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1851 "port %u too many SGEs (%u) needed to handle"
1852 " requested maximum packet size %u, the maximum"
1853 " supported are %u", dev->data->port_id,
1854 1 << sges_n, max_rx_pktlen,
1855 1u << MLX5_MAX_LOG_RQ_SEGS);
1856 rte_errno = ENOTSUP;
1859 tmpl->rxq.sges_n = sges_n;
1860 max_lro_size = max_rx_pktlen;
1862 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1863 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1864 if (desc % (1 << tmpl->rxq.sges_n)) {
1866 "port %u number of Rx queue descriptors (%u) is not a"
1867 " multiple of SGEs per packet (%u)",
1870 1 << tmpl->rxq.sges_n);
1874 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1875 /* Toggle RX checksum offload if hardware supports it. */
1876 tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
1877 /* Configure Rx timestamp. */
1878 tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
1879 tmpl->rxq.timestamp_rx_flag = 0;
1880 if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
1881 &tmpl->rxq.timestamp_offset,
1882 &tmpl->rxq.timestamp_rx_flag) != 0) {
1883 DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
1886 /* Configure VLAN stripping. */
1887 tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
1888 /* By default, FCS (CRC) is stripped by hardware. */
1889 tmpl->rxq.crc_present = 0;
1890 tmpl->rxq.lro = lro_on_queue;
1891 if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
1892 if (priv->sh->config.hw_fcs_strip) {
1894 * RQs used for LRO-enabled TIRs should not be
1895 * configured to scatter the FCS.
1899 "port %u CRC stripping has been "
1900 "disabled but will still be performed "
1901 "by hardware, because LRO is enabled",
1902 dev->data->port_id);
1904 tmpl->rxq.crc_present = 1;
1907 "port %u CRC stripping has been disabled but will"
1908 " still be performed by hardware, make sure MLNX_OFED"
1909 " and firmware are up to date",
1910 dev->data->port_id);
1914 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1915 " incoming frames to hide it",
1917 tmpl->rxq.crc_present ? "disabled" : "enabled",
1918 tmpl->rxq.crc_present << 2);
1919 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1920 (!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
1922 tmpl->rxq.port_id = dev->data->port_id;
1923 tmpl->sh = priv->sh;
1924 tmpl->rxq.mp = rx_seg[0].mp;
1925 tmpl->rxq.elts_n = log2above(desc);
1926 tmpl->rxq.rq_repl_thresh = MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
1927 tmpl->rxq.elts = (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
1928 tmpl->rxq.mprq_bufs =
1929 (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
1930 tmpl->rxq.idx = idx;
1931 if (conf->share_group > 0) {
1932 tmpl->rxq.shared = 1;
1933 tmpl->share_group = conf->share_group;
1934 tmpl->share_qid = conf->share_qid;
1935 LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
1937 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1940 mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
1946 * Create a DPDK Rx hairpin queue.
1949 * Pointer to Ethernet device.
1953 * Number of descriptors to configure in queue.
1954 * @param hairpin_conf
1955 * The hairpin binding configuration.
1958 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1960 struct mlx5_rxq_ctrl *
1961 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
1963 const struct rte_eth_hairpin_conf *hairpin_conf)
1965 uint16_t idx = rxq->idx;
1966 struct mlx5_priv *priv = dev->data->dev_private;
1967 struct mlx5_rxq_ctrl *tmpl;
1969 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1975 LIST_INIT(&tmpl->owners);
1977 LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
1978 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1979 tmpl->socket = SOCKET_ID_ANY;
1980 tmpl->rxq.rss_hash = 0;
1981 tmpl->rxq.port_id = dev->data->port_id;
1982 tmpl->sh = priv->sh;
1983 tmpl->rxq.mp = NULL;
1984 tmpl->rxq.elts_n = log2above(desc);
1985 tmpl->rxq.elts = NULL;
1986 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
1987 tmpl->rxq.idx = idx;
1988 rxq->hairpin_conf = *hairpin_conf;
1989 mlx5_rxq_ref(dev, idx);
1990 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1995 * Increase Rx queue reference count.
1998 * Pointer to Ethernet device.
2003 * A pointer to the queue if it exists, NULL otherwise.
2005 struct mlx5_rxq_priv *
2006 mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
2008 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
2011 __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
2016 * Dereference a Rx queue.
2019 * Pointer to Ethernet device.
2024 * Updated reference count.
2027 mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
2029 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
2033 return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
2040 * Pointer to Ethernet device.
2045 * A pointer to the queue if it exists, NULL otherwise.
2047 struct mlx5_rxq_priv *
2048 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
2050 struct mlx5_priv *priv = dev->data->dev_private;
2052 MLX5_ASSERT(priv->rxq_privs != NULL);
2053 return (*priv->rxq_privs)[idx];
2057 * Get Rx queue shareable control.
2060 * Pointer to Ethernet device.
2065 * A pointer to the queue control if it exists, NULL otherwise.
2067 struct mlx5_rxq_ctrl *
2068 mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)
2070 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
2072 return rxq == NULL ? NULL : rxq->ctrl;
2076 * Get Rx queue shareable data.
2079 * Pointer to Ethernet device.
2084 * A pointer to the queue data if it exists, NULL otherwise.
2086 struct mlx5_rxq_data *
2087 mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
2089 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
2091 return rxq == NULL ? NULL : &rxq->ctrl->rxq;
2095 * Release a Rx queue.
2098 * Pointer to Ethernet device.
2103 * 1 while a reference on it exists, 0 when freed.
2106 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2108 struct mlx5_priv *priv = dev->data->dev_private;
2109 struct mlx5_rxq_priv *rxq;
2110 struct mlx5_rxq_ctrl *rxq_ctrl;
2113 if (priv->rxq_privs == NULL)
2115 rxq = mlx5_rxq_get(dev, idx);
2116 if (rxq == NULL || rxq->refcnt == 0)
2118 rxq_ctrl = rxq->ctrl;
2119 refcnt = mlx5_rxq_deref(dev, idx);
2122 } else if (refcnt == 1) { /* RxQ stopped. */
2123 priv->obj_ops.rxq_obj_release(rxq);
2124 if (!rxq_ctrl->started && rxq_ctrl->obj != NULL) {
2125 LIST_REMOVE(rxq_ctrl->obj, next);
2126 mlx5_free(rxq_ctrl->obj);
2127 rxq_ctrl->obj = NULL;
2129 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
2130 if (!rxq_ctrl->started)
2131 rxq_free_elts(rxq_ctrl);
2132 dev->data->rx_queue_state[idx] =
2133 RTE_ETH_QUEUE_STATE_STOPPED;
2135 } else { /* Refcnt zero, closing device. */
2136 LIST_REMOVE(rxq, owner_entry);
2137 if (LIST_EMPTY(&rxq_ctrl->owners)) {
2138 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
2140 (&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2141 if (rxq_ctrl->rxq.shared)
2142 LIST_REMOVE(rxq_ctrl, share_entry);
2143 LIST_REMOVE(rxq_ctrl, next);
2144 mlx5_free(rxq_ctrl);
2146 dev->data->rx_queues[idx] = NULL;
2148 (*priv->rxq_privs)[idx] = NULL;
2154 * Verify the Rx Queue list is empty
2157 * Pointer to Ethernet device.
2160 * The number of objects not released.
2163 mlx5_rxq_verify(struct rte_eth_dev *dev)
2165 struct mlx5_priv *priv = dev->data->dev_private;
2166 struct mlx5_rxq_ctrl *rxq_ctrl;
2169 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2170 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2171 dev->data->port_id, rxq_ctrl->rxq.idx);
2178 * Get a Rx queue type.
2181 * Pointer to Ethernet device.
2186 * The Rx queue type.
2189 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
2191 struct mlx5_priv *priv = dev->data->dev_private;
2192 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
2194 if (idx < priv->rxqs_n && rxq_ctrl != NULL)
2195 return rxq_ctrl->type;
2196 return MLX5_RXQ_TYPE_UNDEFINED;
2200 * Get a Rx hairpin queue configuration.
2203 * Pointer to Ethernet device.
2208 * Pointer to the configuration if a hairpin RX queue, otherwise NULL.
2210 const struct rte_eth_hairpin_conf *
2211 mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
2213 struct mlx5_priv *priv = dev->data->dev_private;
2214 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
2216 if (idx < priv->rxqs_n && rxq != NULL) {
2217 if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
2218 return &rxq->hairpin_conf;
2224 * Match queues listed in arguments to queues contained in indirection table
2228 * Pointer to indirection table to match.
2230 * Queues to match to queues in the indirection table.
2232 * Number of queues in the array.
2235 * 1 if all queues in the indirection table match, 0 otherwise.
2238 mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
2239 const uint16_t *queues, uint32_t queues_n)
2241 return (ind_tbl->queues_n == queues_n) &&
2242 (!memcmp(ind_tbl->queues, queues,
2243 ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
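/*
 * Example (illustrative): because the comparison is a memcmp() of the queue
 * arrays, a match requires the same queue indexes in the same order:
 *
 *	if (mlx5_ind_table_obj_match_queues(ind_tbl, queues, queues_n))
 *		(reuse ind_tbl instead of creating a new table)
 */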
/**
 * Get an indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queues
 *   Queues entering in the indirection table.
 * @param queues_n
 *   Number of queues in the array.
 *
 * @return
 *   An indirection table if found, NULL otherwise.
 */
struct mlx5_ind_table_obj *
mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
		       uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl;

	rte_rwlock_read_lock(&priv->ind_tbls_lock);
	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
		if ((ind_tbl->queues_n == queues_n) &&
		    (memcmp(ind_tbl->queues, queues,
			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
		     == 0)) {
			__atomic_fetch_add(&ind_tbl->refcnt, 1,
					   __ATOMIC_RELAXED);
			break;
		}
	}
	rte_rwlock_read_unlock(&priv->ind_tbls_lock);
	return ind_tbl;
}
/**
 * Release an indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param ind_tbl
 *   Indirection table to release.
 * @param standalone
 *   Indirection table for standalone queue.
 * @param deref_rxqs
 *   If true, then dereference Rx queues related to indirection table.
 *   Otherwise, no additional action will be taken.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
int
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
			   struct mlx5_ind_table_obj *ind_tbl,
			   bool standalone,
			   bool deref_rxqs)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i, ret;

	rte_rwlock_write_lock(&priv->ind_tbls_lock);
	ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
	if (!ret && !standalone)
		LIST_REMOVE(ind_tbl, next);
	rte_rwlock_write_unlock(&priv->ind_tbls_lock);
	if (ret)
		return 1;
	priv->obj_ops.ind_table_destroy(ind_tbl);
	if (deref_rxqs)
		for (i = 0; i != ind_tbl->queues_n; ++i)
			claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
	mlx5_free(ind_tbl);
	return 0;
}
/**
 * Verify the indirection table list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl;
	int ret = 0;

	rte_rwlock_read_lock(&priv->ind_tbls_lock);
	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
		DRV_LOG(DEBUG,
			"port %u indirection table obj %p still referenced",
			dev->data->port_id, (void *)ind_tbl);
		++ret;
	}
	rte_rwlock_read_unlock(&priv->ind_tbls_lock);
	return ret;
}
/**
 * Setup an indirection table structure fields.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param ind_tbl
 *   Indirection table to modify.
 * @param ref_qs
 *   Whether to increment RxQ reference counters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
			 struct mlx5_ind_table_obj *ind_tbl,
			 bool ref_qs)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t queues_n = ind_tbl->queues_n;
	uint16_t *queues = ind_tbl->queues;
	unsigned int i = 0, j;
	int ret = 0, err;
	const unsigned int n = rte_is_power_of_2(queues_n) ?
			       log2above(queues_n) :
			       log2above(priv->sh->dev_cap.ind_table_max_size);

	if (ref_qs)
		for (i = 0; i != queues_n; ++i) {
			if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
				ret = -rte_errno;
				goto error;
			}
		}
	ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
	if (ret)
		goto error;
	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
	return 0;
error:
	if (ref_qs) {
		err = rte_errno;
		for (j = 0; j < i; j++)
			mlx5_rxq_deref(dev, queues[j]);
		rte_errno = err;
	}
	DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
		dev->data->port_id);
	return ret;
}
/**
 * Create an indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queues
 *   Queues entering in the indirection table.
 * @param queues_n
 *   Number of queues in the array.
 * @param standalone
 *   Indirection table for standalone queue.
 * @param ref_qs
 *   Whether to increment RxQ reference counters.
 *
 * @return
 *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
		       uint32_t queues_n, bool standalone, bool ref_qs)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl;
	int ret;

	ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
			      queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
	if (!ind_tbl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	ind_tbl->queues_n = queues_n;
	ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
	memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
	ret = mlx5_ind_table_obj_setup(dev, ind_tbl, ref_qs);
	if (ret < 0) {
		mlx5_free(ind_tbl);
		return NULL;
	}
	if (!standalone) {
		rte_rwlock_write_lock(&priv->ind_tbls_lock);
		LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
		rte_rwlock_write_unlock(&priv->ind_tbls_lock);
	}
	return ind_tbl;
}
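/*
 * Example (illustrative): a non-standalone table is linked into
 * priv->ind_tbls, so a later lookup with an identical queue array reuses it
 * instead of creating a duplicate:
 *
 *	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 *	if (ind_tbl == NULL)
 *		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
 *						 false, true);
 */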
static int
mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused,
				    struct mlx5_ind_table_obj *ind_tbl)
{
	uint32_t refcnt;

	refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
	if (refcnt <= 1)
		return 0;
	/*
	 * Modification of indirection tables having more than 1
	 * reference is unsupported.
	 */
	DRV_LOG(DEBUG,
		"Port %u cannot modify indirection table %p (refcnt %u > 1).",
		dev->data->port_id, (void *)ind_tbl, refcnt);
	rte_errno = EINVAL;
	return -rte_errno;
}
/**
 * Modify an indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param ind_tbl
 *   Indirection table to modify.
 * @param queues
 *   Queues replacement for the indirection table.
 * @param queues_n
 *   Number of queues in the array.
 * @param standalone
 *   Indirection table for standalone queue.
 * @param ref_new_qs
 *   Whether to increment new RxQ set reference counters.
 * @param deref_old_qs
 *   Whether to decrement old RxQ set reference counters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
			  struct mlx5_ind_table_obj *ind_tbl,
			  uint16_t *queues, const uint32_t queues_n,
			  bool standalone, bool ref_new_qs, bool deref_old_qs)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i = 0, j;
	int ret = 0, err;
	const unsigned int n = rte_is_power_of_2(queues_n) ?
			       log2above(queues_n) :
			       log2above(priv->sh->dev_cap.ind_table_max_size);

	MLX5_ASSERT(standalone);
	RTE_SET_USED(standalone);
	if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
		return -rte_errno;
	if (ref_new_qs)
		for (i = 0; i != queues_n; ++i) {
			if (!mlx5_rxq_ref(dev, queues[i])) {
				ret = -rte_errno;
				goto error;
			}
		}
	MLX5_ASSERT(priv->obj_ops.ind_table_modify);
	ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
	if (ret)
		goto error;
	if (deref_old_qs)
		for (i = 0; i < ind_tbl->queues_n; i++)
			claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
	ind_tbl->queues_n = queues_n;
	ind_tbl->queues = queues;
	return 0;
error:
	if (ref_new_qs) {
		err = rte_errno;
		for (j = 0; j < i; j++)
			mlx5_rxq_deref(dev, queues[j]);
		rte_errno = err;
	}
	DRV_LOG(DEBUG, "Port %u cannot modify indirection table.",
		dev->data->port_id);
	return ret;
}
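/*
 * Example (illustrative): repointing a standalone table at a new queue set
 * while keeping the queue reference counts balanced:
 *
 *	ret = mlx5_ind_table_obj_modify(dev, ind_tbl, new_queues, new_n,
 *					true,   (standalone)
 *					true,   (ref_new_qs)
 *					true);  (deref_old_qs)
 */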
/**
 * Attach an indirection table to its queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param ind_tbl
 *   Indirection table to attach.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
			  struct mlx5_ind_table_obj *ind_tbl)
{
	int ret;

	ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
					ind_tbl->queues_n,
					true /* standalone */,
					true /* ref_new_qs */,
					false /* deref_old_qs */);
	if (ret != 0)
		DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
			dev->data->port_id, (void *)ind_tbl);
	return ret;
}
/**
 * Detach an indirection table from its queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param ind_tbl
 *   Indirection table to detach.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
			  struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ?
			       log2above(ind_tbl->queues_n) :
			       log2above(priv->sh->dev_cap.ind_table_max_size);
	unsigned int i;
	int ret;

	ret = mlx5_ind_table_obj_check_standalone(dev, ind_tbl);
	if (ret != 0)
		return ret;
	MLX5_ASSERT(priv->obj_ops.ind_table_modify);
	ret = priv->obj_ops.ind_table_modify(dev, n, NULL, 0, ind_tbl);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
			dev->data->port_id, (void *)ind_tbl);
		return ret;
	}
	for (i = 0; i < ind_tbl->queues_n; i++)
		mlx5_rxq_release(dev, ind_tbl->queues[i]);
	return ret;
}
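/*
 * Usage sketch (illustrative): detach/attach bracket a queue reconfiguration;
 * a detached table keeps its hardware object but references no queues until
 * the saved queue set is re-registered:
 *
 *	if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
 *		(handle error)
 *	(stop and reconfigure the Rx queues)
 *	if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
 *		(handle error)
 */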
int
mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
		   void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);

	return (hrxq->rss_key_len != rss_desc->key_len ||
		memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
		hrxq->hash_fields != rss_desc->hash_fields ||
		hrxq->ind_table->queues_n != rss_desc->queue_num ||
		memcmp(hrxq->ind_table->queues, rss_desc->queue,
		       rss_desc->queue_num * sizeof(rss_desc->queue[0])));
}
/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq_idx
 *   Index to Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param rss_key_len
 *   RSS key length.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param queues
 *   Queues entering in hash queue. In case of empty hash_fields only the
 *   first queue index will be taken for the indirection table.
 * @param queues_n
 *   Number of queues.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
		 const uint8_t *rss_key, uint32_t rss_key_len,
		 uint64_t hash_fields,
		 const uint16_t *queues, uint32_t queues_n)
{
	int err;
	struct mlx5_ind_table_obj *ind_tbl = NULL;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq =
		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
	bool dev_started = !!dev->data->dev_started;
	int ret;

	if (!hrxq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (hrxq->rss_key_len != rss_key_len) {
		/* The RSS key length is fixed (40 bytes), not supposed to change. */
		rte_errno = EINVAL;
		return -rte_errno;
	}
	queues_n = hash_fields ? queues_n : 1;
	if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
					    queues, queues_n)) {
		ind_tbl = hrxq->ind_table;
	} else {
		if (hrxq->standalone) {
			/*
			 * Replacement of indirection table unsupported for
			 * standalone hrxq objects (used by shared RSS).
			 */
			rte_errno = ENOTSUP;
			return -rte_errno;
		}
		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
		if (!ind_tbl)
			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
							 hrxq->standalone,
							 dev_started);
	}
	if (!ind_tbl) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	MLX5_ASSERT(priv->obj_ops.hrxq_modify);
	ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
					hash_fields, ind_tbl);
	if (ret)
		goto error;
	if (ind_tbl != hrxq->ind_table) {
		MLX5_ASSERT(!hrxq->standalone);
		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
					   hrxq->standalone, true);
		hrxq->ind_table = ind_tbl;
	}
	hrxq->hash_fields = hash_fields;
	memcpy(hrxq->rss_key, rss_key, rss_key_len);
	return 0;
error:
	err = rte_errno;
	if (ind_tbl != hrxq->ind_table) {
		MLX5_ASSERT(!hrxq->standalone);
		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone,
					   true);
	}
	rte_errno = err;
	return -rte_errno;
}
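/*
 * Example (illustrative): updating hash fields and the queue set of an
 * existing hash Rx queue; the key length must stay MLX5_RSS_HASH_KEY_LEN:
 *
 *	ret = mlx5_hrxq_modify(dev, hrxq_idx, rss_key,
 *			       MLX5_RSS_HASH_KEY_LEN, hash_fields,
 *			       queues, queues_n);
 */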
static void
__mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
	struct mlx5_priv *priv = dev->data->dev_private;

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	mlx5_glue->destroy_flow_action(hrxq->action);
#endif
	priv->obj_ops.hrxq_destroy(hrxq);
	if (!hrxq->standalone) {
		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
					   hrxq->standalone, true);
	}
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}
/**
 * Release the hash Rx queue (list callback).
 *
 * @param tool_ctx
 *   mlx5 list pointer.
 * @param entry
 *   Hash queue entry pointer.
 */
void
mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);

	__mlx5_hrxq_remove(dev, hrxq);
}
static struct mlx5_hrxq *
__mlx5_hrxq_create(struct rte_eth_dev *dev,
		   struct mlx5_flow_rss_desc *rss_desc)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const uint8_t *rss_key = rss_desc->key;
	uint32_t rss_key_len = rss_desc->key_len;
	bool standalone = !!rss_desc->shared_rss;
	const uint16_t *queues =
		standalone ? rss_desc->const_q : rss_desc->queue;
	uint32_t queues_n = rss_desc->queue_num;
	struct mlx5_hrxq *hrxq = NULL;
	uint32_t hrxq_idx = 0;
	struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
	int ret;

	queues_n = rss_desc->hash_fields ? queues_n : 1;
	if (!ind_tbl)
		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
	if (!ind_tbl)
		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
						 standalone,
						 !!dev->data->dev_started);
	if (!ind_tbl)
		return NULL;
	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
	if (!hrxq)
		goto error;
	hrxq->standalone = standalone;
	hrxq->idx = hrxq_idx;
	hrxq->ind_table = ind_tbl;
	hrxq->rss_key_len = rss_key_len;
	hrxq->hash_fields = rss_desc->hash_fields;
	memcpy(hrxq->rss_key, rss_key, rss_key_len);
	ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
	if (ret < 0)
		goto error;
	return hrxq;
error:
	if (!rss_desc->ind_tbl)
		mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);
	if (hrxq)
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
	return NULL;
}
struct mlx5_list_entry *
mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
	struct mlx5_hrxq *hrxq;

	hrxq = __mlx5_hrxq_create(dev, rss_desc);
	return hrxq ? &hrxq->entry : NULL;
}
struct mlx5_list_entry *
mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry,
		   void *cb_ctx __rte_unused)
{
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq;
	uint32_t hrxq_idx = 0;

	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
	if (!hrxq)
		return NULL;
	memcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN);
	hrxq->idx = hrxq_idx;
	return &hrxq->entry;
}
void
mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);

	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}
/**
 * Get an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param rss_desc
 *   RSS configuration for the Rx hash queue.
 *
 * @return
 *   A hash Rx queue index on success, 0 otherwise.
 */
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
		       struct mlx5_flow_rss_desc *rss_desc)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq;
	struct mlx5_list_entry *entry;
	struct mlx5_flow_cb_ctx ctx = {
		.data = rss_desc,
	};

	if (rss_desc->shared_rss) {
		hrxq = __mlx5_hrxq_create(dev, rss_desc);
	} else {
		entry = mlx5_list_register(priv->hrxqs, &ctx);
		if (!entry)
			return 0;
		hrxq = container_of(entry, typeof(*hrxq), entry);
	}
	if (hrxq)
		return hrxq->idx;
	return 0;
}
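/*
 * Example (illustrative): the returned index is later passed to
 * mlx5_hrxq_release(); non-shared descriptors go through the reference
 * counted mlx5 list, so equal RSS descriptions map to the same index:
 *
 *	uint32_t idx = mlx5_hrxq_get(dev, &rss_desc);
 *
 *	if (idx == 0)
 *		(creation or lookup failed)
 *	(use the hash Rx queue)
 *	mlx5_hrxq_release(dev, idx);
 */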
/**
 * Release the hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq_idx
 *   Index to Hash Rx queue to release.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq;

	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
	if (!hrxq)
		return 0;
	if (!hrxq->standalone)
		return mlx5_list_unregister(priv->hrxqs, &hrxq->entry);
	__mlx5_hrxq_remove(dev, hrxq);
	return 0;
}
/**
 * Create a drop Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
 */
struct mlx5_hrxq *
mlx5_drop_action_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = NULL;
	int ret;

	if (priv->drop_queue.hrxq)
		return priv->drop_queue.hrxq;
	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
	if (!hrxq) {
		DRV_LOG(WARNING,
			"Port %u cannot allocate memory for drop queue.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	priv->drop_queue.hrxq = hrxq;
	hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
				      0, SOCKET_ID_ANY);
	if (!hrxq->ind_table) {
		rte_errno = ENOMEM;
		goto error;
	}
	ret = priv->obj_ops.drop_action_create(dev);
	if (ret < 0)
		goto error;
	return hrxq;
error:
	if (hrxq) {
		if (hrxq->ind_table)
			mlx5_free(hrxq->ind_table);
		mlx5_free(hrxq);
	}
	priv->drop_queue.hrxq = NULL;
	return NULL;
}
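/*
 * Example (illustrative): the drop hrxq is a per-port singleton; repeated
 * calls return the cached object until mlx5_drop_action_destroy() frees it:
 *
 *	struct mlx5_hrxq *drop = mlx5_drop_action_create(dev);
 *
 *	if (drop == NULL)
 *		(handle error; rte_errno is set)
 */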
/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;

	if (!priv->drop_queue.hrxq)
		return;
	priv->obj_ops.drop_action_destroy(dev);
	mlx5_free(priv->drop_queue.rxq);
	mlx5_free(hrxq->ind_table);
	mlx5_free(hrxq);
	priv->drop_queue.rxq = NULL;
	priv->drop_queue.hrxq = NULL;
}
/**
 * Verify the hash Rx queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
uint32_t
mlx5_hrxq_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	return mlx5_list_get_entry_num(priv->hrxqs);
}
/**
 * Set the Rx queue timestamp conversion parameters.
 *
 * @param dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *data = mlx5_rxq_data_get(dev, i);

		if (data == NULL)
			continue;
		data->sh = sh;
		data->rt_timestamp = sh->dev_cap.rt_timestamp;
	}
}