1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
20 #include <rte_eal_paging.h>
22 #include <mlx5_glue.h>
23 #include <mlx5_malloc.h>
25 #include "mlx5_defs.h"
29 #include "mlx5_utils.h"
30 #include "mlx5_autoconf.h"
33 /* Default RSS hash key also used for ConnectX-3. */
34 uint8_t rss_hash_default_key[] = {
35 0x2c, 0xc6, 0x81, 0xd1,
36 0x5b, 0xdb, 0xf4, 0xf7,
37 0xfc, 0xa2, 0x83, 0x19,
38 0xdb, 0x1a, 0x3e, 0x94,
39 0x6b, 0x9e, 0x38, 0xd9,
40 0x2c, 0x9c, 0x03, 0xd1,
41 0xad, 0x99, 0x44, 0xa7,
42 0xd9, 0x56, 0x3d, 0x59,
43 0x06, 0x3c, 0x25, 0xf3,
44 0xfc, 0x1f, 0xdc, 0x2a,
47 /* Length of the default RSS hash key. */
48 static_assert(MLX5_RSS_HASH_KEY_LEN ==
49 (unsigned int)sizeof(rss_hash_default_key),
50 "wrong RSS default key size.");
53 * Calculate the number of CQEs in CQ for the Rx queue.
56 * Pointer to receive queue structure.
59 * Number of CQEs in CQ.
62 mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
65 unsigned int wqe_n = 1 << rxq_data->elts_n;
67 if (mlx5_rxq_mprq_enabled(rxq_data))
68 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
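/*
 * Worked example with illustrative values only: elts_n = 4 and
 * strd_num_n = 6 give wqe_n = 1 << 4 = 16 receive WQEs with
 * 1 << 6 = 64 strides each, so up to 16 * 64 = 1024 completions may be
 * outstanding and cqe_n = 1024 - 1 = 1023 for the MPRQ case above.
 */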
75 * Allocate RX queue elements for Multi-Packet RQ.
78 * Pointer to RX queue structure.
81 * 0 on success, a negative errno value otherwise and rte_errno is set.
84 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
86 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
87 unsigned int wqe_n = 1 << rxq->elts_n;
91 /* Iterate on buffers; one extra buffer is allocated as the MPRQ replacement (mprq_repl). */
92 for (i = 0; i <= wqe_n; ++i) {
93 struct mlx5_mprq_buf *buf;
95 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
96 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
101 (*rxq->mprq_bufs)[i] = buf;
103 rxq->mprq_repl = buf;
106 "port %u MPRQ queue %u allocated and configured %u segments",
107 rxq->port_id, rxq->idx, wqe_n);
110 err = rte_errno; /* Save rte_errno before cleanup. */
112 for (i = 0; (i != wqe_n); ++i) {
113 if ((*rxq->mprq_bufs)[i] != NULL)
114 rte_mempool_put(rxq->mprq_mp,
115 (*rxq->mprq_bufs)[i]);
116 (*rxq->mprq_bufs)[i] = NULL;
118 DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
119 rxq->port_id, rxq->idx);
120 rte_errno = err; /* Restore rte_errno. */
125 * Allocate RX queue elements for Single-Packet RQ.
128 * Pointer to RX queue structure.
131 * 0 on success, errno value on failure.
134 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
136 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
137 unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
138 (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
139 (1 << rxq_ctrl->rxq.elts_n);
143 /* Iterate on segments. */
144 for (i = 0; (i != elts_n); ++i) {
145 struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
146 struct rte_mbuf *buf;
148 buf = rte_pktmbuf_alloc(seg->mp);
150 DRV_LOG(ERR, "port %u empty mbuf pool",
151 PORT_ID(rxq_ctrl->priv));
155 /* Headroom is reserved by rte_pktmbuf_alloc(). */
156 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
157 /* Buffer is supposed to be empty. */
158 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
159 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
160 MLX5_ASSERT(!buf->next);
161 SET_DATA_OFF(buf, seg->offset);
162 PORT(buf) = rxq_ctrl->rxq.port_id;
163 DATA_LEN(buf) = seg->length;
164 PKT_LEN(buf) = seg->length;
166 (*rxq_ctrl->rxq.elts)[i] = buf;
168 /* If Rx vector is activated. */
169 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
170 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
171 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
172 struct rte_pktmbuf_pool_private *priv =
173 (struct rte_pktmbuf_pool_private *)
174 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
177 /* Initialize default rearm_data for vPMD. */
178 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
179 rte_mbuf_refcnt_set(mbuf_init, 1);
180 mbuf_init->nb_segs = 1;
181 mbuf_init->port = rxq->port_id;
182 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
183 mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
185 * prevent compiler reordering:
186 * rearm_data covers previous fields.
188 rte_compiler_barrier();
189 rxq->mbuf_initializer =
190 *(rte_xmm_t *)&mbuf_init->rearm_data;
191 /* Padding with a fake mbuf for vectorized Rx. */
192 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
193 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
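/*
 * Rough sketch of how the vectorized Rx path is expected to use the
 * state prepared above (illustration only, not the actual vPMD code):
 * the fields written to fake_mbuf sit in the mbuf rearm area, so one
 * wide copy such as
 *
 *   *(rte_xmm_t *)&pkt->rearm_data = rxq->mbuf_initializer;
 *
 * can reset data_off, refcnt, nb_segs and port (and ol_flags via the
 * 16-byte width) of a completed mbuf in a single store. The
 * MLX5_VPMD_DESCS_PER_LOOP fake-mbuf entries appended above let the
 * vector loop read past the ring end without touching real mbufs.
 */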
196 "port %u SPRQ queue %u allocated and configured %u segments"
198 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
199 elts_n / (1 << rxq_ctrl->rxq.sges_n));
202 err = rte_errno; /* Save rte_errno before cleanup. */
204 for (i = 0; (i != elts_n); ++i) {
205 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
206 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
207 (*rxq_ctrl->rxq.elts)[i] = NULL;
209 DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
210 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
211 rte_errno = err; /* Restore rte_errno. */
216 * Allocate RX queue elements.
219 * Pointer to RX queue structure.
222 * 0 on success, errno value on failure.
225 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
230 * For MPRQ we need to allocate both MPRQ buffers
231 * for WQEs and simple mbufs for vector processing.
233 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
234 ret = rxq_alloc_elts_mprq(rxq_ctrl);
235 return (ret || rxq_alloc_elts_sprq(rxq_ctrl));
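/*
 * Note on the combined return value: when MPRQ is enabled both element
 * arrays are populated, and the expression above reports failure if
 * either allocation fails; a non-zero MPRQ result short-circuits the
 * SPRQ allocation entirely.
 */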
239 * Free RX queue elements for Multi-Packet RQ.
242 * Pointer to RX queue structure.
245 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
247 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
250 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
251 rxq->port_id, rxq->idx, (1u << rxq->elts_n));
252 if (rxq->mprq_bufs == NULL)
254 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
255 if ((*rxq->mprq_bufs)[i] != NULL)
256 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
257 (*rxq->mprq_bufs)[i] = NULL;
259 if (rxq->mprq_repl != NULL) {
260 mlx5_mprq_buf_free(rxq->mprq_repl);
261 rxq->mprq_repl = NULL;
266 * Free RX queue elements for Single-Packet RQ.
269 * Pointer to RX queue structure.
272 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
274 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
275 const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
276 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
278 const uint16_t q_mask = q_n - 1;
279 uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
280 rxq->elts_ci : rxq->rq_ci;
281 uint16_t used = q_n - (elts_ci - rxq->rq_pi);
284 DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
285 PORT_ID(rxq_ctrl->priv), rxq->idx, q_n);
286 if (rxq->elts == NULL)
289 * Some mbufs in the ring belong to the application;
290 * they cannot be freed.
292 if (mlx5_rxq_check_vec_support(rxq) > 0) {
293 for (i = 0; i < used; ++i)
294 (*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
295 rxq->rq_pi = elts_ci;
297 for (i = 0; i != q_n; ++i) {
298 if ((*rxq->elts)[i] != NULL)
299 rte_pktmbuf_free_seg((*rxq->elts)[i]);
300 (*rxq->elts)[i] = NULL;
305 * Free RX queue elements.
308 * Pointer to RX queue structure.
311 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
314 * For MPRQ we need to free both the MPRQ buffers
315 * used for WQEs and the simple mbufs used for vector processing.
317 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
318 rxq_free_elts_mprq(rxq_ctrl);
319 rxq_free_elts_sprq(rxq_ctrl);
323 * Returns the per-queue supported offloads.
326 * Pointer to Ethernet device.
329 * Supported Rx offloads.
332 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
334 struct mlx5_priv *priv = dev->data->dev_private;
335 struct mlx5_dev_config *config = &priv->config;
336 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
337 DEV_RX_OFFLOAD_TIMESTAMP |
338 DEV_RX_OFFLOAD_JUMBO_FRAME |
339 DEV_RX_OFFLOAD_RSS_HASH);
341 if (!config->mprq.enabled)
342 offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
343 if (config->hw_fcs_strip)
344 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
346 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
347 DEV_RX_OFFLOAD_UDP_CKSUM |
348 DEV_RX_OFFLOAD_TCP_CKSUM);
349 if (config->hw_vlan_strip)
350 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
351 if (MLX5_LRO_SUPPORTED(dev))
352 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
358 * Returns the per-port supported offloads.
361 * Supported Rx offloads.
364 mlx5_get_rx_port_offloads(void)
366 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
372 * Verify if the queue can be released.
375 * Pointer to Ethernet device.
380 * 1 if the queue can be released,
381 * 0 if the queue cannot be released because there are still references to it,
382 * a negative errno value (rte_errno is set) if the queue doesn't exist.
385 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
387 struct mlx5_priv *priv = dev->data->dev_private;
388 struct mlx5_rxq_ctrl *rxq_ctrl;
390 if (!(*priv->rxqs)[idx]) {
394 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
395 return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
398 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
400 rxq_sync_cq(struct mlx5_rxq_data *rxq)
402 const uint16_t cqe_n = 1 << rxq->cqe_n;
403 const uint16_t cqe_mask = cqe_n - 1;
404 volatile struct mlx5_cqe *cqe;
409 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
410 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
411 if (ret == MLX5_CQE_STATUS_HW_OWN)
413 if (ret == MLX5_CQE_STATUS_ERR) {
417 MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
418 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
422 /* Compute the next non-compressed CQE. */
423 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
426 /* Move all CQEs to HW ownership, including possible MiniCQEs. */
427 for (i = 0; i < cqe_n; i++) {
428 cqe = &(*rxq->cqes)[i];
429 cqe->op_own = MLX5_CQE_INVALIDATE;
431 /* Resync CQE and WQE (WQ in RESET state). */
433 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
435 *rxq->rq_db = rte_cpu_to_be_32(0);
440 * Rx queue stop. Device queue goes to the RESET state,
441 * all involved mbufs are freed from WQ.
444 * Pointer to Ethernet device structure.
449 * 0 on success, a negative errno value otherwise and rte_errno is set.
452 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
454 struct mlx5_priv *priv = dev->data->dev_private;
455 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
456 struct mlx5_rxq_ctrl *rxq_ctrl =
457 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
460 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
461 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
463 DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
468 /* Remove all processed CQEs. */
470 /* Free all involved mbufs. */
471 rxq_free_elts(rxq_ctrl);
472 /* Set the actual queue state. */
473 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
478 * Rx queue stop. Device queue goes to the RESET state,
479 * all involved mbufs are freed from WQ.
482 * Pointer to Ethernet device structure.
487 * 0 on success, a negative errno value otherwise and rte_errno is set.
490 mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
492 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
495 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
496 DRV_LOG(ERR, "Hairpin queue can't be stopped");
500 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
503 * Vectorized Rx burst requires the CQ and RQ indices to stay
504 * synchronized; an RQ restart might break this
505 * and cause Rx malfunction, so queue stopping is
506 * not supported while the vectorized Rx burst is engaged.
507 * The burst routine pointer depends on the process
508 * type, so the check must be performed here in each process.
510 if (pkt_burst == mlx5_rx_burst_vec) {
511 DRV_LOG(ERR, "Rx queue stop is not supported "
512 "for vectorized Rx");
516 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
517 ret = mlx5_mp_os_req_queue_control(dev, idx,
518 MLX5_MP_REQ_QUEUE_RX_STOP);
520 ret = mlx5_rx_queue_stop_primary(dev, idx);
526 * Rx queue start. Device queue goes to the ready state,
527 * all required mbufs are allocated and WQ is replenished.
530 * Pointer to Ethernet device structure.
535 * 0 on success, a negative errno value otherwise and rte_errno is set.
538 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
540 struct mlx5_priv *priv = dev->data->dev_private;
541 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
542 struct mlx5_rxq_ctrl *rxq_ctrl =
543 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
546 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
547 /* Allocate needed buffers. */
548 ret = rxq_alloc_elts(rxq_ctrl);
550 DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
555 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
557 /* Reset RQ consumer before moving queue to READY state. */
558 *rxq->rq_db = rte_cpu_to_be_32(0);
560 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
562 DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
567 /* Reinitialize RQ - set WQEs. */
568 mlx5_rxq_initialize(rxq);
569 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
570 /* Set actual queue state. */
571 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
576 * Rx queue start. Device queue goes to the ready state,
577 * all required mbufs are allocated and WQ is replenished.
580 * Pointer to Ethernet device structure.
585 * 0 on success, a negative errno value otherwise and rte_errno is set.
588 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
592 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
593 DRV_LOG(ERR, "Hairpin queue can't be started");
597 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
599 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
600 ret = mlx5_mp_os_req_queue_control(dev, idx,
601 MLX5_MP_REQ_QUEUE_RX_START);
603 ret = mlx5_rx_queue_start_primary(dev, idx);
609 * Rx queue presetup checks.
612 * Pointer to Ethernet device structure.
616 * Number of descriptors to configure in queue.
619 * 0 on success, a negative errno value otherwise and rte_errno is set.
622 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
624 struct mlx5_priv *priv = dev->data->dev_private;
626 if (!rte_is_power_of_2(*desc)) {
627 *desc = 1 << log2above(*desc);
629 "port %u increased number of descriptors in Rx queue %u"
630 " to the next power of two (%d)",
631 dev->data->port_id, idx, *desc);
633 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
634 dev->data->port_id, idx, *desc);
635 if (idx >= priv->rxqs_n) {
636 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
637 dev->data->port_id, idx, priv->rxqs_n);
638 rte_errno = EOVERFLOW;
641 if (!mlx5_rxq_releasable(dev, idx)) {
642 DRV_LOG(ERR, "port %u unable to release queue index %u",
643 dev->data->port_id, idx);
647 mlx5_rxq_release(dev, idx);
654 * Pointer to Ethernet device structure.
658 * Number of descriptors to configure in queue.
660 * NUMA socket on which memory must be allocated.
662 * Thresholds parameters.
664 * Memory pool for buffer allocations.
667 * 0 on success, a negative errno value otherwise and rte_errno is set.
670 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
671 unsigned int socket, const struct rte_eth_rxconf *conf,
672 struct rte_mempool *mp)
674 struct mlx5_priv *priv = dev->data->dev_private;
675 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
676 struct mlx5_rxq_ctrl *rxq_ctrl =
677 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
678 struct rte_eth_rxseg_split *rx_seg =
679 (struct rte_eth_rxseg_split *)conf->rx_seg;
680 struct rte_eth_rxseg_split rx_single = {.mp = mp};
681 uint16_t n_seg = conf->rx_nseg;
686 * The parameters should be checked at the rte_eth_dev layer.
687 * If mp is specified, the legacy single-mempool configuration is
688 * requested and the buffer split feature is not used.
694 uint64_t offloads = conf->offloads |
695 dev->data->dev_conf.rxmode.offloads;
697 /* The offloads should be checked on rte_eth_dev layer. */
698 MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
699 if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
700 DRV_LOG(ERR, "port %u queue index %u split "
701 "offload not configured",
702 dev->data->port_id, idx);
706 MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
708 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
711 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
713 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
714 dev->data->port_id, idx);
718 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
719 dev->data->port_id, idx);
720 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
727 * Pointer to Ethernet device structure.
731 * Number of descriptors to configure in queue.
732 * @param hairpin_conf
733 * Hairpin configuration parameters.
736 * 0 on success, a negative errno value otherwise and rte_errno is set.
739 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
741 const struct rte_eth_hairpin_conf *hairpin_conf)
743 struct mlx5_priv *priv = dev->data->dev_private;
744 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
745 struct mlx5_rxq_ctrl *rxq_ctrl =
746 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
749 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
752 if (hairpin_conf->peer_count != 1) {
754 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue index %u"
755 " peer count is %u", dev->data->port_id,
756 idx, hairpin_conf->peer_count);
759 if (hairpin_conf->peers[0].port == dev->data->port_id) {
760 if (hairpin_conf->peers[0].queue >= priv->txqs_n) {
762 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
763 " index %u, Tx %u is larger than %u",
764 dev->data->port_id, idx,
765 hairpin_conf->peers[0].queue, priv->txqs_n);
769 if (hairpin_conf->manual_bind == 0 ||
770 hairpin_conf->tx_explicit == 0) {
772 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
773 " index %u peer port %u with attributes %u %u",
774 dev->data->port_id, idx,
775 hairpin_conf->peers[0].port,
776 hairpin_conf->manual_bind,
777 hairpin_conf->tx_explicit);
781 rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
783 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
784 dev->data->port_id, idx);
788 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
789 dev->data->port_id, idx);
790 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
795 * DPDK callback to release a RX queue.
798 * Pointer to Ethernet device structure.
800 * Receive queue index.
803 mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
805 struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];
809 if (!mlx5_rxq_releasable(dev, qid))
810 rte_panic("port %u Rx queue %u is still used by a flow and"
811 " cannot be removed\n", dev->data->port_id, qid);
812 mlx5_rxq_release(dev, qid);
816 * Allocate queue vector and fill epoll fd list for Rx interrupts.
819 * Pointer to Ethernet device.
822 * 0 on success, a negative errno value otherwise and rte_errno is set.
825 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
827 struct mlx5_priv *priv = dev->data->dev_private;
829 unsigned int rxqs_n = priv->rxqs_n;
830 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
831 unsigned int count = 0;
832 struct rte_intr_handle *intr_handle = dev->intr_handle;
834 if (!dev->data->dev_conf.intr_conf.rxq)
836 mlx5_rx_intr_vec_disable(dev);
837 intr_handle->intr_vec = mlx5_malloc(0,
838 n * sizeof(intr_handle->intr_vec[0]),
840 if (intr_handle->intr_vec == NULL) {
842 "port %u failed to allocate memory for interrupt"
843 " vector, Rx interrupts will not be supported",
848 intr_handle->type = RTE_INTR_HANDLE_EXT;
849 for (i = 0; i != n; ++i) {
850 /* This rxq obj must not be released in this function. */
851 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
852 struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
855 /* Skip queues that cannot request interrupts. */
856 if (!rxq_obj || (!rxq_obj->ibv_channel &&
857 !rxq_obj->devx_channel)) {
858 /* Use invalid intr_vec[] index to disable entry. */
859 intr_handle->intr_vec[i] =
860 RTE_INTR_VEC_RXTX_OFFSET +
861 RTE_MAX_RXTX_INTR_VEC_ID;
862 /* Decrease the rxq_ctrl's refcnt */
864 mlx5_rxq_release(dev, i);
867 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
869 "port %u too many Rx queues for interrupt"
870 " vector size (%d), Rx interrupts cannot be"
872 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
873 mlx5_rx_intr_vec_disable(dev);
877 rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
881 "port %u failed to make Rx interrupt file"
882 " descriptor %d non-blocking for queue index"
884 dev->data->port_id, rxq_obj->fd, i);
885 mlx5_rx_intr_vec_disable(dev);
888 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
889 intr_handle->efds[count] = rxq_obj->fd;
893 mlx5_rx_intr_vec_disable(dev);
895 intr_handle->nb_efd = count;
900 * Clean up the Rx interrupt handler.
903 * Pointer to Ethernet device.
906 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
908 struct mlx5_priv *priv = dev->data->dev_private;
909 struct rte_intr_handle *intr_handle = dev->intr_handle;
911 unsigned int rxqs_n = priv->rxqs_n;
912 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
914 if (!dev->data->dev_conf.intr_conf.rxq)
916 if (!intr_handle->intr_vec)
918 for (i = 0; i != n; ++i) {
919 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
920 RTE_MAX_RXTX_INTR_VEC_ID)
923 * Need to access the queue directly to release the reference
924 * kept in mlx5_rx_intr_vec_enable().
926 mlx5_rxq_release(dev, i);
929 rte_intr_free_epoll_fd(intr_handle);
930 if (intr_handle->intr_vec)
931 mlx5_free(intr_handle->intr_vec);
932 intr_handle->nb_efd = 0;
933 intr_handle->intr_vec = NULL;
937 * MLX5 CQ notification.
940 * Pointer to receive queue structure.
942 * Sequence number per receive queue.
945 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
948 uint32_t doorbell_hi;
950 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
952 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
953 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
954 doorbell = (uint64_t)doorbell_hi << 32;
955 doorbell |= rxq->cqn;
956 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
957 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
958 cq_db_reg, rxq->uar_lock_cq);
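/*
 * Sketch of the 64-bit doorbell composed above (based only on the code
 * visible here; field widths per the device PRM are not restated):
 *
 *   63           32 31            0
 *   +--------------+--------------+
 *   | doorbell_hi  |   rxq->cqn   |
 *   +--------------+--------------+
 *
 * where doorbell_hi = (arm sequence << MLX5_CQ_SQN_OFFSET) | masked
 * cq_ci. The same doorbell_hi is stored to the CQ doorbell record
 * (cq_db[MLX5_CQ_ARM_DB]) before the UAR write.
 */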
962 * DPDK callback for Rx queue interrupt enable.
965 * Pointer to Ethernet device structure.
970 * 0 on success, a negative errno value otherwise and rte_errno is set.
973 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
975 struct mlx5_rxq_ctrl *rxq_ctrl;
977 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
981 if (!rxq_ctrl->obj) {
982 mlx5_rxq_release(dev, rx_queue_id);
985 mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
987 mlx5_rxq_release(dev, rx_queue_id);
995 * DPDK callback for Rx queue interrupt disable.
998 * Pointer to Ethernet device structure.
1003 * 0 on success, a negative errno value otherwise and rte_errno is set.
1006 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1008 struct mlx5_priv *priv = dev->data->dev_private;
1009 struct mlx5_rxq_ctrl *rxq_ctrl;
1012 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1019 if (rxq_ctrl->irq) {
1020 ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
1023 rxq_ctrl->rxq.cq_arm_sn++;
1025 mlx5_rxq_release(dev, rx_queue_id);
1029 * The ret variable may be EAGAIN, which means the get_event function
1030 * was called before any event was received.
1036 ret = rte_errno; /* Save rte_errno before cleanup. */
1037 mlx5_rxq_release(dev, rx_queue_id);
1039 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
1040 dev->data->port_id, rx_queue_id);
1041 rte_errno = ret; /* Restore rte_errno. */
1046 * Verify the Rx queue objects list is empty
1049 * Pointer to Ethernet device.
1052 * The number of objects not released.
1055 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1057 struct mlx5_priv *priv = dev->data->dev_private;
1059 struct mlx5_rxq_obj *rxq_obj;
1061 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1062 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1063 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1070 * Callback function to initialize mbufs for Multi-Packet RQ.
1073 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1074 void *_m, unsigned int i __rte_unused)
1076 struct mlx5_mprq_buf *buf = _m;
1077 struct rte_mbuf_ext_shared_info *shinfo;
1078 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1081 memset(_m, 0, sizeof(*buf));
1083 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1084 for (j = 0; j != strd_n; ++j) {
1085 shinfo = &buf->shinfos[j];
1086 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1087 shinfo->fcb_opaque = buf;
1092 * Free mempool of Multi-Packet RQ.
1095 * Pointer to Ethernet device.
1098 * 0 on success, negative errno value on failure.
1101 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1103 struct mlx5_priv *priv = dev->data->dev_private;
1104 struct rte_mempool *mp = priv->mprq_mp;
1109 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1110 dev->data->port_id, mp->name);
1112 * If a buffer in the pool has been externally attached to an mbuf and it
1113 * is still in use by the application, destroying the Rx queue can spoil
1114 * the packet. It is unlikely, but it can happen if the application
1115 * dynamically creates and destroys queues while holding Rx packets.
1117 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1118 * RQ isn't provided by application but managed by PMD.
1120 if (!rte_mempool_full(mp)) {
1122 "port %u mempool for Multi-Packet RQ is still in use",
1123 dev->data->port_id);
1127 rte_mempool_free(mp);
1128 /* Unset mempool for each Rx queue. */
1129 for (i = 0; i != priv->rxqs_n; ++i) {
1130 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1134 rxq->mprq_mp = NULL;
1136 priv->mprq_mp = NULL;
1141 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1142 * mempool. If already allocated, reuse it if there are enough elements.
1143 * Otherwise, resize it.
1146 * Pointer to Ethernet device.
1149 * 0 on success, negative errno value on failure.
1152 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1154 struct mlx5_priv *priv = dev->data->dev_private;
1155 struct rte_mempool *mp = priv->mprq_mp;
1156 char name[RTE_MEMPOOL_NAMESIZE];
1157 unsigned int desc = 0;
1158 unsigned int buf_len;
1159 unsigned int obj_num;
1160 unsigned int obj_size;
1161 unsigned int strd_num_n = 0;
1162 unsigned int strd_sz_n = 0;
1164 unsigned int n_ibv = 0;
1166 if (!mlx5_mprq_enabled(dev))
1168 /* Count the total number of descriptors configured. */
1169 for (i = 0; i != priv->rxqs_n; ++i) {
1170 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1171 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1172 (rxq, struct mlx5_rxq_ctrl, rxq);
1174 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1177 desc += 1 << rxq->elts_n;
1178 /* Get the max number of strides. */
1179 if (strd_num_n < rxq->strd_num_n)
1180 strd_num_n = rxq->strd_num_n;
1181 /* Get the max size of a stride. */
1182 if (strd_sz_n < rxq->strd_sz_n)
1183 strd_sz_n = rxq->strd_sz_n;
1185 MLX5_ASSERT(strd_num_n && strd_sz_n);
1186 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1187 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1188 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
1190 * Received packets can be either memcpy'd or externally referenced. When
1191 * a packet is attached to an mbuf as an external buffer, it isn't
1192 * possible to predict how the buffers will be queued by the application,
1193 * so there is no way to pre-allocate exactly the needed number of
1194 * buffers in advance; instead, enough buffers are prepared speculatively.
1196 * In the data path, if this mempool is depleted, the PMD will try to
1197 * memcpy received packets into buffers provided by the application
1198 * (rxq->mp) until this mempool becomes available again.
1201 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1203 * rte_mempool_create_empty() has a sanity check that refuses a cache
1204 * size that is large compared to the number of elements.
1205 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the constant
1206 * 2 is used here instead.
1208 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
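/*
 * Worked example, assuming MLX5_MPRQ_MP_CACHE_SZ is 32 (illustrative
 * value): strd_num_n = 6 and strd_sz_n = 11 give buf_len = 64 * 2048 =
 * 128 KiB, and obj_size is roughly sizeof(struct mlx5_mprq_buf) +
 * 128 KiB + 64 shared-info entries + headroom. With four standard MPRQ
 * queues of 256 descriptors each, desc = 1024 and n_ibv = 4, so
 * obj_num = 1024 + 32 * 4 = 1152, which already exceeds the 32 * 2
 * lower bound applied above.
 */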
1209 /* Check whether a mempool is already allocated and whether it can be reused. */
1210 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1211 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1212 dev->data->port_id, mp->name);
1215 } else if (mp != NULL) {
1216 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1217 dev->data->port_id, mp->name);
1219 * If freeing fails, the pool may still be in use and there is no
1220 * choice but to keep using the existing one. On buffer underrun,
1221 * packets will be memcpy'd instead of being attached as external buffers
1224 if (mlx5_mprq_free_mp(dev)) {
1225 if (mp->elt_size >= obj_size)
1231 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1232 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1233 0, NULL, NULL, mlx5_mprq_buf_init,
1234 (void *)((uintptr_t)1 << strd_num_n),
1235 dev->device->numa_node, 0);
1238 "port %u failed to allocate a mempool for"
1239 " Multi-Packet RQ, count=%u, size=%u",
1240 dev->data->port_id, obj_num, obj_size);
1246 /* Set mempool for each Rx queue. */
1247 for (i = 0; i != priv->rxqs_n; ++i) {
1248 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1249 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1250 (rxq, struct mlx5_rxq_ctrl, rxq);
1252 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1256 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1257 dev->data->port_id);
1261 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1262 sizeof(struct rte_vlan_hdr) * 2 + \
1263 sizeof(struct rte_ipv6_hdr)))
1264 #define MAX_TCP_OPTION_SIZE 40u
1265 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1266 sizeof(struct rte_tcp_hdr) + \
1267 MAX_TCP_OPTION_SIZE))
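/*
 * With the standard header sizes (14-byte Ethernet, two 4-byte VLAN
 * tags, 40-byte IPv6 and 20-byte TCP headers) these macros evaluate to
 * MLX5_MAX_TCP_HDR_OFFSET = 14 + 8 + 40 = 62 bytes and
 * MLX5_MAX_LRO_HEADER_FIX = 62 + 20 + 40 = 122 bytes.
 */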
1270 * Adjust the maximum LRO message size.
1273 * Pointer to Ethernet device.
1276 * @param max_lro_size
1277 * The maximum size for LRO packet.
1280 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1281 uint32_t max_lro_size)
1283 struct mlx5_priv *priv = dev->data->dev_private;
1285 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1286 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1287 MLX5_MAX_TCP_HDR_OFFSET)
1288 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1289 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1290 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1291 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
1292 if (priv->max_lro_msg_size)
1293 priv->max_lro_msg_size =
1294 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1296 priv->max_lro_msg_size = max_lro_size;
1298 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1299 dev->data->port_id, idx,
1300 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
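/*
 * Worked example, assuming MLX5_LRO_SEG_CHUNK_SIZE is 256 bytes (an
 * illustrative value): a requested max_lro_size of 65535 on a device
 * that counts the LRO message from L4 first loses the worst-case L2/L3
 * headers (65535 - 62 = 65473), is clamped to MLX5_MAX_LRO_SIZE, and is
 * then stored in chunk units as 65473 / 256 = 255; the log above prints
 * the value converted back to bytes.
 */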
1304 * Create a DPDK Rx queue.
1307 * Pointer to Ethernet device.
1311 * Number of descriptors to configure in queue.
1313 * NUMA socket on which memory must be allocated.
1316 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1318 struct mlx5_rxq_ctrl *
1319 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1320 unsigned int socket, const struct rte_eth_rxconf *conf,
1321 const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
1323 struct mlx5_priv *priv = dev->data->dev_private;
1324 struct mlx5_rxq_ctrl *tmpl;
1325 unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
1326 struct mlx5_dev_config *config = &priv->config;
1327 uint64_t offloads = conf->offloads |
1328 dev->data->dev_conf.rxmode.offloads;
1329 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1330 unsigned int max_rx_pkt_len = lro_on_queue ?
1331 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1332 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1333 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1334 RTE_PKTMBUF_HEADROOM;
1335 unsigned int max_lro_size = 0;
1336 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1337 const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
1338 !rx_seg[0].offset && !rx_seg[0].length;
1339 unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
1340 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1341 unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
1342 (1U << config->mprq.max_stride_size_n) ?
1343 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1344 unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
1345 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1346 (config->mprq.stride_size_n ?
1347 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
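/*
 * Worked example for the stride parameters above, with assumed defaults
 * (MLX5_MPRQ_STRIDE_NUM_N taken as 6 for illustration): without devargs
 * overrides mprq_stride_nums = 6, i.e. 64 strides per WQE, and
 * mprq_stride_size = log2above(non_scatter_min_mbuf_size); a 1518-byte
 * frame plus the default 128 bytes of headroom gives log2above(1646) = 11,
 * so mprq_stride_cap = 64 * 2048 = 131072 bytes per multi-packet WQE.
 */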
1349 * Always allocate extra slots, even if eventually
1350 * the vector Rx will not be used.
1352 uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1353 const struct rte_eth_rxseg_split *qs_seg = rx_seg;
1354 unsigned int tail_len;
1356 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1357 sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
1359 (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
1365 MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
1367 * Build the array of actual buffer offsets and lengths.
1368 * Pad with the buffers from the last memory pool if
1369 * needed to handle max size packets, replace zero length
1370 * with the buffer length from the pool.
1372 tail_len = max_rx_pkt_len;
1374 struct mlx5_eth_rxseg *hw_seg =
1375 &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
1376 uint32_t buf_len, offset, seg_len;
1379 * For the buffers beyond the described segments the offset is zero;
1380 * the first buffer contains the headroom.
1382 buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
1383 offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
1384 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
1386 * For the buffers beyond the described segments the length is the
1387 * pool buffer length; zero lengths are also replaced with the
1388 * pool buffer length.
1390 seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
1394 /* Check is done in long int, so no overflow occurs. */
1395 if (buf_len < seg_len + offset) {
1396 DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
1397 "%u/%u can't be satisfied",
1398 dev->data->port_id, idx,
1399 qs_seg->length, qs_seg->offset);
1403 if (seg_len > tail_len)
1404 seg_len = buf_len - offset;
1405 if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
1407 "port %u too many SGEs (%u) needed to handle"
1408 " requested maximum packet size %u, the maximum"
1409 " supported are %u", dev->data->port_id,
1410 tmpl->rxq.rxseg_n, max_rx_pkt_len,
1412 rte_errno = ENOTSUP;
1415 /* Build the actual scattering element in the queue object. */
1416 hw_seg->mp = qs_seg->mp;
1417 MLX5_ASSERT(offset <= UINT16_MAX);
1418 MLX5_ASSERT(seg_len <= UINT16_MAX);
1419 hw_seg->offset = (uint16_t)offset;
1420 hw_seg->length = (uint16_t)seg_len;
1422 * Advance the segment descriptor; the padding is based
1423 * on the attributes of the last descriptor.
1425 if (tmpl->rxq.rxseg_n < n_seg)
1427 tail_len -= RTE_MIN(tail_len, seg_len);
1428 } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
1429 MLX5_ASSERT(tmpl->rxq.rxseg_n &&
1430 tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
1431 if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
1432 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1433 " configured and no enough mbuf space(%u) to contain "
1434 "the maximum RX packet length(%u) with head-room(%u)",
1435 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1436 RTE_PKTMBUF_HEADROOM);
1440 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1441 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1442 MLX5_MR_BTREE_CACHE_N, socket)) {
1443 /* rte_errno is already set. */
1446 tmpl->socket = socket;
1447 if (dev->data->dev_conf.intr_conf.rxq)
1450 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1451 * following conditions are met:
1452 * - MPRQ is enabled.
1453 * - The number of descs is more than the number of strides.
1454 * - max_rx_pkt_len plus overhead is less than the max size
1455 * of a stride or mprq_stride_size is specified by a user.
1456 * Need to make sure that there are enough strides to encap
1457 * the maximum packet size in case mprq_stride_size is set.
1458 * Otherwise, enable Rx scatter if necessary.
1460 if (mprq_en && desc > (1U << mprq_stride_nums) &&
1461 (non_scatter_min_mbuf_size <=
1462 (1U << config->mprq.max_stride_size_n) ||
1463 (config->mprq.stride_size_n &&
1464 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1465 /* TODO: Rx scatter isn't supported yet. */
1466 tmpl->rxq.sges_n = 0;
1467 /* Trim the number of descs needed. */
1468 desc >>= mprq_stride_nums;
1469 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1470 config->mprq.stride_num_n : mprq_stride_nums;
1471 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1472 config->mprq.stride_size_n : mprq_stride_size;
1473 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1474 tmpl->rxq.strd_scatter_en =
1475 !!(offloads & DEV_RX_OFFLOAD_SCATTER);
1476 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1477 config->mprq.max_memcpy_len);
1478 max_lro_size = RTE_MIN(max_rx_pkt_len,
1479 (1u << tmpl->rxq.strd_num_n) *
1480 (1u << tmpl->rxq.strd_sz_n));
1482 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1483 " strd_num_n = %u, strd_sz_n = %u",
1484 dev->data->port_id, idx,
1485 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
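/*
 * Continuing the illustrative numbers above: with 1024 requested
 * descriptors and mprq_stride_nums = 6, desc >>= 6 leaves 16
 * multi-packet WQEs; each WQE holds 64 strides and therefore up to
 * 64 packets of one stride, so the queue still covers 1024 packets.
 */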
1486 } else if (tmpl->rxq.rxseg_n == 1) {
1487 MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
1488 tmpl->rxq.sges_n = 0;
1489 max_lro_size = max_rx_pkt_len;
1490 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1491 unsigned int sges_n;
1493 if (lro_on_queue && first_mb_free_size <
1494 MLX5_MAX_LRO_HEADER_FIX) {
1495 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1496 " to include the max header size(%u) for LRO",
1497 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1498 rte_errno = ENOTSUP;
1502 * Determine the number of SGEs needed for a full packet
1503 * and round it to the next power of two.
1505 sges_n = log2above(tmpl->rxq.rxseg_n);
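/*
 * Example: a four-segment buffer-split configuration gives rxseg_n = 4
 * (the loop above pads rxseg_n to a power of two), log2above(4) = 2 and
 * sges_n = 2, so every packet consumes 1 << 2 = 4 SGEs; the descriptor
 * count must then be a multiple of 4, which is checked further below.
 */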
1506 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1508 "port %u too many SGEs (%u) needed to handle"
1509 " requested maximum packet size %u, the maximum"
1510 " supported are %u", dev->data->port_id,
1511 1 << sges_n, max_rx_pkt_len,
1512 1u << MLX5_MAX_LOG_RQ_SEGS);
1513 rte_errno = ENOTSUP;
1516 tmpl->rxq.sges_n = sges_n;
1517 max_lro_size = max_rx_pkt_len;
1519 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1521 "port %u MPRQ is requested but cannot be enabled\n"
1522 " (requested: pkt_sz = %u, desc_num = %u,"
1523 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1524 " supported: min_rxqs_num = %u,"
1525 " min_stride_sz = %u, max_stride_sz = %u).",
1526 dev->data->port_id, non_scatter_min_mbuf_size,
1528 config->mprq.stride_size_n ?
1529 (1U << config->mprq.stride_size_n) :
1530 (1U << mprq_stride_size),
1531 config->mprq.stride_num_n ?
1532 (1U << config->mprq.stride_num_n) :
1533 (1U << mprq_stride_nums),
1534 config->mprq.min_rxqs_num,
1535 (1U << config->mprq.min_stride_size_n),
1536 (1U << config->mprq.max_stride_size_n));
1537 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1538 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1539 if (desc % (1 << tmpl->rxq.sges_n)) {
1541 "port %u number of Rx queue descriptors (%u) is not a"
1542 " multiple of SGEs per packet (%u)",
1545 1 << tmpl->rxq.sges_n);
1549 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1550 /* Toggle RX checksum offload if hardware supports it. */
1551 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1552 /* Configure Rx timestamp. */
1553 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1554 tmpl->rxq.timestamp_rx_flag = 0;
1555 if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
1556 &tmpl->rxq.timestamp_offset,
1557 &tmpl->rxq.timestamp_rx_flag) != 0) {
1558 DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
1561 /* Configure VLAN stripping. */
1562 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1563 /* By default, FCS (CRC) is stripped by hardware. */
1564 tmpl->rxq.crc_present = 0;
1565 tmpl->rxq.lro = lro_on_queue;
1566 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1567 if (config->hw_fcs_strip) {
1569 * RQs used for LRO-enabled TIRs should not be
1570 * configured to scatter the FCS.
1574 "port %u CRC stripping has been "
1575 "disabled but will still be performed "
1576 "by hardware, because LRO is enabled",
1577 dev->data->port_id);
1579 tmpl->rxq.crc_present = 1;
1582 "port %u CRC stripping has been disabled but will"
1583 " still be performed by hardware, make sure MLNX_OFED"
1584 " and firmware are up to date",
1585 dev->data->port_id);
1589 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1590 " incoming frames to hide it",
1592 tmpl->rxq.crc_present ? "disabled" : "enabled",
1593 tmpl->rxq.crc_present << 2);
1595 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1596 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1597 tmpl->rxq.port_id = dev->data->port_id;
1599 tmpl->rxq.mp = rx_seg[0].mp;
1600 tmpl->rxq.elts_n = log2above(desc);
1601 tmpl->rxq.rq_repl_thresh =
1602 MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
1604 (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
1605 tmpl->rxq.mprq_bufs =
1606 (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
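/*
 * Rough layout of the single allocation made earlier by mlx5_malloc()
 * (a sketch; the exact sizes come from that call):
 *
 *   [struct mlx5_rxq_ctrl]                      <- tmpl
 *   [desc_n pointers to struct rte_mbuf]        <- tmpl->rxq.elts
 *   [MPRQ buffer pointers, MPRQ only]           <- tmpl->rxq.mprq_bufs
 *
 * elts always exists because it also backs the SPRQ/vector path, while
 * mprq_bufs is only populated when MPRQ is enabled for this queue.
 */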
1608 tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
1610 tmpl->rxq.idx = idx;
1611 __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1612 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1615 mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
1621 * Create a DPDK Rx hairpin queue.
1624 * Pointer to Ethernet device.
1628 * Number of descriptors to configure in queue.
1629 * @param hairpin_conf
1630 * The hairpin binding configuration.
1633 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1635 struct mlx5_rxq_ctrl *
1636 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1637 const struct rte_eth_hairpin_conf *hairpin_conf)
1639 struct mlx5_priv *priv = dev->data->dev_private;
1640 struct mlx5_rxq_ctrl *tmpl;
1642 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1648 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1649 tmpl->socket = SOCKET_ID_ANY;
1650 tmpl->rxq.rss_hash = 0;
1651 tmpl->rxq.port_id = dev->data->port_id;
1653 tmpl->rxq.mp = NULL;
1654 tmpl->rxq.elts_n = log2above(desc);
1655 tmpl->rxq.elts = NULL;
1656 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
1657 tmpl->hairpin_conf = *hairpin_conf;
1658 tmpl->rxq.idx = idx;
1659 __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1660 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1668 * Pointer to Ethernet device.
1673 * A pointer to the queue if it exists, NULL otherwise.
1675 struct mlx5_rxq_ctrl *
1676 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1678 struct mlx5_priv *priv = dev->data->dev_private;
1679 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1680 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1683 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1684 __atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
1690 * Release a Rx queue.
1693 * Pointer to Ethernet device.
1698 * 1 while a reference on it exists, 0 when freed.
1701 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1703 struct mlx5_priv *priv = dev->data->dev_private;
1704 struct mlx5_rxq_ctrl *rxq_ctrl;
1706 if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
1708 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1709 if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1711 if (rxq_ctrl->obj) {
1712 priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
1713 LIST_REMOVE(rxq_ctrl->obj, next);
1714 mlx5_free(rxq_ctrl->obj);
1715 rxq_ctrl->obj = NULL;
1717 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
1718 rxq_free_elts(rxq_ctrl);
1719 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1721 if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1722 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
1723 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1724 LIST_REMOVE(rxq_ctrl, next);
1725 mlx5_free(rxq_ctrl);
1726 (*priv->rxqs)[idx] = NULL;
1732 * Verify the Rx Queue list is empty
1735 * Pointer to Ethernet device.
1738 * The number of objects not released.
1741 mlx5_rxq_verify(struct rte_eth_dev *dev)
1743 struct mlx5_priv *priv = dev->data->dev_private;
1744 struct mlx5_rxq_ctrl *rxq_ctrl;
1747 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1748 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1749 dev->data->port_id, rxq_ctrl->rxq.idx);
1756 * Get a Rx queue type.
1759 * Pointer to Ethernet device.
1764 * The Rx queue type.
1767 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
1769 struct mlx5_priv *priv = dev->data->dev_private;
1770 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1772 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1773 rxq_ctrl = container_of((*priv->rxqs)[idx],
1774 struct mlx5_rxq_ctrl,
1776 return rxq_ctrl->type;
1778 return MLX5_RXQ_TYPE_UNDEFINED;
1782 * Get a Rx hairpin queue configuration.
1785 * Pointer to Ethernet device.
1790 * Pointer to the configuration if a hairpin RX queue, otherwise NULL.
1792 const struct rte_eth_hairpin_conf *
1793 mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
1795 struct mlx5_priv *priv = dev->data->dev_private;
1796 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1798 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1799 rxq_ctrl = container_of((*priv->rxqs)[idx],
1800 struct mlx5_rxq_ctrl,
1802 if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
1803 return &rxq_ctrl->hairpin_conf;
1809 * Match queues listed in arguments to queues contained in indirection table
1813 * Pointer to indirection table to match.
1815 * Queues to match against the queues in the indirection table.
1817 * Number of queues in the array.
1820 * 1 if all queues in the indirection table match, 0 otherwise.
1823 mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
1824 const uint16_t *queues, uint32_t queues_n)
1826 return (ind_tbl->queues_n == queues_n) &&
1827 (!memcmp(ind_tbl->queues, queues,
1828 ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
1832 * Get an indirection table.
1835 * Pointer to Ethernet device.
1837 * Queues entering in the indirection table.
1839 * Number of queues in the array.
1842 * An indirection table if found.
1844 struct mlx5_ind_table_obj *
1845 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1848 struct mlx5_priv *priv = dev->data->dev_private;
1849 struct mlx5_ind_table_obj *ind_tbl;
1851 rte_rwlock_read_lock(&priv->ind_tbls_lock);
1852 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1853 if ((ind_tbl->queues_n == queues_n) &&
1854 (memcmp(ind_tbl->queues, queues,
1855 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1857 __atomic_fetch_add(&ind_tbl->refcnt, 1,
1862 rte_rwlock_read_unlock(&priv->ind_tbls_lock);
1867 * Release an indirection table.
1870 * Pointer to Ethernet device.
1872 * Indirection table to release.
1874 * Indirection table for Standalone queue.
1877 * 1 while a reference on it exists, 0 when freed.
1880 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
1881 struct mlx5_ind_table_obj *ind_tbl,
1884 struct mlx5_priv *priv = dev->data->dev_private;
1885 unsigned int i, ret;
1887 rte_rwlock_write_lock(&priv->ind_tbls_lock);
1888 ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
1889 if (!ret && !standalone)
1890 LIST_REMOVE(ind_tbl, next);
1891 rte_rwlock_write_unlock(&priv->ind_tbls_lock);
1894 priv->obj_ops.ind_table_destroy(ind_tbl);
1895 for (i = 0; i != ind_tbl->queues_n; ++i)
1896 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1902 * Verify the indirection table object list is empty
1905 * Pointer to Ethernet device.
1908 * The number of objects not released.
1911 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
1913 struct mlx5_priv *priv = dev->data->dev_private;
1914 struct mlx5_ind_table_obj *ind_tbl;
1917 rte_rwlock_read_lock(&priv->ind_tbls_lock);
1918 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1920 "port %u indirection table obj %p still referenced",
1921 dev->data->port_id, (void *)ind_tbl);
1924 rte_rwlock_read_unlock(&priv->ind_tbls_lock);
1929 * Set up the fields of an indirection table structure.
1932 * Pointer to Ethernet device.
1934 * Indirection table to modify.
1937 * 0 on success, a negative errno value otherwise and rte_errno is set.
1940 mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
1941 struct mlx5_ind_table_obj *ind_tbl)
1943 struct mlx5_priv *priv = dev->data->dev_private;
1944 uint32_t queues_n = ind_tbl->queues_n;
1945 uint16_t *queues = ind_tbl->queues;
1948 const unsigned int n = rte_is_power_of_2(queues_n) ?
1949 log2above(queues_n) :
1950 log2above(priv->config.ind_table_max_size);
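/*
 * Example of the sizing rule above: 4 queues give n = log2above(4) = 2,
 * i.e. a table of 4 entries; 6 queues are not a power of two, so the
 * table is created with log2above(ind_table_max_size) entries instead,
 * and the queue list is presumably replicated across those entries by
 * the ind_table_new callback.
 */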
1952 for (i = 0; i != queues_n; ++i) {
1953 if (!mlx5_rxq_get(dev, queues[i])) {
1958 ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
1961 __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
1965 for (j = 0; j < i; j++)
1966 mlx5_rxq_release(dev, ind_tbl->queues[j]);
1968 DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
1969 dev->data->port_id);
1974 * Create an indirection table.
1977 * Pointer to Ethernet device.
1979 * Queues entering in the indirection table.
1981 * Number of queues in the array.
1983 * Indirection table for Standalone queue.
1986 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
1988 static struct mlx5_ind_table_obj *
1989 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
1990 uint32_t queues_n, bool standalone)
1992 struct mlx5_priv *priv = dev->data->dev_private;
1993 struct mlx5_ind_table_obj *ind_tbl;
1996 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
1997 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
2002 ind_tbl->queues_n = queues_n;
2003 ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
2004 memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
2005 ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
2011 rte_rwlock_write_lock(&priv->ind_tbls_lock);
2012 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2013 rte_rwlock_write_unlock(&priv->ind_tbls_lock);
2019 * Modify an indirection table.
2022 * Pointer to Ethernet device.
2024 * Indirection table to modify.
2026 * Queues replacement for the indirection table.
2028 * Number of queues in the array.
2030 * Indirection table for Standalone queue.
2033 * 0 on success, a negative errno value otherwise and rte_errno is set.
2036 mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
2037 struct mlx5_ind_table_obj *ind_tbl,
2038 uint16_t *queues, const uint32_t queues_n,
2041 struct mlx5_priv *priv = dev->data->dev_private;
2044 const unsigned int n = rte_is_power_of_2(queues_n) ?
2045 log2above(queues_n) :
2046 log2above(priv->config.ind_table_max_size);
2048 MLX5_ASSERT(standalone);
2049 RTE_SET_USED(standalone);
2050 if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) > 1) {
2052 * Modification of indirection tables having more than one
2053 * reference is unsupported. Intended for standalone indirection
2057 "Port %u cannot modify indirection table (refcnt> 1).",
2058 dev->data->port_id);
2062 for (i = 0; i != queues_n; ++i) {
2063 if (!mlx5_rxq_get(dev, queues[i])) {
2068 MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2069 ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
2072 for (j = 0; j < ind_tbl->queues_n; j++)
2073 mlx5_rxq_release(dev, ind_tbl->queues[j]);
2074 ind_tbl->queues_n = queues_n;
2075 ind_tbl->queues = queues;
2079 for (j = 0; j < i; j++)
2080 mlx5_rxq_release(dev, queues[j]);
2082 DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
2083 dev->data->port_id);
2088 mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
2091 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2092 struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2093 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2095 return (hrxq->rss_key_len != rss_desc->key_len ||
2096 memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
2097 hrxq->hash_fields != rss_desc->hash_fields ||
2098 hrxq->ind_table->queues_n != rss_desc->queue_num ||
2099 memcmp(hrxq->ind_table->queues, rss_desc->queue,
2100 rss_desc->queue_num * sizeof(rss_desc->queue[0])));
2104 * Modify an Rx Hash queue configuration.
2107 * Pointer to Ethernet device.
2109 * Index to Hash Rx queue to modify.
2111 * RSS key for the Rx hash queue.
2112 * @param rss_key_len
2114 * @param hash_fields
2115 * Verbs protocol hash field to make the RSS on.
2117 * Queues entering in hash queue. In case of empty hash_fields only the
2118 * first queue index will be taken for the indirection table.
2123 * 0 on success, a negative errno value otherwise and rte_errno is set.
2126 mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
2127 const uint8_t *rss_key, uint32_t rss_key_len,
2128 uint64_t hash_fields,
2129 const uint16_t *queues, uint32_t queues_n)
2132 struct mlx5_ind_table_obj *ind_tbl = NULL;
2133 struct mlx5_priv *priv = dev->data->dev_private;
2134 struct mlx5_hrxq *hrxq =
2135 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2143 if (hrxq->rss_key_len != rss_key_len) {
2144 /* rss_key_len is a fixed size of 40 bytes and is not supposed to change. */
2148 queues_n = hash_fields ? queues_n : 1;
2149 if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
2150 queues, queues_n)) {
2151 ind_tbl = hrxq->ind_table;
2153 if (hrxq->standalone) {
2155 * Replacement of the indirection table is unsupported for
2156 * standalone hrxq objects (used by shared RSS).
2158 rte_errno = ENOTSUP;
2161 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2163 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
2170 MLX5_ASSERT(priv->obj_ops.hrxq_modify);
2171 ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
2172 hash_fields, ind_tbl);
2177 if (ind_tbl != hrxq->ind_table) {
2178 MLX5_ASSERT(!hrxq->standalone);
2179 mlx5_ind_table_obj_release(dev, hrxq->ind_table,
2181 hrxq->ind_table = ind_tbl;
2183 hrxq->hash_fields = hash_fields;
2184 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2188 if (ind_tbl != hrxq->ind_table) {
2189 MLX5_ASSERT(!hrxq->standalone);
2190 mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
2197 __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2199 struct mlx5_priv *priv = dev->data->dev_private;
2201 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2202 mlx5_glue->destroy_flow_action(hrxq->action);
2204 priv->obj_ops.hrxq_destroy(hrxq);
2205 if (!hrxq->standalone) {
2206 mlx5_ind_table_obj_release(dev, hrxq->ind_table,
2209 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
2213 * Release the hash Rx queue.
2216 * Pointer to Ethernet device.
2218 * Index to Hash Rx queue to release.
2221 * mlx5 list pointer.
2223 * Hash queue entry pointer.
2226 mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
2228 struct rte_eth_dev *dev = tool_ctx;
2229 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2231 __mlx5_hrxq_remove(dev, hrxq);
2234 static struct mlx5_hrxq *
2235 __mlx5_hrxq_create(struct rte_eth_dev *dev,
2236 struct mlx5_flow_rss_desc *rss_desc)
2238 struct mlx5_priv *priv = dev->data->dev_private;
2239 const uint8_t *rss_key = rss_desc->key;
2240 uint32_t rss_key_len = rss_desc->key_len;
2241 bool standalone = !!rss_desc->shared_rss;
2242 const uint16_t *queues =
2243 standalone ? rss_desc->const_q : rss_desc->queue;
2244 uint32_t queues_n = rss_desc->queue_num;
2245 struct mlx5_hrxq *hrxq = NULL;
2246 uint32_t hrxq_idx = 0;
2247 struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
2250 queues_n = rss_desc->hash_fields ? queues_n : 1;
2252 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2254 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
2258 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2261 hrxq->standalone = standalone;
2262 hrxq->idx = hrxq_idx;
2263 hrxq->ind_table = ind_tbl;
2264 hrxq->rss_key_len = rss_key_len;
2265 hrxq->hash_fields = rss_desc->hash_fields;
2266 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2267 ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
2272 if (!rss_desc->ind_tbl)
2273 mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
2275 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2279 struct mlx5_list_entry *
2280 mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx)
2282 struct rte_eth_dev *dev = tool_ctx;
2283 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2284 struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2285 struct mlx5_hrxq *hrxq;
2287 hrxq = __mlx5_hrxq_create(dev, rss_desc);
2288 return hrxq ? &hrxq->entry : NULL;
2291 struct mlx5_list_entry *
2292 mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2293 void *cb_ctx __rte_unused)
2295 struct rte_eth_dev *dev = tool_ctx;
2296 struct mlx5_priv *priv = dev->data->dev_private;
2297 struct mlx5_hrxq *hrxq;
2298 uint32_t hrxq_idx = 0;
2300 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2303 memcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN);
2304 hrxq->idx = hrxq_idx;
2305 return &hrxq->entry;
2309 mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
2311 struct rte_eth_dev *dev = tool_ctx;
2312 struct mlx5_priv *priv = dev->data->dev_private;
2313 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2315 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
2319 * Get an Rx Hash queue.
2322 * Pointer to Ethernet device.
2324 * RSS configuration for the Rx hash queue.
2327 * A hash Rx queue index on success.
2329 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
2330 struct mlx5_flow_rss_desc *rss_desc)
2332 struct mlx5_priv *priv = dev->data->dev_private;
2333 struct mlx5_hrxq *hrxq;
2334 struct mlx5_list_entry *entry;
2335 struct mlx5_flow_cb_ctx ctx = {
2339 if (rss_desc->shared_rss) {
2340 hrxq = __mlx5_hrxq_create(dev, rss_desc);
2342 entry = mlx5_list_register(priv->hrxqs, &ctx);
2345 hrxq = container_of(entry, typeof(*hrxq), entry);
2353 * Release the hash Rx queue.
2356 * Pointer to Ethernet device.
2358 * Index to Hash Rx queue to release.
2361 * 1 while a reference on it exists, 0 when freed.
2363 int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2365 struct mlx5_priv *priv = dev->data->dev_private;
2366 struct mlx5_hrxq *hrxq;
2368 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2371 if (!hrxq->standalone)
2372 return mlx5_list_unregister(priv->hrxqs, &hrxq->entry);
2373 __mlx5_hrxq_remove(dev, hrxq);
2378 * Create a drop Rx Hash queue.
2381 * Pointer to Ethernet device.
2384 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2387 mlx5_drop_action_create(struct rte_eth_dev *dev)
2389 struct mlx5_priv *priv = dev->data->dev_private;
2390 struct mlx5_hrxq *hrxq = NULL;
2393 if (priv->drop_queue.hrxq)
2394 return priv->drop_queue.hrxq;
2395 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2398 "Port %u cannot allocate memory for drop queue.",
2399 dev->data->port_id);
2403 priv->drop_queue.hrxq = hrxq;
2404 hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
2406 if (!hrxq->ind_table) {
2410 ret = priv->obj_ops.drop_action_create(dev);
2416 if (hrxq->ind_table)
2417 mlx5_free(hrxq->ind_table);
2418 priv->drop_queue.hrxq = NULL;
2425 * Release a drop hash Rx queue.
2428 * Pointer to Ethernet device.
2431 mlx5_drop_action_destroy(struct rte_eth_dev *dev)
2433 struct mlx5_priv *priv = dev->data->dev_private;
2434 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2436 if (!priv->drop_queue.hrxq)
2438 priv->obj_ops.drop_action_destroy(dev);
2439 mlx5_free(priv->drop_queue.rxq);
2440 mlx5_free(hrxq->ind_table);
2442 priv->drop_queue.rxq = NULL;
2443 priv->drop_queue.hrxq = NULL;
2447 * Verify the hash Rx queue (hrxq) list is empty
2450 * Pointer to Ethernet device.
2453 * The number of objects not released.
2456 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2458 struct mlx5_priv *priv = dev->data->dev_private;
2460 return mlx5_list_get_entry_num(priv->hrxqs);
2464 * Set the Rx queue timestamp conversion parameters
2467 * Pointer to the Ethernet device structure.
2470 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2472 struct mlx5_priv *priv = dev->data->dev_private;
2473 struct mlx5_dev_ctx_shared *sh = priv->sh;
2474 struct mlx5_rxq_data *data;
2477 for (i = 0; i != priv->rxqs_n; ++i) {
2478 if (!(*priv->rxqs)[i])
2480 data = (*priv->rxqs)[i];
2482 data->rt_timestamp = priv->config.rt_timestamp;