1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 #include <rte_malloc.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
20 #include <rte_eal_paging.h>
22 #include <mlx5_glue.h>
23 #include <mlx5_malloc.h>
25 #include "mlx5_defs.h"
27 #include "mlx5_rxtx.h"
28 #include "mlx5_utils.h"
29 #include "mlx5_autoconf.h"
32 /* Default RSS hash key also used for ConnectX-3. */
33 uint8_t rss_hash_default_key[] = {
34 0x2c, 0xc6, 0x81, 0xd1,
35 0x5b, 0xdb, 0xf4, 0xf7,
36 0xfc, 0xa2, 0x83, 0x19,
37 0xdb, 0x1a, 0x3e, 0x94,
38 0x6b, 0x9e, 0x38, 0xd9,
39 0x2c, 0x9c, 0x03, 0xd1,
40 0xad, 0x99, 0x44, 0xa7,
41 0xd9, 0x56, 0x3d, 0x59,
42 0x06, 0x3c, 0x25, 0xf3,
43 0xfc, 0x1f, 0xdc, 0x2a,
46 /* Length of the default RSS hash key. */
47 static_assert(MLX5_RSS_HASH_KEY_LEN ==
48 (unsigned int)sizeof(rss_hash_default_key),
49 "wrong RSS default key size.");
52 * Check whether Multi-Packet RQ can be enabled for the device.
55 * Pointer to Ethernet device.
58 * 1 if supported, negative errno value if not.
61 mlx5_check_mprq_support(struct rte_eth_dev *dev)
63 struct mlx5_priv *priv = dev->data->dev_private;
65 if (priv->config.mprq.enabled &&
66 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
72 * Check whether Multi-Packet RQ is enabled for the Rx queue.
75 * Pointer to receive queue structure.
78 * 0 if disabled, otherwise enabled.
81 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
83 return rxq->strd_num_n > 0;
87 * Check whether Multi-Packet RQ is enabled for the device.
90 * Pointer to Ethernet device.
93 * 0 if disabled, otherwise enabled.
96 mlx5_mprq_enabled(struct rte_eth_dev *dev)
98 struct mlx5_priv *priv = dev->data->dev_private;
103 if (mlx5_check_mprq_support(dev) < 0)
105 /* All the configured queues should be enabled. */
106 for (i = 0; i < priv->rxqs_n; ++i) {
107 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
108 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
109 (rxq, struct mlx5_rxq_ctrl, rxq);
111 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
114 if (mlx5_rxq_mprq_enabled(rxq))
117 /* Multi-Packet RQ can't be partially configured. */
118 MLX5_ASSERT(n == 0 || n == n_ibv);
123 * Calculate the number of CQEs in CQ for the Rx queue.
126 * Pointer to receive queue structure.
129 * Number of CQEs in CQ.
132 mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
135 unsigned int wqe_n = 1 << rxq_data->elts_n;
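/*
 * With Multi-Packet RQ every stride may complete as a separate packet,
 * so the CQ has to be sized for wqe_n * strides entries; otherwise one
 * CQE per WQE is enough.
 */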
137 if (mlx5_rxq_mprq_enabled(rxq_data))
138 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
145 * Allocate RX queue elements for Multi-Packet RQ.
148 * Pointer to RX queue structure.
151 * 0 on success, a negative errno value otherwise and rte_errno is set.
154 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
156 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
157 unsigned int wqe_n = 1 << rxq->elts_n;
161 /* Iterate on segments. */
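/*
 * The loop below gets wqe_n + 1 buffers from the mempool: one per WQE
 * plus one spare that is kept aside in rxq->mprq_repl as the
 * replacement buffer.
 */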
162 for (i = 0; i <= wqe_n; ++i) {
163 struct mlx5_mprq_buf *buf;
165 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
166 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
171 (*rxq->mprq_bufs)[i] = buf;
173 rxq->mprq_repl = buf;
176 "port %u MPRQ queue %u allocated and configured %u segments",
177 rxq->port_id, rxq->idx, wqe_n);
180 err = rte_errno; /* Save rte_errno before cleanup. */
182 for (i = 0; (i != wqe_n); ++i) {
183 if ((*rxq->mprq_bufs)[i] != NULL)
184 rte_mempool_put(rxq->mprq_mp,
185 (*rxq->mprq_bufs)[i]);
186 (*rxq->mprq_bufs)[i] = NULL;
188 DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
189 rxq->port_id, rxq->idx);
190 rte_errno = err; /* Restore rte_errno. */
195 * Allocate RX queue elements for Single-Packet RQ.
198 * Pointer to RX queue structure.
201 * 0 on success, errno value on failure.
204 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
206 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
207 unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
208 (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
209 (1 << rxq_ctrl->rxq.elts_n);
213 /* Iterate on segments. */
214 for (i = 0; (i != elts_n); ++i) {
215 struct rte_mbuf *buf;
217 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
219 DRV_LOG(ERR, "port %u empty mbuf pool",
220 PORT_ID(rxq_ctrl->priv));
224 /* Headroom is reserved by rte_pktmbuf_alloc(). */
225 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
226 /* Buffer is supposed to be empty. */
227 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
228 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
229 MLX5_ASSERT(!buf->next);
230 /* Only the first segment keeps headroom. */
232 SET_DATA_OFF(buf, 0);
233 PORT(buf) = rxq_ctrl->rxq.port_id;
234 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
235 PKT_LEN(buf) = DATA_LEN(buf);
237 (*rxq_ctrl->rxq.elts)[i] = buf;
239 /* If Rx vector is activated. */
240 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
241 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
242 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
243 struct rte_pktmbuf_pool_private *priv =
244 (struct rte_pktmbuf_pool_private *)
245 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
248 /* Initialize default rearm_data for vPMD. */
249 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
250 rte_mbuf_refcnt_set(mbuf_init, 1);
251 mbuf_init->nb_segs = 1;
252 mbuf_init->port = rxq->port_id;
253 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
254 mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
256 * prevent compiler reordering:
257 * rearm_data covers previous fields.
259 rte_compiler_barrier();
260 rxq->mbuf_initializer =
261 *(rte_xmm_t *)&mbuf_init->rearm_data;
262 /* Padding with a fake mbuf for vectorized Rx. */
263 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
264 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
267 "port %u SPRQ queue %u allocated and configured %u segments"
269 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
270 elts_n / (1 << rxq_ctrl->rxq.sges_n));
273 err = rte_errno; /* Save rte_errno before cleanup. */
275 for (i = 0; (i != elts_n); ++i) {
276 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
277 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
278 (*rxq_ctrl->rxq.elts)[i] = NULL;
280 DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
281 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
282 rte_errno = err; /* Restore rte_errno. */
287 * Allocate RX queue elements.
290 * Pointer to RX queue structure.
293 * 0 on success, errno value on failure.
296 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
301 * For MPRQ we need to allocate both MPRQ buffers
302 * for WQEs and simple mbufs for vector processing.
304 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
305 ret = rxq_alloc_elts_mprq(rxq_ctrl);
306 return (ret || rxq_alloc_elts_sprq(rxq_ctrl));
310 * Free RX queue elements for Multi-Packet RQ.
313 * Pointer to RX queue structure.
316 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
318 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
321 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
322 rxq->port_id, rxq->idx, (1u << rxq->elts_n));
323 if (rxq->mprq_bufs == NULL)
325 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
326 if ((*rxq->mprq_bufs)[i] != NULL)
327 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
328 (*rxq->mprq_bufs)[i] = NULL;
330 if (rxq->mprq_repl != NULL) {
331 mlx5_mprq_buf_free(rxq->mprq_repl);
332 rxq->mprq_repl = NULL;
337 * Free RX queue elements for Single-Packet RQ.
340 * Pointer to RX queue structure.
343 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
345 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
346 const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
347 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
349 const uint16_t q_mask = q_n - 1;
350 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
353 DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
354 PORT_ID(rxq_ctrl->priv), rxq->idx, q_n);
355 if (rxq->elts == NULL)
358 * Some mbufs in the ring belong to the application;
359 * they cannot be freed.
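/*
 * The ring entries cleared to NULL in the vectorized branch below are
 * then skipped by the generic free loop.
 */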
361 if (mlx5_rxq_check_vec_support(rxq) > 0) {
362 for (i = 0; i < used; ++i)
363 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
364 rxq->rq_pi = rxq->rq_ci;
366 for (i = 0; i != q_n; ++i) {
367 if ((*rxq->elts)[i] != NULL)
368 rte_pktmbuf_free_seg((*rxq->elts)[i]);
369 (*rxq->elts)[i] = NULL;
374 * Free RX queue elements.
377 * Pointer to RX queue structure.
380 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
383 * For MPRQ we need to allocate both MPRQ buffers
384 * for WQEs and simple mbufs for vector processing.
386 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
387 rxq_free_elts_mprq(rxq_ctrl);
388 rxq_free_elts_sprq(rxq_ctrl);
392 * Returns the per-queue supported offloads.
395 * Pointer to Ethernet device.
398 * Supported Rx offloads.
401 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
403 struct mlx5_priv *priv = dev->data->dev_private;
404 struct mlx5_dev_config *config = &priv->config;
405 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
406 DEV_RX_OFFLOAD_TIMESTAMP |
407 DEV_RX_OFFLOAD_JUMBO_FRAME |
408 DEV_RX_OFFLOAD_RSS_HASH);
410 if (config->hw_fcs_strip)
411 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
414 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
415 DEV_RX_OFFLOAD_UDP_CKSUM |
416 DEV_RX_OFFLOAD_TCP_CKSUM);
417 if (config->hw_vlan_strip)
418 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
419 if (MLX5_LRO_SUPPORTED(dev))
420 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
426 * Returns the per-port supported offloads.
429 * Supported Rx offloads.
432 mlx5_get_rx_port_offloads(void)
434 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
440 * Verify if the queue can be released.
443 * Pointer to Ethernet device.
448 * 1 if the queue can be released
449 * 0 if the queue cannot be released because there are references to it.
450 * Negative errno value and rte_errno is set if the queue doesn't exist.
453 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
455 struct mlx5_priv *priv = dev->data->dev_private;
456 struct mlx5_rxq_ctrl *rxq_ctrl;
458 if (!(*priv->rxqs)[idx]) {
462 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
463 return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
467 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
469 rxq_sync_cq(struct mlx5_rxq_data *rxq)
471 const uint16_t cqe_n = 1 << rxq->cqe_n;
472 const uint16_t cqe_mask = cqe_n - 1;
473 volatile struct mlx5_cqe *cqe;
478 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
479 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
480 if (ret == MLX5_CQE_STATUS_HW_OWN)
482 if (ret == MLX5_CQE_STATUS_ERR) {
486 MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
487 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
491 /* Compute the next non compressed CQE. */
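/*
 * For a compressed CQE, byte_cnt carries the number of packed
 * mini-CQEs, so the consumer index jumps over the whole compressed
 * session at once.
 */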
492 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
495 /* Move all CQEs to HW ownership, including possible MiniCQEs. */
496 for (i = 0; i < cqe_n; i++) {
497 cqe = &(*rxq->cqes)[i];
498 cqe->op_own = MLX5_CQE_INVALIDATE;
500 /* Resync CQE and WQE (WQ in RESET state). */
502 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
504 *rxq->rq_db = rte_cpu_to_be_32(0);
509 * Rx queue stop. The device queue goes to the RESET state and
510 * all involved mbufs are freed from the WQ.
513 * Pointer to Ethernet device structure.
518 * 0 on success, a negative errno value otherwise and rte_errno is set.
521 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
523 struct mlx5_priv *priv = dev->data->dev_private;
524 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
525 struct mlx5_rxq_ctrl *rxq_ctrl =
526 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
529 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
530 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
532 DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
537 /* Remove all processed CQEs. */
539 /* Free all involved mbufs. */
540 rxq_free_elts(rxq_ctrl);
541 /* Set the actual queue state. */
542 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
547 * Rx queue stop. The device queue goes to the RESET state and
548 * all involved mbufs are freed from the WQ.
551 * Pointer to Ethernet device structure.
556 * 0 on success, a negative errno value otherwise and rte_errno is set.
559 mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
561 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
564 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
565 DRV_LOG(ERR, "Hairpin queue can't be stopped");
569 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
572 * Vectorized Rx burst requires the CQ and RQ indices to stay
573 * synchronized, which might be broken on RQ restart and cause
574 * Rx malfunction, so queue stopping is not supported if the
575 * vectorized Rx burst is engaged.
576 * The routine pointer depends on the process type, so the
577 * check must be performed here.
579 if (pkt_burst == mlx5_rx_burst_vec) {
580 DRV_LOG(ERR, "Rx queue stop is not supported "
581 "for vectorized Rx");
585 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
586 ret = mlx5_mp_os_req_queue_control(dev, idx,
587 MLX5_MP_REQ_QUEUE_RX_STOP);
589 ret = mlx5_rx_queue_stop_primary(dev, idx);
595 * Rx queue start. The device queue goes to the ready state,
596 * all required mbufs are allocated and the WQ is replenished.
599 * Pointer to Ethernet device structure.
604 * 0 on success, a negative errno value otherwise and rte_errno is set.
607 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
609 struct mlx5_priv *priv = dev->data->dev_private;
610 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
611 struct mlx5_rxq_ctrl *rxq_ctrl =
612 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
615 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
616 /* Allocate needed buffers. */
617 ret = rxq_alloc_elts(rxq_ctrl);
619 DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
624 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
626 /* Reset RQ consumer before moving queue to READY state. */
627 *rxq->rq_db = rte_cpu_to_be_32(0);
629 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
631 DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
636 /* Reinitialize RQ - set WQEs. */
637 mlx5_rxq_initialize(rxq);
638 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
639 /* Set actual queue state. */
640 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
645 * Rx queue start. The device queue goes to the ready state,
646 * all required mbufs are allocated and the WQ is replenished.
649 * Pointer to Ethernet device structure.
654 * 0 on success, a negative errno value otherwise and rte_errno is set.
657 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
661 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
662 DRV_LOG(ERR, "Hairpin queue can't be started");
666 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
668 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
669 ret = mlx5_mp_os_req_queue_control(dev, idx,
670 MLX5_MP_REQ_QUEUE_RX_START);
672 ret = mlx5_rx_queue_start_primary(dev, idx);
678 * Rx queue presetup checks.
681 * Pointer to Ethernet device structure.
685 * Number of descriptors to configure in queue.
688 * 0 on success, a negative errno value otherwise and rte_errno is set.
691 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
693 struct mlx5_priv *priv = dev->data->dev_private;
695 if (!rte_is_power_of_2(*desc)) {
696 *desc = 1 << log2above(*desc);
698 "port %u increased number of descriptors in Rx queue %u"
699 " to the next power of two (%d)",
700 dev->data->port_id, idx, *desc);
702 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
703 dev->data->port_id, idx, *desc);
704 if (idx >= priv->rxqs_n) {
705 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
706 dev->data->port_id, idx, priv->rxqs_n);
707 rte_errno = EOVERFLOW;
710 if (!mlx5_rxq_releasable(dev, idx)) {
711 DRV_LOG(ERR, "port %u unable to release queue index %u",
712 dev->data->port_id, idx);
716 mlx5_rxq_release(dev, idx);
723 * Pointer to Ethernet device structure.
727 * Number of descriptors to configure in queue.
729 * NUMA socket on which memory must be allocated.
731 * Thresholds parameters.
733 * Memory pool for buffer allocations.
736 * 0 on success, a negative errno value otherwise and rte_errno is set.
739 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
740 unsigned int socket, const struct rte_eth_rxconf *conf,
741 struct rte_mempool *mp)
743 struct mlx5_priv *priv = dev->data->dev_private;
744 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
745 struct mlx5_rxq_ctrl *rxq_ctrl =
746 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
749 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
752 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
754 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
755 dev->data->port_id, idx);
759 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
760 dev->data->port_id, idx);
761 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
768 * Pointer to Ethernet device structure.
772 * Number of descriptors to configure in queue.
773 * @param hairpin_conf
774 * Hairpin configuration parameters.
777 * 0 on success, a negative errno value otherwise and rte_errno is set.
780 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
782 const struct rte_eth_hairpin_conf *hairpin_conf)
784 struct mlx5_priv *priv = dev->data->dev_private;
785 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
786 struct mlx5_rxq_ctrl *rxq_ctrl =
787 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
790 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
793 if (hairpin_conf->peer_count != 1 ||
794 hairpin_conf->peers[0].port != dev->data->port_id ||
795 hairpin_conf->peers[0].queue >= priv->txqs_n) {
796 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
797 " invalid hairpind configuration", dev->data->port_id,
802 rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
804 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
805 dev->data->port_id, idx);
809 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
810 dev->data->port_id, idx);
811 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
816 * DPDK callback to release a RX queue.
819 * Generic RX queue pointer.
822 mlx5_rx_queue_release(void *dpdk_rxq)
824 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
825 struct mlx5_rxq_ctrl *rxq_ctrl;
826 struct mlx5_priv *priv;
830 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
831 priv = rxq_ctrl->priv;
832 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
833 rte_panic("port %u Rx queue %u is still used by a flow and"
834 " cannot be removed\n",
835 PORT_ID(priv), rxq->idx);
836 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
840 * Allocate queue vector and fill epoll fd list for Rx interrupts.
843 * Pointer to Ethernet device.
846 * 0 on success, a negative errno value otherwise and rte_errno is set.
849 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
851 struct mlx5_priv *priv = dev->data->dev_private;
853 unsigned int rxqs_n = priv->rxqs_n;
854 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
855 unsigned int count = 0;
856 struct rte_intr_handle *intr_handle = dev->intr_handle;
858 if (!dev->data->dev_conf.intr_conf.rxq)
860 mlx5_rx_intr_vec_disable(dev);
861 intr_handle->intr_vec = mlx5_malloc(0,
862 n * sizeof(intr_handle->intr_vec[0]),
864 if (intr_handle->intr_vec == NULL) {
866 "port %u failed to allocate memory for interrupt"
867 " vector, Rx interrupts will not be supported",
872 intr_handle->type = RTE_INTR_HANDLE_EXT;
873 for (i = 0; i != n; ++i) {
874 /* This rxq obj must not be released in this function. */
875 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
876 struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
879 /* Skip queues that cannot request interrupts. */
880 if (!rxq_obj || (!rxq_obj->ibv_channel &&
881 !rxq_obj->devx_channel)) {
882 /* Use invalid intr_vec[] index to disable entry. */
883 intr_handle->intr_vec[i] =
884 RTE_INTR_VEC_RXTX_OFFSET +
885 RTE_MAX_RXTX_INTR_VEC_ID;
886 /* Decrease the rxq_ctrl's refcnt */
888 mlx5_rxq_release(dev, i);
891 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
893 "port %u too many Rx queues for interrupt"
894 " vector size (%d), Rx interrupts cannot be"
896 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
897 mlx5_rx_intr_vec_disable(dev);
901 rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
905 "port %u failed to make Rx interrupt file"
906 " descriptor %d non-blocking for queue index"
908 dev->data->port_id, rxq_obj->fd, i);
909 mlx5_rx_intr_vec_disable(dev);
912 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
913 intr_handle->efds[count] = rxq_obj->fd;
917 mlx5_rx_intr_vec_disable(dev);
919 intr_handle->nb_efd = count;
924 * Clean up Rx interrupts handler.
927 * Pointer to Ethernet device.
930 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
932 struct mlx5_priv *priv = dev->data->dev_private;
933 struct rte_intr_handle *intr_handle = dev->intr_handle;
935 unsigned int rxqs_n = priv->rxqs_n;
936 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
938 if (!dev->data->dev_conf.intr_conf.rxq)
940 if (!intr_handle->intr_vec)
942 for (i = 0; i != n; ++i) {
943 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
944 RTE_MAX_RXTX_INTR_VEC_ID)
947 * Need to access the queue directly to release the reference
948 * kept in mlx5_rx_intr_vec_enable().
950 mlx5_rxq_release(dev, i);
953 rte_intr_free_epoll_fd(intr_handle);
954 if (intr_handle->intr_vec)
955 mlx5_free(intr_handle->intr_vec);
956 intr_handle->nb_efd = 0;
957 intr_handle->intr_vec = NULL;
961 * MLX5 CQ notification.
964 * Pointer to receive queue structure.
966 * Sequence number per receive queue.
969 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
972 uint32_t doorbell_hi;
974 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
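/*
 * Compose the arm doorbell: the high word holds the arm sequence
 * number and the current CQ consumer index, the low word holds the
 * CQ number. The CQ doorbell record is updated first, then the UAR
 * register is written to arm the CQ for the next completion event.
 */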
976 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
977 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
978 doorbell = (uint64_t)doorbell_hi << 32;
979 doorbell |= rxq->cqn;
980 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
981 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
982 cq_db_reg, rxq->uar_lock_cq);
986 * DPDK callback for Rx queue interrupt enable.
989 * Pointer to Ethernet device structure.
994 * 0 on success, a negative errno value otherwise and rte_errno is set.
997 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
999 struct mlx5_rxq_ctrl *rxq_ctrl;
1001 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1004 if (rxq_ctrl->irq) {
1005 if (!rxq_ctrl->obj) {
1006 mlx5_rxq_release(dev, rx_queue_id);
1009 mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
1011 mlx5_rxq_release(dev, rx_queue_id);
1019 * DPDK callback for Rx queue interrupt disable.
1022 * Pointer to Ethernet device structure.
1023 * @param rx_queue_id
1027 * 0 on success, a negative errno value otherwise and rte_errno is set.
1030 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1032 struct mlx5_priv *priv = dev->data->dev_private;
1033 struct mlx5_rxq_ctrl *rxq_ctrl;
1036 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1043 if (rxq_ctrl->irq) {
1044 ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
1047 rxq_ctrl->rxq.cq_arm_sn++;
1049 mlx5_rxq_release(dev, rx_queue_id);
1053 * The ret variable may be EAGAIN, which means the get_event function
1054 * was called before an event arrived.
1060 ret = rte_errno; /* Save rte_errno before cleanup. */
1061 mlx5_rxq_release(dev, rx_queue_id);
1063 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
1064 dev->data->port_id, rx_queue_id);
1065 rte_errno = ret; /* Restore rte_errno. */
1070 * Verify the Rx queue objects list is empty
1073 * Pointer to Ethernet device.
1076 * The number of objects not released.
1079 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1081 struct mlx5_priv *priv = dev->data->dev_private;
1083 struct mlx5_rxq_obj *rxq_obj;
1085 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1086 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1087 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1094 * Callback function to initialize mbufs for Multi-Packet RQ.
1097 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1098 void *_m, unsigned int i __rte_unused)
1100 struct mlx5_mprq_buf *buf = _m;
1101 struct rte_mbuf_ext_shared_info *shinfo;
1102 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1105 memset(_m, 0, sizeof(*buf));
1107 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
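/*
 * Give every stride its own shared info so that each stride can be
 * attached to an mbuf as an external buffer and released back to the
 * MPRQ buffer independently through the free callback.
 */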
1108 for (j = 0; j != strd_n; ++j) {
1109 shinfo = &buf->shinfos[j];
1110 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1111 shinfo->fcb_opaque = buf;
1116 * Free mempool of Multi-Packet RQ.
1119 * Pointer to Ethernet device.
1122 * 0 on success, negative errno value on failure.
1125 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1127 struct mlx5_priv *priv = dev->data->dev_private;
1128 struct rte_mempool *mp = priv->mprq_mp;
1133 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1134 dev->data->port_id, mp->name);
1136 * If a buffer in the pool has been externally attached to a mbuf and it
1137 * is still in use by application, destroying the Rx queue can spoil
1138 * the packet. It is unlikely, but it can happen if the application
1139 * dynamically creates and destroys queues while holding Rx packets.
1141 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1142 * RQ isn't provided by application but managed by PMD.
1144 if (!rte_mempool_full(mp)) {
1146 "port %u mempool for Multi-Packet RQ is still in use",
1147 dev->data->port_id);
1151 rte_mempool_free(mp);
1152 /* Unset mempool for each Rx queue. */
1153 for (i = 0; i != priv->rxqs_n; ++i) {
1154 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1158 rxq->mprq_mp = NULL;
1160 priv->mprq_mp = NULL;
1165 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1166 * mempool. If already allocated, reuse it if there are enough elements.
1167 * Otherwise, resize it.
1170 * Pointer to Ethernet device.
1173 * 0 on success, negative errno value on failure.
1176 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1178 struct mlx5_priv *priv = dev->data->dev_private;
1179 struct rte_mempool *mp = priv->mprq_mp;
1180 char name[RTE_MEMPOOL_NAMESIZE];
1181 unsigned int desc = 0;
1182 unsigned int buf_len;
1183 unsigned int obj_num;
1184 unsigned int obj_size;
1185 unsigned int strd_num_n = 0;
1186 unsigned int strd_sz_n = 0;
1188 unsigned int n_ibv = 0;
1190 if (!mlx5_mprq_enabled(dev))
1192 /* Count the total number of descriptors configured. */
1193 for (i = 0; i != priv->rxqs_n; ++i) {
1194 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1195 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1196 (rxq, struct mlx5_rxq_ctrl, rxq);
1198 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1201 desc += 1 << rxq->elts_n;
1202 /* Get the max number of strides. */
1203 if (strd_num_n < rxq->strd_num_n)
1204 strd_num_n = rxq->strd_num_n;
1205 /* Get the max size of a stride. */
1206 if (strd_sz_n < rxq->strd_sz_n)
1207 strd_sz_n = rxq->strd_sz_n;
1209 MLX5_ASSERT(strd_num_n && strd_sz_n);
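/*
 * Each mempool object holds the mlx5_mprq_buf header, the stride data
 * area itself, one shared-info descriptor per stride for external
 * buffer attachment, and trailing room for the mbuf headroom.
 */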
1210 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1211 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1212 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
1214 * Received packets can be either memcpy'd or externally referenced. When
1215 * a packet is attached to an mbuf as an external buffer, it is not
1216 * possible to predict how the buffers will be queued by the application,
1217 * so there is no way to pre-allocate the exact number of buffers needed;
1218 * instead, enough buffers must be prepared speculatively.
1220 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1221 * received packets to buffers provided by application (rxq->mp) until
1222 * this Mempool gets available again.
1225 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1227 * rte_mempool_create_empty() has a sanity check that refuses a cache
1228 * size which is too large compared to the number of elements.
1229 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so a constant
1230 * value of 2 is used instead.
1232 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1233 /* Check whether a mempool is already allocated and if it can be reused. */
1234 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1235 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1236 dev->data->port_id, mp->name);
1239 } else if (mp != NULL) {
1240 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1241 dev->data->port_id, mp->name);
1243 * If freeing fails, the mempool may still be in use and there is no
1244 * choice but to keep using the existing one. On buffer underrun,
1245 * packets will be memcpy'd instead of using external buffer
1248 if (mlx5_mprq_free_mp(dev)) {
1249 if (mp->elt_size >= obj_size)
1255 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1256 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1257 0, NULL, NULL, mlx5_mprq_buf_init,
1258 (void *)(uintptr_t)(1 << strd_num_n),
1259 dev->device->numa_node, 0);
1262 "port %u failed to allocate a mempool for"
1263 " Multi-Packet RQ, count=%u, size=%u",
1264 dev->data->port_id, obj_num, obj_size);
1270 /* Set mempool for each Rx queue. */
1271 for (i = 0; i != priv->rxqs_n; ++i) {
1272 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1273 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1274 (rxq, struct mlx5_rxq_ctrl, rxq);
1276 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1280 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1281 dev->data->port_id);
1285 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1286 sizeof(struct rte_vlan_hdr) * 2 + \
1287 sizeof(struct rte_ipv6_hdr)))
1288 #define MAX_TCP_OPTION_SIZE 40u
1289 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1290 sizeof(struct rte_tcp_hdr) + \
1291 MAX_TCP_OPTION_SIZE))
1294 * Adjust the maximum LRO message size.
1297 * Pointer to Ethernet device.
1300 * @param max_lro_size
1301 * The maximum size for LRO packet.
1304 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1305 uint32_t max_lro_size)
1307 struct mlx5_priv *priv = dev->data->dev_private;
1309 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1310 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1311 MLX5_MAX_TCP_HDR_OFFSET)
1312 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1313 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1314 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1315 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
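/*
 * The limit is kept in MLX5_LRO_SEG_CHUNK_SIZE units (the log below
 * scales it back to bytes), and the per-port value is the minimum
 * across all configured Rx queues.
 */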
1316 if (priv->max_lro_msg_size)
1317 priv->max_lro_msg_size =
1318 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1320 priv->max_lro_msg_size = max_lro_size;
1322 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1323 dev->data->port_id, idx,
1324 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1328 * Create a DPDK Rx queue.
1331 * Pointer to Ethernet device.
1335 * Number of descriptors to configure in queue.
1337 * NUMA socket on which memory must be allocated.
1340 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1342 struct mlx5_rxq_ctrl *
1343 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1344 unsigned int socket, const struct rte_eth_rxconf *conf,
1345 struct rte_mempool *mp)
1347 struct mlx5_priv *priv = dev->data->dev_private;
1348 struct mlx5_rxq_ctrl *tmpl;
1349 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1350 struct mlx5_dev_config *config = &priv->config;
1351 uint64_t offloads = conf->offloads |
1352 dev->data->dev_conf.rxmode.offloads;
1353 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1354 unsigned int max_rx_pkt_len = lro_on_queue ?
1355 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1356 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1357 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1358 RTE_PKTMBUF_HEADROOM;
1359 unsigned int max_lro_size = 0;
1360 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1361 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1362 unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
1363 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1364 unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
1365 (1U << config->mprq.max_stride_size_n) ?
1366 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1367 unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
1368 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1369 (config->mprq.stride_size_n ?
1370 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
1372 * Always allocate extra slots, even if eventually
1373 * the vector Rx will not be used.
1375 uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1377 if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1378 DEV_RX_OFFLOAD_SCATTER)) {
1379 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1380 " configured and no enough mbuf space(%u) to contain "
1381 "the maximum RX packet length(%u) with head-room(%u)",
1382 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1383 RTE_PKTMBUF_HEADROOM);
1387 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1388 sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
1389 (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
1396 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1397 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1398 MLX5_MR_BTREE_CACHE_N, socket)) {
1399 /* rte_errno is already set. */
1402 tmpl->socket = socket;
1403 if (dev->data->dev_conf.intr_conf.rxq)
1406 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1407 * following conditions are met:
1408 * - MPRQ is enabled.
1409 * - The number of descs is more than the number of strides.
1410 * - max_rx_pkt_len plus overhead is less than the max size
1411 * of a stride or mprq_stride_size is specified by a user.
1412 * Need to make sure that there are enough strides to hold
1413 * the maximum packet size in case mprq_stride_size is set.
1414 * Otherwise, enable Rx scatter if necessary.
1416 if (mprq_en && desc > (1U << mprq_stride_nums) &&
1417 (non_scatter_min_mbuf_size <=
1418 (1U << config->mprq.max_stride_size_n) ||
1419 (config->mprq.stride_size_n &&
1420 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1421 /* TODO: Rx scatter isn't supported yet. */
1422 tmpl->rxq.sges_n = 0;
1423 /* Trim the number of descs needed. */
1424 desc >>= mprq_stride_nums;
1425 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1426 config->mprq.stride_num_n : mprq_stride_nums;
1427 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1428 config->mprq.stride_size_n : mprq_stride_size;
1429 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1430 tmpl->rxq.strd_scatter_en =
1431 !!(offloads & DEV_RX_OFFLOAD_SCATTER);
1432 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1433 config->mprq.max_memcpy_len);
1434 max_lro_size = RTE_MIN(max_rx_pkt_len,
1435 (1u << tmpl->rxq.strd_num_n) *
1436 (1u << tmpl->rxq.strd_sz_n));
1438 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1439 " strd_num_n = %u, strd_sz_n = %u",
1440 dev->data->port_id, idx,
1441 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1442 } else if (max_rx_pkt_len <= first_mb_free_size) {
1443 tmpl->rxq.sges_n = 0;
1444 max_lro_size = max_rx_pkt_len;
1445 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1446 unsigned int size = non_scatter_min_mbuf_size;
1447 unsigned int sges_n;
1449 if (lro_on_queue && first_mb_free_size <
1450 MLX5_MAX_LRO_HEADER_FIX) {
1451 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1452 " to include the max header size(%u) for LRO",
1453 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1454 rte_errno = ENOTSUP;
1458 * Determine the number of SGEs needed for a full packet
1459 * and round it to the next power of two.
1461 sges_n = log2above((size / mb_len) + !!(size % mb_len));
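/*
 * For example, with a 2048-byte mbuf data room and a 9000-byte maximum
 * frame plus headroom, about five segments are needed, which rounds up
 * to eight (sges_n = 3).
 */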
1462 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1464 "port %u too many SGEs (%u) needed to handle"
1465 " requested maximum packet size %u, the maximum"
1466 " supported are %u", dev->data->port_id,
1467 1 << sges_n, max_rx_pkt_len,
1468 1u << MLX5_MAX_LOG_RQ_SEGS);
1469 rte_errno = ENOTSUP;
1472 tmpl->rxq.sges_n = sges_n;
1473 max_lro_size = max_rx_pkt_len;
1475 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1477 "port %u MPRQ is requested but cannot be enabled\n"
1478 " (requested: pkt_sz = %u, desc_num = %u,"
1479 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1480 " supported: min_rxqs_num = %u,"
1481 " min_stride_sz = %u, max_stride_sz = %u).",
1482 dev->data->port_id, non_scatter_min_mbuf_size,
1484 config->mprq.stride_size_n ?
1485 (1U << config->mprq.stride_size_n) :
1486 (1U << mprq_stride_size),
1487 config->mprq.stride_num_n ?
1488 (1U << config->mprq.stride_num_n) :
1489 (1U << mprq_stride_nums),
1490 config->mprq.min_rxqs_num,
1491 (1U << config->mprq.min_stride_size_n),
1492 (1U << config->mprq.max_stride_size_n));
1493 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1494 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1495 if (desc % (1 << tmpl->rxq.sges_n)) {
1497 "port %u number of Rx queue descriptors (%u) is not a"
1498 " multiple of SGEs per packet (%u)",
1501 1 << tmpl->rxq.sges_n);
1505 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1506 /* Toggle RX checksum offload if hardware supports it. */
1507 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1508 /* Configure Rx timestamp. */
1509 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1510 tmpl->rxq.timestamp_rx_flag = 0;
1511 if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
1512 &tmpl->rxq.timestamp_offset,
1513 &tmpl->rxq.timestamp_rx_flag) != 0) {
1514 DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
1517 /* Configure VLAN stripping. */
1518 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1519 /* By default, FCS (CRC) is stripped by hardware. */
1520 tmpl->rxq.crc_present = 0;
1521 tmpl->rxq.lro = lro_on_queue;
1522 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1523 if (config->hw_fcs_strip) {
1525 * RQs used for LRO-enabled TIRs should not be
1526 * configured to scatter the FCS.
1530 "port %u CRC stripping has been "
1531 "disabled but will still be performed "
1532 "by hardware, because LRO is enabled",
1533 dev->data->port_id);
1535 tmpl->rxq.crc_present = 1;
1538 "port %u CRC stripping has been disabled but will"
1539 " still be performed by hardware, make sure MLNX_OFED"
1540 " and firmware are up to date",
1541 dev->data->port_id);
1545 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1546 " incoming frames to hide it",
1548 tmpl->rxq.crc_present ? "disabled" : "enabled",
1549 tmpl->rxq.crc_present << 2);
1551 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1552 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1553 tmpl->rxq.port_id = dev->data->port_id;
1556 tmpl->rxq.elts_n = log2above(desc);
1557 tmpl->rxq.rq_repl_thresh =
1558 MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
1560 (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
1561 tmpl->rxq.mprq_bufs =
1562 (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
1564 tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
1566 tmpl->rxq.idx = idx;
1567 __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1568 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1576 * Create a DPDK Rx hairpin queue.
1579 * Pointer to Ethernet device.
1583 * Number of descriptors to configure in queue.
1584 * @param hairpin_conf
1585 * The hairpin binding configuration.
1588 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1590 struct mlx5_rxq_ctrl *
1591 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1592 const struct rte_eth_hairpin_conf *hairpin_conf)
1594 struct mlx5_priv *priv = dev->data->dev_private;
1595 struct mlx5_rxq_ctrl *tmpl;
1597 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1603 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1604 tmpl->socket = SOCKET_ID_ANY;
1605 tmpl->rxq.rss_hash = 0;
1606 tmpl->rxq.port_id = dev->data->port_id;
1608 tmpl->rxq.mp = NULL;
1609 tmpl->rxq.elts_n = log2above(desc);
1610 tmpl->rxq.elts = NULL;
1611 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
1612 tmpl->hairpin_conf = *hairpin_conf;
1613 tmpl->rxq.idx = idx;
1614 __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1615 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1623 * Pointer to Ethernet device.
1628 * A pointer to the queue if it exists, NULL otherwise.
1630 struct mlx5_rxq_ctrl *
1631 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1633 struct mlx5_priv *priv = dev->data->dev_private;
1634 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1635 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1638 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1639 __atomic_add_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
1645 * Release a Rx queue.
1648 * Pointer to Ethernet device.
1653 * 1 while a reference on it exists, 0 when freed.
1656 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1658 struct mlx5_priv *priv = dev->data->dev_private;
1659 struct mlx5_rxq_ctrl *rxq_ctrl;
1661 if (!(*priv->rxqs)[idx])
1663 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1664 if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1666 if (rxq_ctrl->obj) {
1667 priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
1668 LIST_REMOVE(rxq_ctrl->obj, next);
1669 mlx5_free(rxq_ctrl->obj);
1670 rxq_ctrl->obj = NULL;
1672 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
1673 rxq_free_elts(rxq_ctrl);
1674 if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1675 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
1676 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1677 LIST_REMOVE(rxq_ctrl, next);
1678 mlx5_free(rxq_ctrl);
1679 (*priv->rxqs)[idx] = NULL;
1685 * Verify the Rx Queue list is empty
1688 * Pointer to Ethernet device.
1691 * The number of objects not released.
1694 mlx5_rxq_verify(struct rte_eth_dev *dev)
1696 struct mlx5_priv *priv = dev->data->dev_private;
1697 struct mlx5_rxq_ctrl *rxq_ctrl;
1700 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1701 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1702 dev->data->port_id, rxq_ctrl->rxq.idx);
1709 * Get a Rx queue type.
1712 * Pointer to Ethernet device.
1717 * The Rx queue type.
1720 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
1722 struct mlx5_priv *priv = dev->data->dev_private;
1723 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1725 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1726 rxq_ctrl = container_of((*priv->rxqs)[idx],
1727 struct mlx5_rxq_ctrl,
1729 return rxq_ctrl->type;
1731 return MLX5_RXQ_TYPE_UNDEFINED;
1735 * Get an indirection table.
1738 * Pointer to Ethernet device.
1740 * Queues entering in the indirection table.
1742 * Number of queues in the array.
1745 * An indirection table if found.
1747 struct mlx5_ind_table_obj *
1748 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1751 struct mlx5_priv *priv = dev->data->dev_private;
1752 struct mlx5_ind_table_obj *ind_tbl;
1754 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1755 if ((ind_tbl->queues_n == queues_n) &&
1756 (memcmp(ind_tbl->queues, queues,
1757 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1764 rte_atomic32_inc(&ind_tbl->refcnt);
1765 for (i = 0; i != ind_tbl->queues_n; ++i)
1766 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1772 * Release an indirection table.
1775 * Pointer to Ethernet device.
1777 * Indirection table to release.
1780 * 1 while a reference on it exists, 0 when freed.
1783 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
1784 struct mlx5_ind_table_obj *ind_tbl)
1786 struct mlx5_priv *priv = dev->data->dev_private;
1789 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1790 priv->obj_ops.ind_table_destroy(ind_tbl);
1791 for (i = 0; i != ind_tbl->queues_n; ++i)
1792 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1793 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1794 LIST_REMOVE(ind_tbl, next);
1802 * Verify the indirection table object list is empty
1805 * Pointer to Ethernet device.
1808 * The number of objects not released.
1811 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
1813 struct mlx5_priv *priv = dev->data->dev_private;
1814 struct mlx5_ind_table_obj *ind_tbl;
1817 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1819 "port %u indirection table obj %p still referenced",
1820 dev->data->port_id, (void *)ind_tbl);
1827 * Create an indirection table.
1830 * Pointer to Ethernet device.
1832 * Queues entering in the indirection table.
1834 * Number of queues in the array.
1837 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
1839 static struct mlx5_ind_table_obj *
1840 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
1843 struct mlx5_priv *priv = dev->data->dev_private;
1844 struct mlx5_ind_table_obj *ind_tbl;
1845 const unsigned int n = rte_is_power_of_2(queues_n) ?
1846 log2above(queues_n) :
1847 log2above(priv->config.ind_table_max_size);
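/*
 * The indirection table size is passed as a log2 value; when the
 * number of queues is not a power of two, the device's maximum
 * indirection table size is used instead.
 */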
1851 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
1852 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
1857 ind_tbl->queues_n = queues_n;
1858 for (i = 0; i != queues_n; ++i) {
1859 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1862 ind_tbl->queues[i] = queues[i];
1864 ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
1867 rte_atomic32_inc(&ind_tbl->refcnt);
1868 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1872 for (j = 0; j < i; j++)
1873 mlx5_rxq_release(dev, ind_tbl->queues[j]);
1876 DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
1881 * Get an Rx Hash queue.
1884 * Pointer to Ethernet device.
1886 * RSS configuration for the Rx hash queue.
1888 * Queues entering in hash queue. In case of empty hash_fields only the
1889 * first queue index will be taken for the indirection table.
1894 * A hash Rx queue index on success.
1897 mlx5_hrxq_get(struct rte_eth_dev *dev,
1898 const uint8_t *rss_key, uint32_t rss_key_len,
1899 uint64_t hash_fields,
1900 const uint16_t *queues, uint32_t queues_n)
1902 struct mlx5_priv *priv = dev->data->dev_private;
1903 struct mlx5_hrxq *hrxq;
1906 queues_n = hash_fields ? queues_n : 1;
1907 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
1909 struct mlx5_ind_table_obj *ind_tbl;
1911 if (hrxq->rss_key_len != rss_key_len)
1913 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1915 if (hrxq->hash_fields != hash_fields)
1917 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
1920 if (ind_tbl != hrxq->ind_table) {
1921 mlx5_ind_table_obj_release(dev, ind_tbl);
1924 rte_atomic32_inc(&hrxq->refcnt);
1931 * Release the hash Rx queue.
1934 * Pointer to Ethernet device.
1936 * Index to Hash Rx queue to release.
1939 * 1 while a reference on it exists, 0 when freed.
1942 mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
1944 struct mlx5_priv *priv = dev->data->dev_private;
1945 struct mlx5_hrxq *hrxq;
1947 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
1950 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1951 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1952 mlx5_glue->destroy_flow_action(hrxq->action);
1954 priv->obj_ops.hrxq_destroy(hrxq);
1955 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
1956 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
1957 hrxq_idx, hrxq, next);
1958 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
1961 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
1966 * Create an Rx Hash queue.
1969 * Pointer to Ethernet device.
1971 * RSS key for the Rx hash queue.
1972 * @param rss_key_len
1974 * @param hash_fields
1975 * Verbs protocol hash field to make the RSS on.
1977 * Queues entering in hash queue. In case of empty hash_fields only the
1978 * first queue index will be taken for the indirection table.
1985 * The index of the created hash Rx queue on success, 0 otherwise and rte_errno is set.
1988 mlx5_hrxq_new(struct rte_eth_dev *dev,
1989 const uint8_t *rss_key, uint32_t rss_key_len,
1990 uint64_t hash_fields,
1991 const uint16_t *queues, uint32_t queues_n,
1992 int tunnel __rte_unused)
1994 struct mlx5_priv *priv = dev->data->dev_private;
1995 struct mlx5_hrxq *hrxq = NULL;
1996 uint32_t hrxq_idx = 0;
1997 struct mlx5_ind_table_obj *ind_tbl;
2000 queues_n = hash_fields ? queues_n : 1;
2001 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2003 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
2008 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2011 hrxq->ind_table = ind_tbl;
2012 hrxq->rss_key_len = rss_key_len;
2013 hrxq->hash_fields = hash_fields;
2014 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2015 ret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel);
2020 rte_atomic32_inc(&hrxq->refcnt);
2021 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
2025 ret = rte_errno; /* Save rte_errno before cleanup. */
2026 mlx5_ind_table_obj_release(dev, ind_tbl);
2028 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2029 rte_errno = ret; /* Restore rte_errno. */
2034 * Create a drop Rx Hash queue.
2037 * Pointer to Ethernet device.
2040 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2043 mlx5_drop_action_create(struct rte_eth_dev *dev)
2045 struct mlx5_priv *priv = dev->data->dev_private;
2046 struct mlx5_hrxq *hrxq = NULL;
2049 if (priv->drop_queue.hrxq) {
2050 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2051 return priv->drop_queue.hrxq;
2053 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2056 "Port %u cannot allocate memory for drop queue.",
2057 dev->data->port_id);
2061 priv->drop_queue.hrxq = hrxq;
2062 hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
2064 if (!hrxq->ind_table) {
2068 ret = priv->obj_ops.drop_action_create(dev);
2071 rte_atomic32_set(&hrxq->refcnt, 1);
2075 if (hrxq->ind_table)
2076 mlx5_free(hrxq->ind_table);
2077 priv->drop_queue.hrxq = NULL;
2084 * Release a drop hash Rx queue.
2087 * Pointer to Ethernet device.
2090 mlx5_drop_action_destroy(struct rte_eth_dev *dev)
2092 struct mlx5_priv *priv = dev->data->dev_private;
2093 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2095 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2096 priv->obj_ops.drop_action_destroy(dev);
2097 mlx5_free(priv->drop_queue.rxq);
2098 mlx5_free(hrxq->ind_table);
2100 priv->drop_queue.rxq = NULL;
2101 priv->drop_queue.hrxq = NULL;
2106 * Verify the hash Rx queue list is empty
2109 * Pointer to Ethernet device.
2112 * The number of objects not released.
2115 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2117 struct mlx5_priv *priv = dev->data->dev_private;
2118 struct mlx5_hrxq *hrxq;
2122 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2125 "port %u hash Rx queue %p still referenced",
2126 dev->data->port_id, (void *)hrxq);
2133 * Set the Rx queue timestamp conversion parameters
2136 * Pointer to the Ethernet device structure.
2139 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2141 struct mlx5_priv *priv = dev->data->dev_private;
2142 struct mlx5_dev_ctx_shared *sh = priv->sh;
2143 struct mlx5_rxq_data *data;
2146 for (i = 0; i != priv->rxqs_n; ++i) {
2147 if (!(*priv->rxqs)[i])
2149 data = (*priv->rxqs)[i];
2151 data->rt_timestamp = priv->config.rt_timestamp;