1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 #include <rte_malloc.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
20 #include <rte_eal_paging.h>
22 #include <mlx5_glue.h>
23 #include <mlx5_devx_cmds.h>
24 #include <mlx5_malloc.h>
26 #include "mlx5_defs.h"
28 #include "mlx5_common_os.h"
29 #include "mlx5_rxtx.h"
30 #include "mlx5_utils.h"
31 #include "mlx5_autoconf.h"
32 #include "mlx5_flow.h"
35 /* Default RSS hash key also used for ConnectX-3. */
36 uint8_t rss_hash_default_key[] = {
37 0x2c, 0xc6, 0x81, 0xd1,
38 0x5b, 0xdb, 0xf4, 0xf7,
39 0xfc, 0xa2, 0x83, 0x19,
40 0xdb, 0x1a, 0x3e, 0x94,
41 0x6b, 0x9e, 0x38, 0xd9,
42 0x2c, 0x9c, 0x03, 0xd1,
43 0xad, 0x99, 0x44, 0xa7,
44 0xd9, 0x56, 0x3d, 0x59,
45 0x06, 0x3c, 0x25, 0xf3,
46 0xfc, 0x1f, 0xdc, 0x2a,
49 /* Length of the default RSS hash key. */
50 static_assert(MLX5_RSS_HASH_KEY_LEN ==
51 (unsigned int)sizeof(rss_hash_default_key),
52 "wrong RSS default key size.");
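/*
 * Illustrative note (not part of the original code): applications that do
 * not supply their own RSS key end up with the 40-byte Toeplitz key above.
 * A hypothetical application-side override could look like:
 *
 *   static uint8_t my_key[MLX5_RSS_HASH_KEY_LEN] = { ... };
 *   struct rte_eth_rss_conf rss_conf = {
 *       .rss_key = my_key,
 *       .rss_key_len = MLX5_RSS_HASH_KEY_LEN,
 *       .rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
 *   };
 *   rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */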
55 * Check whether Multi-Packet RQ can be enabled for the device.
58 * Pointer to Ethernet device.
61 * 1 if supported, negative errno value if not.
64 mlx5_check_mprq_support(struct rte_eth_dev *dev)
66 struct mlx5_priv *priv = dev->data->dev_private;
68 if (priv->config.mprq.enabled &&
69 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
75 * Check whether Multi-Packet RQ is enabled for the Rx queue.
78 * Pointer to receive queue structure.
81 * 0 if disabled, otherwise enabled.
84 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
86 return rxq->strd_num_n > 0;
90 * Check whether Multi-Packet RQ is enabled for the device.
93 * Pointer to Ethernet device.
96 * 0 if disabled, otherwise enabled.
99 mlx5_mprq_enabled(struct rte_eth_dev *dev)
101 struct mlx5_priv *priv = dev->data->dev_private;
106 if (mlx5_check_mprq_support(dev) < 0)
108 /* All the configured queues should be enabled. */
109 for (i = 0; i < priv->rxqs_n; ++i) {
110 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
111 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
112 (rxq, struct mlx5_rxq_ctrl, rxq);
114 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
117 if (mlx5_rxq_mprq_enabled(rxq))
120 /* Multi-Packet RQ can't be partially configured. */
121 MLX5_ASSERT(n == 0 || n == n_ibv);
126 * Calculate the number of CQEs in CQ for the Rx queue.
129 * Pointer to receive queue structure.
132 * Number of CQEs in CQ.
135 mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
138 unsigned int wqe_n = 1 << rxq_data->elts_n;
140 if (mlx5_rxq_mprq_enabled(rxq_data))
141 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
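/*
 * Worked example (illustrative): with elts_n = 4 the RQ holds 16 WQEs and
 * with strd_num_n = 6 each MPRQ WQE carries 64 strides, i.e. up to one
 * completion per stride, so cqe_n = 16 * 64 - 1 = 1023.
 */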
148 * Allocate RX queue elements for Multi-Packet RQ.
151 * Pointer to RX queue structure.
154 * 0 on success, a negative errno value otherwise and rte_errno is set.
157 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
159 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
160 unsigned int wqe_n = 1 << rxq->elts_n;
164 /* Iterate on segments. */
165 for (i = 0; i <= wqe_n; ++i) {
166 struct mlx5_mprq_buf *buf;
168 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
169 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
174 (*rxq->mprq_bufs)[i] = buf;
176 rxq->mprq_repl = buf;
179 "port %u Rx queue %u allocated and configured %u segments",
180 rxq->port_id, rxq->idx, wqe_n);
183 err = rte_errno; /* Save rte_errno before cleanup. */
185 for (i = 0; (i != wqe_n); ++i) {
186 if ((*rxq->mprq_bufs)[i] != NULL)
187 rte_mempool_put(rxq->mprq_mp,
188 (*rxq->mprq_bufs)[i]);
189 (*rxq->mprq_bufs)[i] = NULL;
191 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
192 rxq->port_id, rxq->idx);
193 rte_errno = err; /* Restore rte_errno. */
198 * Allocate RX queue elements for Single-Packet RQ.
201 * Pointer to RX queue structure.
204 * 0 on success, errno value on failure.
207 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
209 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
210 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
214 /* Iterate on segments. */
215 for (i = 0; (i != elts_n); ++i) {
216 struct rte_mbuf *buf;
218 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
220 DRV_LOG(ERR, "port %u empty mbuf pool",
221 PORT_ID(rxq_ctrl->priv));
225 /* Headroom is reserved by rte_pktmbuf_alloc(). */
226 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
227 /* Buffer is supposed to be empty. */
228 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
229 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
230 MLX5_ASSERT(!buf->next);
231 /* Only the first segment keeps headroom. */
233 SET_DATA_OFF(buf, 0);
234 PORT(buf) = rxq_ctrl->rxq.port_id;
235 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
236 PKT_LEN(buf) = DATA_LEN(buf);
238 (*rxq_ctrl->rxq.elts)[i] = buf;
240 /* If Rx vector is activated. */
241 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
242 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
243 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
244 struct rte_pktmbuf_pool_private *priv =
245 (struct rte_pktmbuf_pool_private *)
246 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
249 /* Initialize default rearm_data for vPMD. */
250 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
251 rte_mbuf_refcnt_set(mbuf_init, 1);
252 mbuf_init->nb_segs = 1;
253 mbuf_init->port = rxq->port_id;
254 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
255 mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
257 * prevent compiler reordering:
258 * rearm_data covers previous fields.
260 rte_compiler_barrier();
261 rxq->mbuf_initializer =
262 *(rte_xmm_t *)&mbuf_init->rearm_data;
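/*
 * Note: mbuf_initializer caches the rearm_data template built above
 * (data_off, refcnt, nb_segs, port and, for pinned external buffers,
 * ol_flags) so the vectorized Rx path can reset every returned mbuf with a
 * single wide store instead of writing each field separately.
 */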
263 /* Padding with a fake mbuf for vectorized Rx. */
264 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
265 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
268 "port %u Rx queue %u allocated and configured %u segments"
270 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
271 elts_n / (1 << rxq_ctrl->rxq.sges_n));
274 err = rte_errno; /* Save rte_errno before cleanup. */
276 for (i = 0; (i != elts_n); ++i) {
277 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
278 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
279 (*rxq_ctrl->rxq.elts)[i] = NULL;
281 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
282 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
283 rte_errno = err; /* Restore rte_errno. */
288 * Allocate RX queue elements.
291 * Pointer to RX queue structure.
294 * 0 on success, errno value on failure.
297 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
299 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
300 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
304 * Free RX queue elements for Multi-Packet RQ.
307 * Pointer to RX queue structure.
310 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
312 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
315 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
316 rxq->port_id, rxq->idx);
317 if (rxq->mprq_bufs == NULL)
319 MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
320 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
321 if ((*rxq->mprq_bufs)[i] != NULL)
322 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
323 (*rxq->mprq_bufs)[i] = NULL;
325 if (rxq->mprq_repl != NULL) {
326 mlx5_mprq_buf_free(rxq->mprq_repl);
327 rxq->mprq_repl = NULL;
332 * Free RX queue elements for Single-Packet RQ.
335 * Pointer to RX queue structure.
338 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
340 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
341 const uint16_t q_n = (1 << rxq->elts_n);
342 const uint16_t q_mask = q_n - 1;
343 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
346 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
347 PORT_ID(rxq_ctrl->priv), rxq->idx);
348 if (rxq->elts == NULL)
351 * Some mbufs in the ring belong to the application. They cannot be
354 if (mlx5_rxq_check_vec_support(rxq) > 0) {
355 for (i = 0; i < used; ++i)
356 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
357 rxq->rq_pi = rxq->rq_ci;
359 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
360 if ((*rxq->elts)[i] != NULL)
361 rte_pktmbuf_free_seg((*rxq->elts)[i]);
362 (*rxq->elts)[i] = NULL;
367 * Free RX queue elements.
370 * Pointer to RX queue structure.
373 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
375 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
376 rxq_free_elts_mprq(rxq_ctrl);
378 rxq_free_elts_sprq(rxq_ctrl);
382 * Returns the per-queue supported offloads.
385 * Pointer to Ethernet device.
388 * Supported Rx offloads.
391 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
393 struct mlx5_priv *priv = dev->data->dev_private;
394 struct mlx5_dev_config *config = &priv->config;
395 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
396 DEV_RX_OFFLOAD_TIMESTAMP |
397 DEV_RX_OFFLOAD_JUMBO_FRAME |
398 DEV_RX_OFFLOAD_RSS_HASH);
400 if (config->hw_fcs_strip)
401 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
404 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
405 DEV_RX_OFFLOAD_UDP_CKSUM |
406 DEV_RX_OFFLOAD_TCP_CKSUM);
407 if (config->hw_vlan_strip)
408 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
409 if (MLX5_LRO_SUPPORTED(dev))
410 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
416 * Returns the per-port supported offloads.
419 * Supported Rx offloads.
422 mlx5_get_rx_port_offloads(void)
424 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
430 * Verify if the queue can be released.
433 * Pointer to Ethernet device.
438 * 1 if the queue can be released
439 * 0 if the queue cannot be released, there are references to it.
440 * A negative errno value is returned and rte_errno is set if the queue doesn't exist.
443 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
445 struct mlx5_priv *priv = dev->data->dev_private;
446 struct mlx5_rxq_ctrl *rxq_ctrl;
448 if (!(*priv->rxqs)[idx]) {
452 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
453 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
456 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
458 rxq_sync_cq(struct mlx5_rxq_data *rxq)
460 const uint16_t cqe_n = 1 << rxq->cqe_n;
461 const uint16_t cqe_mask = cqe_n - 1;
462 volatile struct mlx5_cqe *cqe;
467 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
468 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
469 if (ret == MLX5_CQE_STATUS_HW_OWN)
471 if (ret == MLX5_CQE_STATUS_ERR) {
475 MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
476 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
480 /* Compute the next non-compressed CQE. */
481 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
484 /* Move all CQEs to HW ownership, including possible MiniCQEs. */
485 for (i = 0; i < cqe_n; i++) {
486 cqe = &(*rxq->cqes)[i];
487 cqe->op_own = MLX5_CQE_INVALIDATE;
489 /* Resync CQE and WQE (WQ in RESET state). */
491 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
493 *rxq->rq_db = rte_cpu_to_be_32(0);
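/*
 * At this point every CQE is owned by HW again: the CQ doorbell records the
 * final consumer index reached above and the RQ doorbell is reset to zero,
 * so the queue can be repopulated from a clean state on restart.
 */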
498 * Rx queue stop. Device queue goes to the RESET state,
499 * all involved mbufs are freed from WQ.
502 * Pointer to Ethernet device structure.
507 * 0 on success, a negative errno value otherwise and rte_errno is set.
510 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
512 struct mlx5_priv *priv = dev->data->dev_private;
513 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
514 struct mlx5_rxq_ctrl *rxq_ctrl =
515 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
518 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
519 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
520 struct ibv_wq_attr mod = {
521 .attr_mask = IBV_WQ_ATTR_STATE,
522 .wq_state = IBV_WQS_RESET,
525 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
526 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
527 struct mlx5_devx_modify_rq_attr rq_attr;
529 memset(&rq_attr, 0, sizeof(rq_attr));
530 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
531 rq_attr.state = MLX5_RQC_STATE_RST;
532 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
535 DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
540 /* Remove all processed CQEs. */
542 /* Free all involved mbufs. */
543 rxq_free_elts(rxq_ctrl);
544 /* Set the actual queue state. */
545 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
550 * Rx queue stop. Device queue goes to the RESET state,
551 * all involved mbufs are freed from WQ.
554 * Pointer to Ethernet device structure.
559 * 0 on success, a negative errno value otherwise and rte_errno is set.
562 mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
564 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
567 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_HAIRPIN) {
568 DRV_LOG(ERR, "Hairpin queue can't be stopped");
572 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
575 * Vectorized Rx burst requires the CQ and RQ indices to be
576 * synchronized; this may be broken on RQ restart and cause
577 * Rx malfunction, so queue stopping is not supported if the
578 * vectorized Rx burst is engaged. The burst routine pointer
579 * depends on the process type, so the check must be
580 * performed here.
582 if (pkt_burst == mlx5_rx_burst) {
583 DRV_LOG(ERR, "Rx queue stop is not supported "
584 "for vectorized Rx");
588 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
589 ret = mlx5_mp_os_req_queue_control(dev, idx,
590 MLX5_MP_REQ_QUEUE_RX_STOP);
592 ret = mlx5_rx_queue_stop_primary(dev, idx);
598 * Rx queue start. Device queue goes to the ready state,
599 * all required mbufs are allocated and WQ is replenished.
602 * Pointer to Ethernet device structure.
607 * 0 on success, a negative errno value otherwise and rte_errno is set.
610 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
612 struct mlx5_priv *priv = dev->data->dev_private;
613 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
614 struct mlx5_rxq_ctrl *rxq_ctrl =
615 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
618 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
619 /* Allocate needed buffers. */
620 ret = rxq_alloc_elts(rxq_ctrl);
622 DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
627 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
629 /* Reset RQ consumer before moving queue to READY state. */
630 *rxq->rq_db = rte_cpu_to_be_32(0);
632 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
633 struct ibv_wq_attr mod = {
634 .attr_mask = IBV_WQ_ATTR_STATE,
635 .wq_state = IBV_WQS_RDY,
638 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
639 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
640 struct mlx5_devx_modify_rq_attr rq_attr;
642 memset(&rq_attr, 0, sizeof(rq_attr));
643 rq_attr.rq_state = MLX5_RQC_STATE_RST;
644 rq_attr.state = MLX5_RQC_STATE_RDY;
645 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
648 DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
653 /* Reinitialize RQ - set WQEs. */
654 mlx5_rxq_initialize(rxq);
655 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
656 /* Set actual queue state. */
657 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
662 * Rx queue start. Device queue goes to the ready state,
663 * all required mbufs are allocated and WQ is replenished.
666 * Pointer to Ethernet device structure.
671 * 0 on success, a negative errno value otherwise and rte_errno is set.
674 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
678 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_HAIRPIN) {
679 DRV_LOG(ERR, "Hairpin queue can't be started");
683 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
685 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
686 ret = mlx5_mp_os_req_queue_control(dev, idx,
687 MLX5_MP_REQ_QUEUE_RX_START);
689 ret = mlx5_rx_queue_start_primary(dev, idx);
695 * Rx queue presetup checks.
698 * Pointer to Ethernet device structure.
702 * Number of descriptors to configure in queue.
705 * 0 on success, a negative errno value otherwise and rte_errno is set.
708 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
710 struct mlx5_priv *priv = dev->data->dev_private;
712 if (!rte_is_power_of_2(*desc)) {
713 *desc = 1 << log2above(*desc);
715 "port %u increased number of descriptors in Rx queue %u"
716 " to the next power of two (%d)",
717 dev->data->port_id, idx, *desc);
719 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
720 dev->data->port_id, idx, *desc);
721 if (idx >= priv->rxqs_n) {
722 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
723 dev->data->port_id, idx, priv->rxqs_n);
724 rte_errno = EOVERFLOW;
727 if (!mlx5_rxq_releasable(dev, idx)) {
728 DRV_LOG(ERR, "port %u unable to release queue index %u",
729 dev->data->port_id, idx);
733 mlx5_rxq_release(dev, idx);
740 * Pointer to Ethernet device structure.
744 * Number of descriptors to configure in queue.
746 * NUMA socket on which memory must be allocated.
748 * Thresholds parameters.
750 * Memory pool for buffer allocations.
753 * 0 on success, a negative errno value otherwise and rte_errno is set.
756 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
757 unsigned int socket, const struct rte_eth_rxconf *conf,
758 struct rte_mempool *mp)
760 struct mlx5_priv *priv = dev->data->dev_private;
761 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
762 struct mlx5_rxq_ctrl *rxq_ctrl =
763 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
766 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
769 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
771 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
772 dev->data->port_id, idx);
776 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
777 dev->data->port_id, idx);
778 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
785 * Pointer to Ethernet device structure.
789 * Number of descriptors to configure in queue.
790 * @param hairpin_conf
791 * Hairpin configuration parameters.
794 * 0 on success, a negative errno value otherwise and rte_errno is set.
797 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
799 const struct rte_eth_hairpin_conf *hairpin_conf)
801 struct mlx5_priv *priv = dev->data->dev_private;
802 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
803 struct mlx5_rxq_ctrl *rxq_ctrl =
804 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
807 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
810 if (hairpin_conf->peer_count != 1 ||
811 hairpin_conf->peers[0].port != dev->data->port_id ||
812 hairpin_conf->peers[0].queue >= priv->txqs_n) {
813 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
814 " invalid hairpin configuration", dev->data->port_id,
819 rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
821 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
822 dev->data->port_id, idx);
826 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
827 dev->data->port_id, idx);
828 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
833 * DPDK callback to release a RX queue.
836 * Generic RX queue pointer.
839 mlx5_rx_queue_release(void *dpdk_rxq)
841 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
842 struct mlx5_rxq_ctrl *rxq_ctrl;
843 struct mlx5_priv *priv;
847 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
848 priv = rxq_ctrl->priv;
849 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
850 rte_panic("port %u Rx queue %u is still used by a flow and"
851 " cannot be removed\n",
852 PORT_ID(priv), rxq->idx);
853 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
857 * Allocate queue vector and fill epoll fd list for Rx interrupts.
860 * Pointer to Ethernet device.
863 * 0 on success, a negative errno value otherwise and rte_errno is set.
866 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
868 struct mlx5_priv *priv = dev->data->dev_private;
870 unsigned int rxqs_n = priv->rxqs_n;
871 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
872 unsigned int count = 0;
873 struct rte_intr_handle *intr_handle = dev->intr_handle;
875 if (!dev->data->dev_conf.intr_conf.rxq)
877 mlx5_rx_intr_vec_disable(dev);
878 intr_handle->intr_vec = mlx5_malloc(0,
879 n * sizeof(intr_handle->intr_vec[0]),
881 if (intr_handle->intr_vec == NULL) {
883 "port %u failed to allocate memory for interrupt"
884 " vector, Rx interrupts will not be supported",
889 intr_handle->type = RTE_INTR_HANDLE_EXT;
890 for (i = 0; i != n; ++i) {
891 /* This rxq obj must not be released in this function. */
892 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
893 struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
896 /* Skip queues that cannot request interrupts. */
897 if (!rxq_obj || (!rxq_obj->ibv_channel &&
898 !rxq_obj->devx_channel)) {
899 /* Use invalid intr_vec[] index to disable entry. */
900 intr_handle->intr_vec[i] =
901 RTE_INTR_VEC_RXTX_OFFSET +
902 RTE_MAX_RXTX_INTR_VEC_ID;
903 /* Decrease the rxq_ctrl's refcnt */
905 mlx5_rxq_release(dev, i);
908 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
910 "port %u too many Rx queues for interrupt"
911 " vector size (%d), Rx interrupts cannot be"
913 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
914 mlx5_rx_intr_vec_disable(dev);
918 rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
922 "port %u failed to make Rx interrupt file"
923 " descriptor %d non-blocking for queue index"
925 dev->data->port_id, rxq_obj->fd, i);
926 mlx5_rx_intr_vec_disable(dev);
929 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
930 intr_handle->efds[count] = rxq_obj->fd;
934 mlx5_rx_intr_vec_disable(dev);
936 intr_handle->nb_efd = count;
941 * Clean up Rx interrupts handler.
944 * Pointer to Ethernet device.
947 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
949 struct mlx5_priv *priv = dev->data->dev_private;
950 struct rte_intr_handle *intr_handle = dev->intr_handle;
952 unsigned int rxqs_n = priv->rxqs_n;
953 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
955 if (!dev->data->dev_conf.intr_conf.rxq)
957 if (!intr_handle->intr_vec)
959 for (i = 0; i != n; ++i) {
960 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
961 RTE_MAX_RXTX_INTR_VEC_ID)
964 * Need to access the queue directly to release the reference
965 * kept in mlx5_rx_intr_vec_enable().
967 mlx5_rxq_release(dev, i);
970 rte_intr_free_epoll_fd(intr_handle);
971 if (intr_handle->intr_vec)
972 mlx5_free(intr_handle->intr_vec);
973 intr_handle->nb_efd = 0;
974 intr_handle->intr_vec = NULL;
978 * MLX5 CQ notification.
981 * Pointer to receive queue structure.
983 * Sequence number per receive queue.
986 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
989 uint32_t doorbell_hi;
991 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
993 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
994 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
995 doorbell = (uint64_t)doorbell_hi << 32;
996 doorbell |= rxq->cqn;
997 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
998 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
999 cq_db_reg, rxq->uar_lock_cq);
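/*
 * Doorbell layout note: the high 32 bits combine the arm sequence number
 * (sq_n, masked and shifted above) with the current CQ consumer index, the
 * low 32 bits carry the CQ number. The CQ doorbell record is updated first,
 * then the 64-bit value is written to the UAR register to request an event
 * on the next completion.
 */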
1003 * DPDK callback for Rx queue interrupt enable.
1006 * Pointer to Ethernet device structure.
1007 * @param rx_queue_id
1011 * 0 on success, a negative errno value otherwise and rte_errno is set.
1014 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1016 struct mlx5_rxq_ctrl *rxq_ctrl;
1018 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1021 if (rxq_ctrl->irq) {
1022 if (!rxq_ctrl->obj) {
1023 mlx5_rxq_release(dev, rx_queue_id);
1026 mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
1028 mlx5_rxq_release(dev, rx_queue_id);
1036 * DPDK callback for Rx queue interrupt disable.
1039 * Pointer to Ethernet device structure.
1040 * @param rx_queue_id
1044 * 0 on success, a negative errno value otherwise and rte_errno is set.
1047 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1049 struct mlx5_priv *priv = dev->data->dev_private;
1050 struct mlx5_rxq_ctrl *rxq_ctrl;
1053 rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1060 if (rxq_ctrl->irq) {
1061 ret = priv->obj_ops->rxq_event_get(rxq_ctrl->obj);
1064 rxq_ctrl->rxq.cq_arm_sn++;
1066 mlx5_rxq_release(dev, rx_queue_id);
1070 * The ret variable may be EAGAIN, which means the get_event function was
1071 * called before an event was received.
1077 ret = rte_errno; /* Save rte_errno before cleanup. */
1078 mlx5_rxq_release(dev, rx_queue_id);
1080 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
1081 dev->data->port_id, rx_queue_id);
1082 rte_errno = ret; /* Restore rte_errno. */
1087 * Verify the Rx queue objects list is empty.
1090 * Pointer to Ethernet device.
1093 * The number of objects not released.
1096 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1098 struct mlx5_priv *priv = dev->data->dev_private;
1100 struct mlx5_rxq_obj *rxq_obj;
1102 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1103 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1104 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1111 * Callback function to initialize mbufs for Multi-Packet RQ.
1114 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1115 void *_m, unsigned int i __rte_unused)
1117 struct mlx5_mprq_buf *buf = _m;
1118 struct rte_mbuf_ext_shared_info *shinfo;
1119 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1122 memset(_m, 0, sizeof(*buf));
1124 rte_atomic16_set(&buf->refcnt, 1);
1125 for (j = 0; j != strd_n; ++j) {
1126 shinfo = &buf->shinfos[j];
1127 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1128 shinfo->fcb_opaque = buf;
1133 * Free mempool of Multi-Packet RQ.
1136 * Pointer to Ethernet device.
1139 * 0 on success, negative errno value on failure.
1142 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1144 struct mlx5_priv *priv = dev->data->dev_private;
1145 struct rte_mempool *mp = priv->mprq_mp;
1150 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1151 dev->data->port_id, mp->name);
1153 * If a buffer in the pool has been externally attached to an mbuf and is
1154 * still in use by the application, destroying the Rx queue can spoil
1155 * the packet. This is unlikely, but it can happen if the application
1156 * dynamically creates and destroys queues while holding Rx packets.
1158 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1159 * RQ isn't provided by the application but is managed by the PMD.
1161 if (!rte_mempool_full(mp)) {
1163 "port %u mempool for Multi-Packet RQ is still in use",
1164 dev->data->port_id);
1168 rte_mempool_free(mp);
1169 /* Unset mempool for each Rx queue. */
1170 for (i = 0; i != priv->rxqs_n; ++i) {
1171 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1175 rxq->mprq_mp = NULL;
1177 priv->mprq_mp = NULL;
1182 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1183 * mempool. If already allocated, reuse it if there are enough elements.
1184 * Otherwise, resize it.
1187 * Pointer to Ethernet device.
1190 * 0 on success, negative errno value on failure.
1193 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1195 struct mlx5_priv *priv = dev->data->dev_private;
1196 struct rte_mempool *mp = priv->mprq_mp;
1197 char name[RTE_MEMPOOL_NAMESIZE];
1198 unsigned int desc = 0;
1199 unsigned int buf_len;
1200 unsigned int obj_num;
1201 unsigned int obj_size;
1202 unsigned int strd_num_n = 0;
1203 unsigned int strd_sz_n = 0;
1205 unsigned int n_ibv = 0;
1207 if (!mlx5_mprq_enabled(dev))
1209 /* Count the total number of descriptors configured. */
1210 for (i = 0; i != priv->rxqs_n; ++i) {
1211 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1212 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1213 (rxq, struct mlx5_rxq_ctrl, rxq);
1215 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1218 desc += 1 << rxq->elts_n;
1219 /* Get the max number of strides. */
1220 if (strd_num_n < rxq->strd_num_n)
1221 strd_num_n = rxq->strd_num_n;
1222 /* Get the max size of a stride. */
1223 if (strd_sz_n < rxq->strd_sz_n)
1224 strd_sz_n = rxq->strd_sz_n;
1226 MLX5_ASSERT(strd_num_n && strd_sz_n);
1227 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1228 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1229 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
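/*
 * Sizing example (illustrative): with strd_num_n = 6 and strd_sz_n = 11
 * each MPRQ buffer provides 64 strides of 2048 bytes, so buf_len is 128 KiB
 * and obj_size adds the mlx5_mprq_buf header, 64 shared-info descriptors
 * (one per stride) and the mbuf headroom on top of that.
 */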
1231 * Received packets can be either memcpy'd or externally referenced. When
1232 * a packet is attached to an mbuf as an external buffer, it isn't
1233 * possible to predict how the buffers will be queued by the application,
1234 * so the needed buffers cannot be pre-allocated exactly; enough buffers
1235 * must be prepared speculatively.
1237 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1238 * received packets to buffers provided by application (rxq->mp) until
1239 * this Mempool gets available again.
1242 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1244 * rte_mempool_create_empty() has a sanity check that refuses a cache
1245 * size which is large compared to the number of elements.
1246 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so a constant
1247 * number 2 is used instead.
1249 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1250 /* Check whether a mempool is already allocated and if it can be reused. */
1251 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1252 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1253 dev->data->port_id, mp->name);
1256 } else if (mp != NULL) {
1257 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1258 dev->data->port_id, mp->name);
1260 * If freeing fails, the mempool may still be in use; there is no
1261 * choice but to keep using the existing one. On buffer underrun,
1262 * packets will be memcpy'd instead of external buffer
1265 if (mlx5_mprq_free_mp(dev)) {
1266 if (mp->elt_size >= obj_size)
1272 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1273 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1274 0, NULL, NULL, mlx5_mprq_buf_init,
1275 (void *)(uintptr_t)(1 << strd_num_n),
1276 dev->device->numa_node, 0);
1279 "port %u failed to allocate a mempool for"
1280 " Multi-Packet RQ, count=%u, size=%u",
1281 dev->data->port_id, obj_num, obj_size);
1287 /* Set mempool for each Rx queue. */
1288 for (i = 0; i != priv->rxqs_n; ++i) {
1289 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1290 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1291 (rxq, struct mlx5_rxq_ctrl, rxq);
1293 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1297 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1298 dev->data->port_id);
1302 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1303 sizeof(struct rte_vlan_hdr) * 2 + \
1304 sizeof(struct rte_ipv6_hdr)))
1305 #define MAX_TCP_OPTION_SIZE 40u
1306 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1307 sizeof(struct rte_tcp_hdr) + \
1308 MAX_TCP_OPTION_SIZE))
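/*
 * For reference: MLX5_MAX_TCP_HDR_OFFSET is 14 (Ethernet) + 2 * 4 (two VLAN
 * tags) + 40 (IPv6) = 62 bytes, so MLX5_MAX_LRO_HEADER_FIX is 62 + 20 (TCP
 * header) + 40 (maximum TCP options) = 122 bytes.
 */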
1311 * Adjust the maximum LRO message size.
1314 * Pointer to Ethernet device.
1317 * @param max_lro_size
1318 * The maximum size for LRO packet.
1321 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1322 uint32_t max_lro_size)
1324 struct mlx5_priv *priv = dev->data->dev_private;
1326 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1327 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1328 MLX5_MAX_TCP_HDR_OFFSET)
1329 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1330 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1331 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1332 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
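/*
 * Note: the device expresses the maximum LRO message size in units of
 * MLX5_LRO_SEG_CHUNK_SIZE, hence the clamp to MLX5_MAX_LRO_SIZE and the
 * division above. When several Rx queues enable LRO, the smallest adjusted
 * value wins (the RTE_MIN below), since a single per-port value is kept in
 * priv->max_lro_msg_size.
 */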
1333 if (priv->max_lro_msg_size)
1334 priv->max_lro_msg_size =
1335 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1337 priv->max_lro_msg_size = max_lro_size;
1339 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1340 dev->data->port_id, idx,
1341 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1345 * Create a DPDK Rx queue.
1348 * Pointer to Ethernet device.
1352 * Number of descriptors to configure in queue.
1354 * NUMA socket on which memory must be allocated.
1357 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1359 struct mlx5_rxq_ctrl *
1360 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1361 unsigned int socket, const struct rte_eth_rxconf *conf,
1362 struct rte_mempool *mp)
1364 struct mlx5_priv *priv = dev->data->dev_private;
1365 struct mlx5_rxq_ctrl *tmpl;
1366 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1367 unsigned int mprq_stride_nums;
1368 unsigned int mprq_stride_size;
1369 unsigned int mprq_stride_cap;
1370 struct mlx5_dev_config *config = &priv->config;
1372 * Always allocate extra slots, even if eventually
1373 * the vector Rx will not be used.
1376 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1377 uint64_t offloads = conf->offloads |
1378 dev->data->dev_conf.rxmode.offloads;
1379 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1380 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1381 unsigned int max_rx_pkt_len = lro_on_queue ?
1382 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1383 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1384 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1385 RTE_PKTMBUF_HEADROOM;
1386 unsigned int max_lro_size = 0;
1387 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1389 if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1390 DEV_RX_OFFLOAD_SCATTER)) {
1391 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1392 " configured and not enough mbuf space(%u) to contain "
1393 "the maximum RX packet length(%u) with head-room(%u)",
1394 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1395 RTE_PKTMBUF_HEADROOM);
1399 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1400 desc_n * sizeof(struct rte_mbuf *), 0, socket);
1405 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1406 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1407 MLX5_MR_BTREE_CACHE_N, socket)) {
1408 /* rte_errno is already set. */
1411 tmpl->socket = socket;
1412 if (dev->data->dev_conf.intr_conf.rxq)
1414 mprq_stride_nums = config->mprq.stride_num_n ?
1415 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1416 mprq_stride_size = non_scatter_min_mbuf_size <=
1417 (1U << config->mprq.max_stride_size_n) ?
1418 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1419 mprq_stride_cap = (config->mprq.stride_num_n ?
1420 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1421 (config->mprq.stride_size_n ?
1422 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
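/*
 * Illustrative numbers: with 64 strides of 2048 bytes per WQE the stride
 * capacity is 128 KiB. The checks below accept MPRQ only if the descriptor
 * count exceeds the strides per WQE and the largest expected packet fits in
 * the maximum supported stride size or, when the stride size is forced by
 * the user, in this per-WQE capacity.
 */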
1424 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1425 * following conditions are met:
1426 * - MPRQ is enabled.
1427 * - The number of descs is more than the number of strides.
1428 * - max_rx_pkt_len plus overhead is less than the max size
1429 * of a stride or mprq_stride_size is specified by a user.
1430 * Need to make sure that there are enough strides to encapsulate
1431 * the maximum packet size in case mprq_stride_size is set.
1432 * Otherwise, enable Rx scatter if necessary.
1434 if (mprq_en && desc > (1U << mprq_stride_nums) &&
1435 (non_scatter_min_mbuf_size <=
1436 (1U << config->mprq.max_stride_size_n) ||
1437 (config->mprq.stride_size_n &&
1438 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1439 /* TODO: Rx scatter isn't supported yet. */
1440 tmpl->rxq.sges_n = 0;
1441 /* Trim the number of descs needed. */
1442 desc >>= mprq_stride_nums;
1443 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1444 config->mprq.stride_num_n : mprq_stride_nums;
1445 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1446 config->mprq.stride_size_n : mprq_stride_size;
1447 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1448 tmpl->rxq.strd_scatter_en =
1449 !!(offloads & DEV_RX_OFFLOAD_SCATTER);
1450 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1451 config->mprq.max_memcpy_len);
1452 max_lro_size = RTE_MIN(max_rx_pkt_len,
1453 (1u << tmpl->rxq.strd_num_n) *
1454 (1u << tmpl->rxq.strd_sz_n));
1456 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1457 " strd_num_n = %u, strd_sz_n = %u",
1458 dev->data->port_id, idx,
1459 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1460 } else if (max_rx_pkt_len <= first_mb_free_size) {
1461 tmpl->rxq.sges_n = 0;
1462 max_lro_size = max_rx_pkt_len;
1463 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1464 unsigned int size = non_scatter_min_mbuf_size;
1465 unsigned int sges_n;
1467 if (lro_on_queue && first_mb_free_size <
1468 MLX5_MAX_LRO_HEADER_FIX) {
1469 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1470 " to include the max header size(%u) for LRO",
1471 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1472 rte_errno = ENOTSUP;
1476 * Determine the number of SGEs needed for a full packet
1477 * and round it to the next power of two.
1479 sges_n = log2above((size / mb_len) + !!(size % mb_len));
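/*
 * Worked example (illustrative): a 9000-byte max Rx packet plus 128 bytes
 * of headroom needs 9128 bytes; with 2048-byte mbuf data rooms that is
 * 5 buffers, so sges_n = log2above(5) = 3 and 8 SGEs are posted per packet.
 */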
1480 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1482 "port %u too many SGEs (%u) needed to handle"
1483 " requested maximum packet size %u, the maximum"
1484 " supported are %u", dev->data->port_id,
1485 1 << sges_n, max_rx_pkt_len,
1486 1u << MLX5_MAX_LOG_RQ_SEGS);
1487 rte_errno = ENOTSUP;
1490 tmpl->rxq.sges_n = sges_n;
1491 max_lro_size = max_rx_pkt_len;
1493 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1495 "port %u MPRQ is requested but cannot be enabled\n"
1496 " (requested: pkt_sz = %u, desc_num = %u,"
1497 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1498 " supported: min_rxqs_num = %u,"
1499 " min_stride_sz = %u, max_stride_sz = %u).",
1500 dev->data->port_id, non_scatter_min_mbuf_size,
1502 config->mprq.stride_size_n ?
1503 (1U << config->mprq.stride_size_n) :
1504 (1U << mprq_stride_size),
1505 config->mprq.stride_num_n ?
1506 (1U << config->mprq.stride_num_n) :
1507 (1U << mprq_stride_nums),
1508 config->mprq.min_rxqs_num,
1509 (1U << config->mprq.min_stride_size_n),
1510 (1U << config->mprq.max_stride_size_n));
1511 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1512 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1513 if (desc % (1 << tmpl->rxq.sges_n)) {
1515 "port %u number of Rx queue descriptors (%u) is not a"
1516 " multiple of SGEs per packet (%u)",
1519 1 << tmpl->rxq.sges_n);
1523 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1524 /* Toggle RX checksum offload if hardware supports it. */
1525 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1526 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1527 /* Configure VLAN stripping. */
1528 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1529 /* By default, FCS (CRC) is stripped by hardware. */
1530 tmpl->rxq.crc_present = 0;
1531 tmpl->rxq.lro = lro_on_queue;
1532 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1533 if (config->hw_fcs_strip) {
1535 * RQs used for LRO-enabled TIRs should not be
1536 * configured to scatter the FCS.
1540 "port %u CRC stripping has been "
1541 "disabled but will still be performed "
1542 "by hardware, because LRO is enabled",
1543 dev->data->port_id);
1545 tmpl->rxq.crc_present = 1;
1548 "port %u CRC stripping has been disabled but will"
1549 " still be performed by hardware, make sure MLNX_OFED"
1550 " and firmware are up to date",
1551 dev->data->port_id);
1555 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1556 " incoming frames to hide it",
1558 tmpl->rxq.crc_present ? "disabled" : "enabled",
1559 tmpl->rxq.crc_present << 2);
1561 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1562 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1563 tmpl->rxq.port_id = dev->data->port_id;
1566 tmpl->rxq.elts_n = log2above(desc);
1567 tmpl->rxq.rq_repl_thresh =
1568 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
1570 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1572 tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
1574 tmpl->rxq.idx = idx;
1575 rte_atomic32_inc(&tmpl->refcnt);
1576 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1584 * Create a DPDK Rx hairpin queue.
1587 * Pointer to Ethernet device.
1591 * Number of descriptors to configure in queue.
1592 * @param hairpin_conf
1593 * The hairpin binding configuration.
1596 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1598 struct mlx5_rxq_ctrl *
1599 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1600 const struct rte_eth_hairpin_conf *hairpin_conf)
1602 struct mlx5_priv *priv = dev->data->dev_private;
1603 struct mlx5_rxq_ctrl *tmpl;
1605 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1611 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1612 tmpl->socket = SOCKET_ID_ANY;
1613 tmpl->rxq.rss_hash = 0;
1614 tmpl->rxq.port_id = dev->data->port_id;
1616 tmpl->rxq.mp = NULL;
1617 tmpl->rxq.elts_n = log2above(desc);
1618 tmpl->rxq.elts = NULL;
1619 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
1620 tmpl->hairpin_conf = *hairpin_conf;
1621 tmpl->rxq.idx = idx;
1622 rte_atomic32_inc(&tmpl->refcnt);
1623 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1631 * Pointer to Ethernet device.
1636 * A pointer to the queue if it exists, NULL otherwise.
1638 struct mlx5_rxq_ctrl *
1639 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1641 struct mlx5_priv *priv = dev->data->dev_private;
1642 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1643 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1646 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1647 rte_atomic32_inc(&rxq_ctrl->refcnt);
1653 * Release a Rx queue.
1656 * Pointer to Ethernet device.
1661 * 1 while a reference on it exists, 0 when freed.
1664 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1666 struct mlx5_priv *priv = dev->data->dev_private;
1667 struct mlx5_rxq_ctrl *rxq_ctrl;
1669 if (!(*priv->rxqs)[idx])
1671 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1672 if (!rte_atomic32_dec_and_test(&rxq_ctrl->refcnt))
1674 if (rxq_ctrl->obj) {
1675 priv->obj_ops->rxq_obj_release(rxq_ctrl->obj);
1676 LIST_REMOVE(rxq_ctrl->obj, next);
1677 mlx5_free(rxq_ctrl->obj);
1678 rxq_ctrl->obj = NULL;
1680 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
1681 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1682 rxq_free_elts(rxq_ctrl);
1684 LIST_REMOVE(rxq_ctrl, next);
1685 mlx5_free(rxq_ctrl);
1686 (*priv->rxqs)[idx] = NULL;
1691 * Verify the Rx Queue list is empty.
1694 * Pointer to Ethernet device.
1697 * The number of objects not released.
1700 mlx5_rxq_verify(struct rte_eth_dev *dev)
1702 struct mlx5_priv *priv = dev->data->dev_private;
1703 struct mlx5_rxq_ctrl *rxq_ctrl;
1706 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1707 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1708 dev->data->port_id, rxq_ctrl->rxq.idx);
1715 * Get a Rx queue type.
1718 * Pointer to Ethernet device.
1723 * The Rx queue type.
1726 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
1728 struct mlx5_priv *priv = dev->data->dev_private;
1729 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1731 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1732 rxq_ctrl = container_of((*priv->rxqs)[idx],
1733 struct mlx5_rxq_ctrl,
1735 return rxq_ctrl->type;
1737 return MLX5_RXQ_TYPE_UNDEFINED;
1741 * Create an indirection table.
1744 * Pointer to Ethernet device.
1746 * Queues entering the indirection table.
1748 * Number of queues in the array.
1751 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1753 static struct mlx5_ind_table_obj *
1754 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
1755 uint32_t queues_n, enum mlx5_ind_tbl_type type)
1757 struct mlx5_priv *priv = dev->data->dev_private;
1758 struct mlx5_ind_table_obj *ind_tbl;
1759 unsigned int i = 0, j = 0, k = 0;
1761 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
1762 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
1767 ind_tbl->type = type;
1768 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
1769 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1770 log2above(queues_n) :
1771 log2above(priv->config.ind_table_max_size);
1772 struct ibv_wq *wq[1 << wq_n];
1774 for (i = 0; i != queues_n; ++i) {
1775 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
1779 wq[i] = rxq->obj->wq;
1780 ind_tbl->queues[i] = queues[i];
1782 ind_tbl->queues_n = queues_n;
1783 /* Finalise indirection table. */
1784 k = i; /* Retain value of i for use in error case. */
1785 for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
1787 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1789 &(struct ibv_rwq_ind_table_init_attr){
1790 .log_ind_tbl_size = wq_n,
1794 if (!ind_tbl->ind_table) {
1798 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
1799 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
1800 const unsigned int rqt_n =
1801 1 << (rte_is_power_of_2(queues_n) ?
1802 log2above(queues_n) :
1803 log2above(priv->config.ind_table_max_size));
1805 rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
1806 rqt_n * sizeof(uint32_t), 0,
1809 DRV_LOG(ERR, "port %u cannot allocate RQT resources",
1810 dev->data->port_id);
1814 rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
1815 rqt_attr->rqt_actual_size = rqt_n;
1816 for (i = 0; i != queues_n; ++i) {
1817 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
1821 rqt_attr->rq_list[i] = rxq->obj->rq->id;
1822 ind_tbl->queues[i] = queues[i];
1824 k = i; /* Retain value of i for use in error case. */
1825 for (j = 0; k != rqt_n; ++k, ++j)
1826 rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
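/*
 * The remaining RQT entries are filled by wrapping around the configured
 * queues so the table reaches its power-of-two size; e.g. 3 queues in an
 * 8-entry table produce the RQ order 0,1,2,0,1,2,0,1.
 */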
1827 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
1829 mlx5_free(rqt_attr);
1830 if (!ind_tbl->rqt) {
1831 DRV_LOG(ERR, "port %u cannot create DevX RQT",
1832 dev->data->port_id);
1836 ind_tbl->queues_n = queues_n;
1838 rte_atomic32_inc(&ind_tbl->refcnt);
1839 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1842 for (j = 0; j < i; j++)
1843 mlx5_rxq_release(dev, ind_tbl->queues[j]);
1845 DEBUG("port %u cannot create indirection table", dev->data->port_id);
1850 * Get an indirection table.
1853 * Pointer to Ethernet device.
1855 * Queues entering the indirection table.
1857 * Number of queues in the array.
1860 * An indirection table if found.
1862 static struct mlx5_ind_table_obj *
1863 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1866 struct mlx5_priv *priv = dev->data->dev_private;
1867 struct mlx5_ind_table_obj *ind_tbl;
1869 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1870 if ((ind_tbl->queues_n == queues_n) &&
1871 (memcmp(ind_tbl->queues, queues,
1872 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1879 rte_atomic32_inc(&ind_tbl->refcnt);
1880 for (i = 0; i != ind_tbl->queues_n; ++i)
1881 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1887 * Release an indirection table.
1890 * Pointer to Ethernet device.
1892 * Indirection table to release.
1895 * 1 while a reference on it exists, 0 when freed.
1898 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
1899 struct mlx5_ind_table_obj *ind_tbl)
1903 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
1904 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
1905 claim_zero(mlx5_glue->destroy_rwq_ind_table
1906 (ind_tbl->ind_table));
1907 else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
1908 claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
1910 for (i = 0; i != ind_tbl->queues_n; ++i)
1911 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1912 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1913 LIST_REMOVE(ind_tbl, next);
1921 * Verify the indirection table object list is empty.
1924 * Pointer to Ethernet device.
1927 * The number of objects not released.
1930 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
1932 struct mlx5_priv *priv = dev->data->dev_private;
1933 struct mlx5_ind_table_obj *ind_tbl;
1936 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1938 "port %u indirection table obj %p still referenced",
1939 dev->data->port_id, (void *)ind_tbl);
1946 * Create an Rx Hash queue.
1949 * Pointer to Ethernet device.
1951 * RSS key for the Rx hash queue.
1952 * @param rss_key_len
1954 * @param hash_fields
1955 * Verbs protocol hash field to make the RSS on.
1957 * Queues entering the hash queue. In case of empty hash_fields only the
1958 * first queue index will be taken for the indirection table.
1965 * The index of the initialised Verbs/DevX object, 0 otherwise and rte_errno is set.
1968 mlx5_hrxq_new(struct rte_eth_dev *dev,
1969 const uint8_t *rss_key, uint32_t rss_key_len,
1970 uint64_t hash_fields,
1971 const uint16_t *queues, uint32_t queues_n,
1972 int tunnel __rte_unused)
1974 struct mlx5_priv *priv = dev->data->dev_private;
1975 struct mlx5_hrxq *hrxq = NULL;
1976 uint32_t hrxq_idx = 0;
1977 struct ibv_qp *qp = NULL;
1978 struct mlx5_ind_table_obj *ind_tbl;
1980 struct mlx5_devx_obj *tir = NULL;
1981 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
1982 struct mlx5_rxq_ctrl *rxq_ctrl =
1983 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1985 queues_n = hash_fields ? queues_n : 1;
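/*
 * Without hash fields the traffic cannot be spread, so only the first queue
 * is used and a single-entry indirection table is looked up or created
 * below.
 */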
1986 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
1988 enum mlx5_ind_tbl_type type;
1990 type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
1991 MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
1992 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
1998 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
1999 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2000 struct mlx5dv_qp_init_attr qp_init_attr;
2002 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2004 qp_init_attr.comp_mask =
2005 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2006 qp_init_attr.create_flags =
2007 MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2009 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2010 if (dev->data->dev_conf.lpbk_mode) {
2012 * Allow packets sent from the NIC to loop back
2013 * w/o source MAC check.
2015 qp_init_attr.comp_mask |=
2016 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2017 qp_init_attr.create_flags |=
2018 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2021 qp = mlx5_glue->dv_create_qp
2023 &(struct ibv_qp_init_attr_ex){
2024 .qp_type = IBV_QPT_RAW_PACKET,
2026 IBV_QP_INIT_ATTR_PD |
2027 IBV_QP_INIT_ATTR_IND_TABLE |
2028 IBV_QP_INIT_ATTR_RX_HASH,
2029 .rx_hash_conf = (struct ibv_rx_hash_conf){
2031 IBV_RX_HASH_FUNC_TOEPLITZ,
2032 .rx_hash_key_len = rss_key_len,
2034 (void *)(uintptr_t)rss_key,
2035 .rx_hash_fields_mask = hash_fields,
2037 .rwq_ind_tbl = ind_tbl->ind_table,
2042 qp = mlx5_glue->create_qp_ex
2044 &(struct ibv_qp_init_attr_ex){
2045 .qp_type = IBV_QPT_RAW_PACKET,
2047 IBV_QP_INIT_ATTR_PD |
2048 IBV_QP_INIT_ATTR_IND_TABLE |
2049 IBV_QP_INIT_ATTR_RX_HASH,
2050 .rx_hash_conf = (struct ibv_rx_hash_conf){
2052 IBV_RX_HASH_FUNC_TOEPLITZ,
2053 .rx_hash_key_len = rss_key_len,
2055 (void *)(uintptr_t)rss_key,
2056 .rx_hash_fields_mask = hash_fields,
2058 .rwq_ind_tbl = ind_tbl->ind_table,
2066 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2067 struct mlx5_devx_tir_attr tir_attr;
2071 /* Enable TIR LRO only if all the queues were configured for it. */
2072 for (i = 0; i < queues_n; ++i) {
2073 if (!(*priv->rxqs)[queues[i]]->lro) {
2078 memset(&tir_attr, 0, sizeof(tir_attr));
2079 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2080 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2081 tir_attr.tunneled_offload_en = !!tunnel;
2082 /* If needed, translate hash_fields bitmap to PRM format. */
2084 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2085 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2086 hash_fields & IBV_RX_HASH_INNER ?
2087 &tir_attr.rx_hash_field_selector_inner :
2088 &tir_attr.rx_hash_field_selector_outer;
2090 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2091 &tir_attr.rx_hash_field_selector_outer;
2094 /* 1 bit: 0: IPv4, 1: IPv6. */
2095 rx_hash_field_select->l3_prot_type =
2096 !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
2097 /* 1 bit: 0: TCP, 1: UDP. */
2098 rx_hash_field_select->l4_prot_type =
2099 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
2100 /* Bitmask which sets which fields to use in RX Hash. */
2101 rx_hash_field_select->selected_fields =
2102 ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
2103 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
2104 (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
2105 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
2106 (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
2107 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
2108 (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
2109 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
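/*
 * Example (illustrative): for an outer IPv4/UDP 4-tuple hash (src/dst IP
 * plus src/dst UDP port) the translation above yields l3_prot_type = 0
 * (IPv4), l4_prot_type = 1 (UDP) and all four SRC_IP/DST_IP/L4_SPORT/
 * L4_DPORT bits set in selected_fields.
 */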
2111 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
2112 tir_attr.transport_domain = priv->sh->td->id;
2114 tir_attr.transport_domain = priv->sh->tdn;
2115 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key,
2116 MLX5_RSS_HASH_KEY_LEN);
2117 tir_attr.indirect_table = ind_tbl->rqt->id;
2118 if (dev->data->dev_conf.lpbk_mode)
2119 tir_attr.self_lb_block =
2120 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
2122 tir_attr.lro_timeout_period_usecs =
2123 priv->config.lro.timeout;
2124 tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
2125 tir_attr.lro_enable_mask =
2126 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2127 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
2129 tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
2131 DRV_LOG(ERR, "port %u cannot create DevX TIR",
2132 dev->data->port_id);
2137 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2140 hrxq->ind_table = ind_tbl;
2141 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2143 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2145 mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2146 if (!hrxq->action) {
2151 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2153 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2154 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
2156 if (!hrxq->action) {
2162 hrxq->rss_key_len = rss_key_len;
2163 hrxq->hash_fields = hash_fields;
2164 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2165 rte_atomic32_inc(&hrxq->refcnt);
2166 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
2170 err = rte_errno; /* Save rte_errno before cleanup. */
2171 mlx5_ind_table_obj_release(dev, ind_tbl);
2173 claim_zero(mlx5_glue->destroy_qp(qp));
2175 claim_zero(mlx5_devx_cmd_destroy(tir));
2177 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2178 rte_errno = err; /* Restore rte_errno. */
2183 * Get an Rx Hash queue.
2186 * Pointer to Ethernet device.
2188 * RSS configuration for the Rx hash queue.
2190 * Queues entering the hash queue. In case of empty hash_fields only the
2191 * first queue index will be taken for the indirection table.
2196 * A hash Rx queue index on success.
2199 mlx5_hrxq_get(struct rte_eth_dev *dev,
2200 const uint8_t *rss_key, uint32_t rss_key_len,
2201 uint64_t hash_fields,
2202 const uint16_t *queues, uint32_t queues_n)
2204 struct mlx5_priv *priv = dev->data->dev_private;
2205 struct mlx5_hrxq *hrxq;
2208 queues_n = hash_fields ? queues_n : 1;
2209 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2211 struct mlx5_ind_table_obj *ind_tbl;
2213 if (hrxq->rss_key_len != rss_key_len)
2215 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2217 if (hrxq->hash_fields != hash_fields)
2219 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2222 if (ind_tbl != hrxq->ind_table) {
2223 mlx5_ind_table_obj_release(dev, ind_tbl);
2226 rte_atomic32_inc(&hrxq->refcnt);
2233 * Release the hash Rx queue.
2236 * Pointer to Ethernet device.
2238 * Index to Hash Rx queue to release.
2241 * 1 while a reference on it exists, 0 when freed.
2244 mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2246 struct mlx5_priv *priv = dev->data->dev_private;
2247 struct mlx5_hrxq *hrxq;
2249 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2252 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2253 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2254 mlx5_glue->destroy_flow_action(hrxq->action);
2256 if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
2257 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2258 else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
2259 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
2260 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2261 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
2262 hrxq_idx, hrxq, next);
2263 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2266 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
2271 * Verify the hash Rx queue list is empty.
2274 * Pointer to Ethernet device.
2277 * The number of objects not released.
2280 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2282 struct mlx5_priv *priv = dev->data->dev_private;
2283 struct mlx5_hrxq *hrxq;
2287 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2290 "port %u hash Rx queue %p still referenced",
2291 dev->data->port_id, (void *)hrxq);
2298 * Create a drop Rx queue Verbs/DevX object.
2301 * Pointer to Ethernet device.
2304 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2306 static struct mlx5_rxq_obj *
2307 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
2309 struct mlx5_priv *priv = dev->data->dev_private;
2310 struct ibv_context *ctx = priv->sh->ctx;
2312 struct ibv_wq *wq = NULL;
2313 struct mlx5_rxq_obj *rxq;
2315 if (priv->drop_queue.rxq)
2316 return priv->drop_queue.rxq;
2317 cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2319 DEBUG("port %u cannot allocate CQ for drop queue",
2320 dev->data->port_id);
2324 wq = mlx5_glue->create_wq(ctx,
2325 &(struct ibv_wq_init_attr){
2326 .wq_type = IBV_WQT_RQ,
2333 DEBUG("port %u cannot allocate WQ for drop queue",
2334 dev->data->port_id);
2338 rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
2340 DEBUG("port %u cannot allocate drop Rx queue memory",
2341 dev->data->port_id);
2347 priv->drop_queue.rxq = rxq;
2351 claim_zero(mlx5_glue->destroy_wq(wq));
2353 claim_zero(mlx5_glue->destroy_cq(cq));
2358 * Release a drop Rx queue Verbs/DevX object.
2361 * Pointer to Ethernet device.
2364 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2367 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2369 struct mlx5_priv *priv = dev->data->dev_private;
2370 struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2373 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2375 claim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));
2377 priv->drop_queue.rxq = NULL;
2381 * Create a drop indirection table.
2384 * Pointer to Ethernet device.
2387 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2389 static struct mlx5_ind_table_obj *
2390 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2392 struct mlx5_priv *priv = dev->data->dev_private;
2393 struct mlx5_ind_table_obj *ind_tbl;
2394 struct mlx5_rxq_obj *rxq;
2395 struct mlx5_ind_table_obj tmpl;
2397 rxq = mlx5_rxq_obj_drop_new(dev);
2400 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2402 &(struct ibv_rwq_ind_table_init_attr){
2403 .log_ind_tbl_size = 0,
2404 .ind_tbl = (struct ibv_wq **)&rxq->wq,
2407 if (!tmpl.ind_table) {
2408 DEBUG("port %u cannot allocate indirection table for drop"
2410 dev->data->port_id);
2414 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,
2420 ind_tbl->ind_table = tmpl.ind_table;
2423 mlx5_rxq_obj_drop_release(dev);
2428 * Release a drop indirection table.
2431 * Pointer to Ethernet device.
2434 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2436 struct mlx5_priv *priv = dev->data->dev_private;
2437 struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2439 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2440 mlx5_rxq_obj_drop_release(dev);
2442 priv->drop_queue.hrxq->ind_table = NULL;
2446 * Create a drop Rx Hash queue.
2449 * Pointer to Ethernet device.
2452 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2455 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2457 struct mlx5_priv *priv = dev->data->dev_private;
2458 struct mlx5_ind_table_obj *ind_tbl = NULL;
2459 struct ibv_qp *qp = NULL;
2460 struct mlx5_hrxq *hrxq = NULL;
2462 if (priv->drop_queue.hrxq) {
2463 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2464 return priv->drop_queue.hrxq;
2466 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2469 "port %u cannot allocate memory for drop queue",
2470 dev->data->port_id);
2474 priv->drop_queue.hrxq = hrxq;
2475 ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2478 hrxq->ind_table = ind_tbl;
2479 qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2480 &(struct ibv_qp_init_attr_ex){
2481 .qp_type = IBV_QPT_RAW_PACKET,
2483 IBV_QP_INIT_ATTR_PD |
2484 IBV_QP_INIT_ATTR_IND_TABLE |
2485 IBV_QP_INIT_ATTR_RX_HASH,
2486 .rx_hash_conf = (struct ibv_rx_hash_conf){
2488 IBV_RX_HASH_FUNC_TOEPLITZ,
2489 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2490 .rx_hash_key = rss_hash_default_key,
2491 .rx_hash_fields_mask = 0,
2493 .rwq_ind_tbl = ind_tbl->ind_table,
2497 DEBUG("port %u cannot allocate QP for drop queue",
2498 dev->data->port_id);
2503 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2504 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2505 if (!hrxq->action) {
2510 rte_atomic32_set(&hrxq->refcnt, 1);
2513 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2514 if (hrxq && hrxq->action)
2515 mlx5_glue->destroy_flow_action(hrxq->action);
2518 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2520 mlx5_ind_table_obj_drop_release(dev);
2522 priv->drop_queue.hrxq = NULL;
2529 * Release a drop hash Rx queue.
2532 * Pointer to Ethernet device.
2535 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2537 struct mlx5_priv *priv = dev->data->dev_private;
2538 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2540 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2541 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2542 mlx5_glue->destroy_flow_action(hrxq->action);
2544 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2545 mlx5_ind_table_obj_drop_release(dev);
2547 priv->drop_queue.hrxq = NULL;
2553 * Set the Rx queue timestamp conversion parameters.
2556 * Pointer to the Ethernet device structure.
2559 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2561 struct mlx5_priv *priv = dev->data->dev_private;
2562 struct mlx5_dev_ctx_shared *sh = priv->sh;
2563 struct mlx5_rxq_data *data;
2566 for (i = 0; i != priv->rxqs_n; ++i) {
2567 if (!(*priv->rxqs)[i])
2569 data = (*priv->rxqs)[i];
2571 data->rt_timestamp = priv->config.rt_timestamp;