1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
16 #pragma GCC diagnostic ignored "-Wpedantic"
18 #include <infiniband/verbs.h>
19 #include <infiniband/mlx5dv.h>
21 #pragma GCC diagnostic error "-Wpedantic"
25 #include <rte_malloc.h>
26 #include <rte_ethdev_driver.h>
27 #include <rte_common.h>
28 #include <rte_interrupts.h>
29 #include <rte_debug.h>
31 #include <rte_eal_paging.h>
33 #include <mlx5_glue.h>
34 #include <mlx5_devx_cmds.h>
35 #include <mlx5_malloc.h>
37 #include "mlx5_defs.h"
39 #include "mlx5_common_os.h"
40 #include "mlx5_rxtx.h"
41 #include "mlx5_utils.h"
42 #include "mlx5_autoconf.h"
43 #include "mlx5_flow.h"
46 /* Default RSS hash key also used for ConnectX-3. */
47 uint8_t rss_hash_default_key[] = {
48 0x2c, 0xc6, 0x81, 0xd1,
49 0x5b, 0xdb, 0xf4, 0xf7,
50 0xfc, 0xa2, 0x83, 0x19,
51 0xdb, 0x1a, 0x3e, 0x94,
52 0x6b, 0x9e, 0x38, 0xd9,
53 0x2c, 0x9c, 0x03, 0xd1,
54 0xad, 0x99, 0x44, 0xa7,
55 0xd9, 0x56, 0x3d, 0x59,
56 0x06, 0x3c, 0x25, 0xf3,
57 0xfc, 0x1f, 0xdc, 0x2a,
60 /* Length of the default RSS hash key. */
61 static_assert(MLX5_RSS_HASH_KEY_LEN ==
62 (unsigned int)sizeof(rss_hash_default_key),
63 "wrong RSS default key size.");
66 * Check whether Multi-Packet RQ can be enabled for the device.
69 * Pointer to Ethernet device.
72 * 1 if supported, negative errno value if not.
75 mlx5_check_mprq_support(struct rte_eth_dev *dev)
77 struct mlx5_priv *priv = dev->data->dev_private;
79 if (priv->config.mprq.enabled &&
80 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
86 * Check whether Multi-Packet RQ is enabled for the Rx queue.
89 * Pointer to receive queue structure.
92 * 0 if disabled, otherwise enabled.
95 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
97 return rxq->strd_num_n > 0;
101 * Check whether Multi-Packet RQ is enabled for the device.
104 * Pointer to Ethernet device.
107 * 0 if disabled, otherwise enabled.
110 mlx5_mprq_enabled(struct rte_eth_dev *dev)
112 struct mlx5_priv *priv = dev->data->dev_private;
117 if (mlx5_check_mprq_support(dev) < 0)
119 /* All the configured queues should be enabled. */
120 for (i = 0; i < priv->rxqs_n; ++i) {
121 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
122 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
123 (rxq, struct mlx5_rxq_ctrl, rxq);
125 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
128 if (mlx5_rxq_mprq_enabled(rxq))
131 /* Multi-Packet RQ can't be partially configured. */
132 MLX5_ASSERT(n == 0 || n == n_ibv);
137 * Allocate RX queue elements for Multi-Packet RQ.
140 * Pointer to RX queue structure.
143 * 0 on success, a negative errno value otherwise and rte_errno is set.
146 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
148 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
149 unsigned int wqe_n = 1 << rxq->elts_n;
153 /* Iterate on segments. */
154 for (i = 0; i <= wqe_n; ++i) {
155 struct mlx5_mprq_buf *buf;
157 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
158 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
163 (*rxq->mprq_bufs)[i] = buf;
165 rxq->mprq_repl = buf;
168 "port %u Rx queue %u allocated and configured %u segments",
169 rxq->port_id, rxq->idx, wqe_n);
172 err = rte_errno; /* Save rte_errno before cleanup. */
174 for (i = 0; (i != wqe_n); ++i) {
175 if ((*rxq->mprq_bufs)[i] != NULL)
176 rte_mempool_put(rxq->mprq_mp,
177 (*rxq->mprq_bufs)[i]);
178 (*rxq->mprq_bufs)[i] = NULL;
180 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
181 rxq->port_id, rxq->idx);
182 rte_errno = err; /* Restore rte_errno. */
187 * Allocate RX queue elements for Single-Packet RQ.
190 * Pointer to RX queue structure.
193 * 0 on success, errno value on failure.
196 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
198 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
199 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
203 /* Iterate on segments. */
204 for (i = 0; (i != elts_n); ++i) {
205 struct rte_mbuf *buf;
207 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
209 DRV_LOG(ERR, "port %u empty mbuf pool",
210 PORT_ID(rxq_ctrl->priv));
214 /* Headroom is reserved by rte_pktmbuf_alloc(). */
215 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
216 /* Buffer is supposed to be empty. */
217 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
218 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
219 MLX5_ASSERT(!buf->next);
220 /* Only the first segment keeps headroom. */
222 SET_DATA_OFF(buf, 0);
223 PORT(buf) = rxq_ctrl->rxq.port_id;
224 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
225 PKT_LEN(buf) = DATA_LEN(buf);
227 (*rxq_ctrl->rxq.elts)[i] = buf;
229 /* If Rx vector is activated. */
230 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
231 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
232 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
233 struct rte_pktmbuf_pool_private *priv =
234 (struct rte_pktmbuf_pool_private *)
235 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
238 /* Initialize default rearm_data for vPMD. */
239 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
240 rte_mbuf_refcnt_set(mbuf_init, 1);
241 mbuf_init->nb_segs = 1;
242 mbuf_init->port = rxq->port_id;
243 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
244 mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
246 * prevent compiler reordering:
247 * rearm_data covers previous fields.
249 rte_compiler_barrier();
250 rxq->mbuf_initializer =
251 *(rte_xmm_t *)&mbuf_init->rearm_data;
252 /* Padding with a fake mbuf for vectorized Rx. */
253 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
254 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
257 "port %u Rx queue %u allocated and configured %u segments"
259 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
260 elts_n / (1 << rxq_ctrl->rxq.sges_n));
263 err = rte_errno; /* Save rte_errno before cleanup. */
265 for (i = 0; (i != elts_n); ++i) {
266 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
267 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
268 (*rxq_ctrl->rxq.elts)[i] = NULL;
270 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
271 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
272 rte_errno = err; /* Restore rte_errno. */
277 * Allocate RX queue elements.
280 * Pointer to RX queue structure.
283 * 0 on success, errno value on failure.
286 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
288 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
289 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
293 * Free RX queue elements for Multi-Packet RQ.
296 * Pointer to RX queue structure.
299 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
301 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
304 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
305 rxq->port_id, rxq->idx);
306 if (rxq->mprq_bufs == NULL)
308 MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
309 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
310 if ((*rxq->mprq_bufs)[i] != NULL)
311 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
312 (*rxq->mprq_bufs)[i] = NULL;
314 if (rxq->mprq_repl != NULL) {
315 mlx5_mprq_buf_free(rxq->mprq_repl);
316 rxq->mprq_repl = NULL;
321 * Free RX queue elements for Single-Packet RQ.
324 * Pointer to RX queue structure.
327 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
329 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
330 const uint16_t q_n = (1 << rxq->elts_n);
331 const uint16_t q_mask = q_n - 1;
332 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
335 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
336 PORT_ID(rxq_ctrl->priv), rxq->idx);
337 if (rxq->elts == NULL)
340	 * Some mbufs in the ring belong to the application; they cannot be freed.
343 if (mlx5_rxq_check_vec_support(rxq) > 0) {
344 for (i = 0; i < used; ++i)
345 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
346 rxq->rq_pi = rxq->rq_ci;
348 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
349 if ((*rxq->elts)[i] != NULL)
350 rte_pktmbuf_free_seg((*rxq->elts)[i]);
351 (*rxq->elts)[i] = NULL;
356 * Free RX queue elements.
359 * Pointer to RX queue structure.
362 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
364 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
365 rxq_free_elts_mprq(rxq_ctrl);
367 rxq_free_elts_sprq(rxq_ctrl);
371 * Returns the per-queue supported offloads.
374 * Pointer to Ethernet device.
377 * Supported Rx offloads.
380 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
382 struct mlx5_priv *priv = dev->data->dev_private;
383 struct mlx5_dev_config *config = &priv->config;
384 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
385 DEV_RX_OFFLOAD_TIMESTAMP |
386 DEV_RX_OFFLOAD_JUMBO_FRAME |
387 DEV_RX_OFFLOAD_RSS_HASH);
389 if (config->hw_fcs_strip)
390 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
393 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
394 DEV_RX_OFFLOAD_UDP_CKSUM |
395 DEV_RX_OFFLOAD_TCP_CKSUM);
396 if (config->hw_vlan_strip)
397 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
398 if (MLX5_LRO_SUPPORTED(dev))
399 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
405 * Returns the per-port supported offloads.
408 * Supported Rx offloads.
411 mlx5_get_rx_port_offloads(void)
413 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
419 * Verify if the queue can be released.
422 * Pointer to Ethernet device.
427 * 1 if the queue can be released
428 * 0 if the queue can not be released, there are references to it.
429 * Negative errno and rte_errno is set if queue doesn't exist.
432 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
434 struct mlx5_priv *priv = dev->data->dev_private;
435 struct mlx5_rxq_ctrl *rxq_ctrl;
437 if (!(*priv->rxqs)[idx]) {
441 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
442 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
446 * Rx queue presetup checks.
449 * Pointer to Ethernet device structure.
453 * Number of descriptors to configure in queue.
456 * 0 on success, a negative errno value otherwise and rte_errno is set.
459 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
461 struct mlx5_priv *priv = dev->data->dev_private;
463 if (!rte_is_power_of_2(*desc)) {
464 *desc = 1 << log2above(*desc);
466 "port %u increased number of descriptors in Rx queue %u"
467 " to the next power of two (%d)",
468 dev->data->port_id, idx, *desc);
470 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
471 dev->data->port_id, idx, *desc);
472 if (idx >= priv->rxqs_n) {
473 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
474 dev->data->port_id, idx, priv->rxqs_n);
475 rte_errno = EOVERFLOW;
478 if (!mlx5_rxq_releasable(dev, idx)) {
479 DRV_LOG(ERR, "port %u unable to release queue index %u",
480 dev->data->port_id, idx);
484 mlx5_rxq_release(dev, idx);
491 * Pointer to Ethernet device structure.
495 * Number of descriptors to configure in queue.
497 * NUMA socket on which memory must be allocated.
499 * Thresholds parameters.
501 * Memory pool for buffer allocations.
504 * 0 on success, a negative errno value otherwise and rte_errno is set.
507 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
508 unsigned int socket, const struct rte_eth_rxconf *conf,
509 struct rte_mempool *mp)
511 struct mlx5_priv *priv = dev->data->dev_private;
512 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
513 struct mlx5_rxq_ctrl *rxq_ctrl =
514 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
517 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
520 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
522 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
523 dev->data->port_id, idx);
527 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
528 dev->data->port_id, idx);
529 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
536 * Pointer to Ethernet device structure.
540 * Number of descriptors to configure in queue.
541 * @param hairpin_conf
542 * Hairpin configuration parameters.
545 * 0 on success, a negative errno value otherwise and rte_errno is set.
548 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
550 const struct rte_eth_hairpin_conf *hairpin_conf)
552 struct mlx5_priv *priv = dev->data->dev_private;
553 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
554 struct mlx5_rxq_ctrl *rxq_ctrl =
555 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
558 res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
561 if (hairpin_conf->peer_count != 1 ||
562 hairpin_conf->peers[0].port != dev->data->port_id ||
563 hairpin_conf->peers[0].queue >= priv->txqs_n) {
564		DRV_LOG(ERR, "port %u unable to set up hairpin queue index %u:"
565			" invalid hairpin configuration", dev->data->port_id,
570 rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
572 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
573 dev->data->port_id, idx);
577 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
578 dev->data->port_id, idx);
579 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
584 * DPDK callback to release a RX queue.
587 * Generic RX queue pointer.
590 mlx5_rx_queue_release(void *dpdk_rxq)
592 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
593 struct mlx5_rxq_ctrl *rxq_ctrl;
594 struct mlx5_priv *priv;
598 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
599 priv = rxq_ctrl->priv;
600 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
601 rte_panic("port %u Rx queue %u is still used by a flow and"
602 " cannot be removed\n",
603 PORT_ID(priv), rxq->idx);
604 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
608 * Get an Rx queue Verbs/DevX object.
611 * Pointer to Ethernet device.
613 * Queue index in DPDK Rx queue array
616 * The Verbs/DevX object if it exists.
618 static struct mlx5_rxq_obj *
619 mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
621 struct mlx5_priv *priv = dev->data->dev_private;
622 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
623 struct mlx5_rxq_ctrl *rxq_ctrl;
625 if (idx >= priv->rxqs_n)
629 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
631 rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
632 return rxq_ctrl->obj;
636 * Release the resources allocated for an RQ DevX object.
639 *   Pointer to Rx queue control structure owning the DevX RQ resources.
642 rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
644 if (rxq_ctrl->rxq.wqes) {
645 mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
646 rxq_ctrl->rxq.wqes = NULL;
648 if (rxq_ctrl->wq_umem) {
649 mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
650 rxq_ctrl->wq_umem = NULL;
655 * Release Rx hairpin-related resources.
658 * Hairpin Rx queue object.
661 rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
663 struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
665 MLX5_ASSERT(rxq_obj);
666 rq_attr.state = MLX5_RQC_STATE_RST;
667 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
668 mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
669 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
673 * Release an Rx verbs/DevX queue object.
676 * Verbs/DevX Rx queue object.
679 * 1 while a reference on it exists, 0 when freed.
682 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
684 MLX5_ASSERT(rxq_obj);
685 if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
686 switch (rxq_obj->type) {
687 case MLX5_RXQ_OBJ_TYPE_IBV:
688 MLX5_ASSERT(rxq_obj->wq);
689 MLX5_ASSERT(rxq_obj->cq);
690 rxq_free_elts(rxq_obj->rxq_ctrl);
691 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
692 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
694 case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
695 MLX5_ASSERT(rxq_obj->cq);
696 MLX5_ASSERT(rxq_obj->rq);
697 rxq_free_elts(rxq_obj->rxq_ctrl);
698 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
699 rxq_release_rq_resources(rxq_obj->rxq_ctrl);
700 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
702 case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
703 MLX5_ASSERT(rxq_obj->rq);
704 rxq_obj_hairpin_release(rxq_obj);
707 if (rxq_obj->channel)
708 claim_zero(mlx5_glue->destroy_comp_channel
710 LIST_REMOVE(rxq_obj, next);
718 * Allocate queue vector and fill epoll fd list for Rx interrupts.
721 * Pointer to Ethernet device.
724 * 0 on success, a negative errno value otherwise and rte_errno is set.
727 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
729 struct mlx5_priv *priv = dev->data->dev_private;
731 unsigned int rxqs_n = priv->rxqs_n;
732 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
733 unsigned int count = 0;
734 struct rte_intr_handle *intr_handle = dev->intr_handle;
736 if (!dev->data->dev_conf.intr_conf.rxq)
738 mlx5_rx_intr_vec_disable(dev);
739 intr_handle->intr_vec = mlx5_malloc(0,
740 n * sizeof(intr_handle->intr_vec[0]),
742 if (intr_handle->intr_vec == NULL) {
744 "port %u failed to allocate memory for interrupt"
745 " vector, Rx interrupts will not be supported",
750 intr_handle->type = RTE_INTR_HANDLE_EXT;
751 for (i = 0; i != n; ++i) {
752 /* This rxq obj must not be released in this function. */
753 struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
758 /* Skip queues that cannot request interrupts. */
759 if (!rxq_obj || !rxq_obj->channel) {
760 /* Use invalid intr_vec[] index to disable entry. */
761 intr_handle->intr_vec[i] =
762 RTE_INTR_VEC_RXTX_OFFSET +
763 RTE_MAX_RXTX_INTR_VEC_ID;
766 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
768 "port %u too many Rx queues for interrupt"
769 " vector size (%d), Rx interrupts cannot be"
771 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
772 mlx5_rx_intr_vec_disable(dev);
776 fd = rxq_obj->channel->fd;
777 flags = fcntl(fd, F_GETFL);
778 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
782 "port %u failed to make Rx interrupt file"
783 " descriptor %d non-blocking for queue index"
785 dev->data->port_id, fd, i);
786 mlx5_rx_intr_vec_disable(dev);
789 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
790 intr_handle->efds[count] = fd;
794 mlx5_rx_intr_vec_disable(dev);
796 intr_handle->nb_efd = count;
801 * Clean up Rx interrupts handler.
804 * Pointer to Ethernet device.
807 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
809 struct mlx5_priv *priv = dev->data->dev_private;
810 struct rte_intr_handle *intr_handle = dev->intr_handle;
812 unsigned int rxqs_n = priv->rxqs_n;
813 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
815 if (!dev->data->dev_conf.intr_conf.rxq)
817 if (!intr_handle->intr_vec)
819 for (i = 0; i != n; ++i) {
820 struct mlx5_rxq_ctrl *rxq_ctrl;
821 struct mlx5_rxq_data *rxq_data;
823 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
824 RTE_MAX_RXTX_INTR_VEC_ID)
827		 * Need to access the queue directly to release the reference
828 * kept in mlx5_rx_intr_vec_enable().
830 rxq_data = (*priv->rxqs)[i];
831 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
833 mlx5_rxq_obj_release(rxq_ctrl->obj);
836 rte_intr_free_epoll_fd(intr_handle);
837 if (intr_handle->intr_vec)
838 mlx5_free(intr_handle->intr_vec);
839 intr_handle->nb_efd = 0;
840 intr_handle->intr_vec = NULL;
844 * MLX5 CQ notification.
847 * Pointer to receive queue structure.
849 *   Sequence number per receive queue.
852 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
855 uint32_t doorbell_hi;
857 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
859 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
860 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
861 doorbell = (uint64_t)doorbell_hi << 32;
862 doorbell |= rxq->cqn;
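	/*
	 * The 64-bit arm doorbell combines the sequence number and CQ
	 * consumer index in its upper 32 bits and the CQ number in the
	 * lower 32 bits. The CQ doorbell record is updated first, then
	 * the UAR register is written below.
	 */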
863 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
864 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
865 cq_db_reg, rxq->uar_lock_cq);
869 * DPDK callback for Rx queue interrupt enable.
872 * Pointer to Ethernet device structure.
877 * 0 on success, a negative errno value otherwise and rte_errno is set.
880 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
882 struct mlx5_priv *priv = dev->data->dev_private;
883 struct mlx5_rxq_data *rxq_data;
884 struct mlx5_rxq_ctrl *rxq_ctrl;
886 rxq_data = (*priv->rxqs)[rx_queue_id];
891 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
893 struct mlx5_rxq_obj *rxq_obj;
895 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
900 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
901 mlx5_rxq_obj_release(rxq_obj);
907 * DPDK callback for Rx queue interrupt disable.
910 * Pointer to Ethernet device structure.
915 * 0 on success, a negative errno value otherwise and rte_errno is set.
918 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
920 struct mlx5_priv *priv = dev->data->dev_private;
921 struct mlx5_rxq_data *rxq_data;
922 struct mlx5_rxq_ctrl *rxq_ctrl;
923 struct mlx5_rxq_obj *rxq_obj = NULL;
924 struct ibv_cq *ev_cq;
928 rxq_data = (*priv->rxqs)[rx_queue_id];
933 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
936 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
941 ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
942 if (ret || ev_cq != rxq_obj->cq) {
946 rxq_data->cq_arm_sn++;
947 mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
948 mlx5_rxq_obj_release(rxq_obj);
951 ret = rte_errno; /* Save rte_errno before cleanup. */
953 mlx5_rxq_obj_release(rxq_obj);
954 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
955 dev->data->port_id, rx_queue_id);
956 rte_errno = ret; /* Restore rte_errno. */
961 * Create a CQ Verbs object.
964 * Pointer to Ethernet device.
966 * Pointer to device private data.
968 * Pointer to Rx queue data.
970 * Number of CQEs in CQ.
972 * Pointer to Rx queue object data.
975 * The Verbs object initialised, NULL otherwise and rte_errno is set.
977 static struct ibv_cq *
978 mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
979 struct mlx5_rxq_data *rxq_data,
980 unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
983 struct ibv_cq_init_attr_ex ibv;
984 struct mlx5dv_cq_init_attr mlx5;
987 cq_attr.ibv = (struct ibv_cq_init_attr_ex){
989 .channel = rxq_obj->channel,
992 cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
995 if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
997 cq_attr.mlx5.comp_mask |=
998 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
999 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1000 cq_attr.mlx5.cqe_comp_res_format =
1001 mlx5_rxq_mprq_enabled(rxq_data) ?
1002 MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
1003 MLX5DV_CQE_RES_FORMAT_HASH;
1005 cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
1008 * For vectorized Rx, it must not be doubled in order to
1009 * make cq_ci and rq_ci aligned.
1011 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
1012 cq_attr.ibv.cqe *= 2;
1013 } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
1015 "port %u Rx CQE compression is disabled for HW"
1017 dev->data->port_id);
1018 } else if (priv->config.cqe_comp && rxq_data->lro) {
1020 "port %u Rx CQE compression is disabled for LRO",
1021 dev->data->port_id);
1023 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
1024 if (priv->config.cqe_pad) {
1025 cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
1026 cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
1029 return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
1035 * Create a WQ Verbs object.
1038 * Pointer to Ethernet device.
1040 * Pointer to device private data.
1042 * Pointer to Rx queue data.
1044 * Queue index in DPDK Rx queue array
1046 * Number of WQEs in WQ.
1048 * Pointer to Rx queue object data.
1051 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1053 static struct ibv_wq *
1054 mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
1055 struct mlx5_rxq_data *rxq_data, uint16_t idx,
1056 unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
1059 struct ibv_wq_init_attr ibv;
1060 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1061 struct mlx5dv_wq_init_attr mlx5;
1065 wq_attr.ibv = (struct ibv_wq_init_attr){
1066 .wq_context = NULL, /* Could be useful in the future. */
1067 .wq_type = IBV_WQT_RQ,
1068 /* Max number of outstanding WRs. */
1069 .max_wr = wqe_n >> rxq_data->sges_n,
1070 /* Max number of scatter/gather elements in a WR. */
1071 .max_sge = 1 << rxq_data->sges_n,
1074 .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
1075 .create_flags = (rxq_data->vlan_strip ?
1076 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
1078 /* By default, FCS (CRC) is stripped by hardware. */
1079 if (rxq_data->crc_present) {
1080 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
1081 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1083 if (priv->config.hw_padding) {
1084 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1085 wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
1086 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1087 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1088 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
1089 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1092 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1093 wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
1096 if (mlx5_rxq_mprq_enabled(rxq_data)) {
1097 struct mlx5dv_striding_rq_init_attr *mprq_attr =
1098 &wq_attr.mlx5.striding_rq_attrs;
1100 wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
1101 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
1102 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
1103 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
1104 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
1107 rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
1110 rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
1114	 * Make sure the number of WRs * SGEs matches expectations, since a queue
1115 * cannot allocate more than "desc" buffers.
1117 if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
1118 wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
1120 "port %u Rx queue %u requested %u*%u but got"
1122 dev->data->port_id, idx,
1123 wqe_n >> rxq_data->sges_n,
1124 (1 << rxq_data->sges_n),
1125 wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
1126 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
1135 * Fill common fields of create RQ attributes structure.
1138 * Pointer to Rx queue data.
1140 * CQ number to use with this RQ.
1142 *   RQ attributes structure to fill.
1145 mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
1146 struct mlx5_devx_create_rq_attr *rq_attr)
1148 rq_attr->state = MLX5_RQC_STATE_RST;
1149 rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
1151 rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
1155 * Fill common fields of DevX WQ attributes structure.
1158 * Pointer to device private data.
1160 * Pointer to Rx queue control structure.
1162 *   WQ attributes structure to fill.
1165 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
1166 struct mlx5_devx_wq_attr *wq_attr)
1168 wq_attr->end_padding_mode = priv->config.cqe_pad ?
1169 MLX5_WQ_END_PAD_MODE_ALIGN :
1170 MLX5_WQ_END_PAD_MODE_NONE;
1171 wq_attr->pd = priv->sh->pdn;
1172 wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
1173 wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
1174 wq_attr->dbr_umem_valid = 1;
1175 wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
1176 wq_attr->wq_umem_valid = 1;
1180 * Create a RQ object using DevX.
1183 * Pointer to Ethernet device.
1185 * Queue index in DPDK Rx queue array
1187 * CQ number to use with this RQ.
1190 * The DevX object initialised, NULL otherwise and rte_errno is set.
1192 static struct mlx5_devx_obj *
1193 mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
1195 struct mlx5_priv *priv = dev->data->dev_private;
1196 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1197 struct mlx5_rxq_ctrl *rxq_ctrl =
1198 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1199 struct mlx5_devx_create_rq_attr rq_attr;
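	/* One RQ WQE consumes 1 << sges_n element buffers, hence the shift below. */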
1200 uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
1201 uint32_t wq_size = 0;
1202 uint32_t wqe_size = 0;
1203 uint32_t log_wqe_size = 0;
1205 struct mlx5_devx_obj *rq;
1207 memset(&rq_attr, 0, sizeof(rq_attr));
1208 /* Fill RQ attributes. */
1209 rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
1210 rq_attr.flush_in_error_en = 1;
1211 mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
1212 /* Fill WQ attributes for this RQ. */
1213 if (mlx5_rxq_mprq_enabled(rxq_data)) {
1214 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
1216 * Number of strides in each WQE:
1217 * 512*2^single_wqe_log_num_of_strides.
1219 rq_attr.wq_attr.single_wqe_log_num_of_strides =
1220 rxq_data->strd_num_n -
1221 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1222 /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
1223 rq_attr.wq_attr.single_stride_log_num_of_bytes =
1224 rxq_data->strd_sz_n -
1225 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1226 wqe_size = sizeof(struct mlx5_wqe_mprq);
1228 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
1229 wqe_size = sizeof(struct mlx5_wqe_data_seg);
1231 log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
1232 rq_attr.wq_attr.log_wq_stride = log_wqe_size;
1233 rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
1234 /* Calculate and allocate WQ memory space. */
1235	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
1236 wq_size = wqe_n * wqe_size;
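	/*
	 * For instance, assuming a 16-byte data segment WQE, elts_n = 10 and
	 * sges_n = 0 give wqe_n = 1024, log_wqe_size = 4 and
	 * wq_size = 1024 * 16 = 16 KiB of WQ memory.
	 */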
1237 size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
1238 if (alignment == (size_t)-1) {
1239 DRV_LOG(ERR, "Failed to get mem page size");
1243 buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
1244 alignment, rxq_ctrl->socket);
1247 rxq_data->wqes = buf;
1248 rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
1250 if (!rxq_ctrl->wq_umem) {
1254 mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
1255 rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
1257 rxq_release_rq_resources(rxq_ctrl);
1262 * Create the Rx hairpin queue object.
1265 * Pointer to Ethernet device.
1267 * Queue index in DPDK Rx queue array
1270 * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
1272 static struct mlx5_rxq_obj *
1273 mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
1275 struct mlx5_priv *priv = dev->data->dev_private;
1276 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1277 struct mlx5_rxq_ctrl *rxq_ctrl =
1278 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1279 struct mlx5_devx_create_rq_attr attr = { 0 };
1280 struct mlx5_rxq_obj *tmpl = NULL;
1281 uint32_t max_wq_data;
1283 MLX5_ASSERT(rxq_data);
1284 MLX5_ASSERT(!rxq_ctrl->obj);
1285 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1289 "port %u Rx queue %u cannot allocate verbs resources",
1290 dev->data->port_id, rxq_data->idx);
1294 tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
1295 tmpl->rxq_ctrl = rxq_ctrl;
1297 max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
1298	/* Jumbo frames > 9 KB and a larger number of packets should be supported. */
1299 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
1300 if (priv->config.log_hp_size > max_wq_data) {
1301 DRV_LOG(ERR, "total data size %u power of 2 is "
1302 "too large for hairpin",
1303 priv->config.log_hp_size);
1308 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
1310 attr.wq_attr.log_hairpin_data_sz =
1311 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
1312 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
1314	/* Set the number of packets to the maximum value for performance. */
1315 attr.wq_attr.log_hairpin_num_packets =
1316 attr.wq_attr.log_hairpin_data_sz -
1317 MLX5_HAIRPIN_QUEUE_STRIDE;
1318 tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
1322 "port %u Rx hairpin queue %u can't create rq object",
1323 dev->data->port_id, idx);
1328 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1329 idx, (void *)&tmpl);
1330 rte_atomic32_inc(&tmpl->refcnt);
1331 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1332 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1337 * Create the Rx queue Verbs/DevX object.
1340 * Pointer to Ethernet device.
1342 * Queue index in DPDK Rx queue array
1344 * Type of Rx queue object to create.
1347 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1349 struct mlx5_rxq_obj *
1350 mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
1351 enum mlx5_rxq_obj_type type)
1353 struct mlx5_priv *priv = dev->data->dev_private;
1354 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1355 struct mlx5_rxq_ctrl *rxq_ctrl =
1356 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1357 struct ibv_wq_attr mod;
1359 unsigned int wqe_n = 1 << rxq_data->elts_n;
1360 struct mlx5_rxq_obj *tmpl = NULL;
1361 struct mlx5dv_cq cq_info;
1362 struct mlx5dv_rwq rwq;
1364 struct mlx5dv_obj obj;
1366 MLX5_ASSERT(rxq_data);
1367 MLX5_ASSERT(!rxq_ctrl->obj);
1368 if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
1369 return mlx5_rxq_obj_hairpin_new(dev, idx);
1370 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
1371 priv->verbs_alloc_ctx.obj = rxq_ctrl;
1372 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1376 "port %u Rx queue %u cannot allocate verbs resources",
1377 dev->data->port_id, rxq_data->idx);
1382 tmpl->rxq_ctrl = rxq_ctrl;
1383 if (rxq_ctrl->irq) {
1384 tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
1385 if (!tmpl->channel) {
1386 DRV_LOG(ERR, "port %u: comp channel creation failure",
1387 dev->data->port_id);
1392 if (mlx5_rxq_mprq_enabled(rxq_data))
1393 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
1396 tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
1398 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
1399 dev->data->port_id, idx);
1403 obj.cq.in = tmpl->cq;
1404 obj.cq.out = &cq_info;
1405 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
1410 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1412 "port %u wrong MLX5_CQE_SIZE environment variable"
1413 " value: it should be set to %u",
1414 dev->data->port_id, RTE_CACHE_LINE_SIZE);
1418 DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
1419 dev->data->port_id, priv->sh->device_attr.max_qp_wr);
1420 DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
1421 dev->data->port_id, priv->sh->device_attr.max_sge);
1422 /* Allocate door-bell for types created with DevX. */
1423 if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
1424 struct mlx5_devx_dbr_page *dbr_page;
1427 dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs,
1431 rxq_ctrl->dbr_offset = dbr_offset;
1432 rxq_ctrl->dbr_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
1433 rxq_ctrl->dbr_umem_id_valid = 1;
1434 rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
1435 (uintptr_t)rxq_ctrl->dbr_offset);
1437 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
1438 tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
1441 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
1442 dev->data->port_id, idx);
1446 /* Change queue state to ready. */
1447 mod = (struct ibv_wq_attr){
1448 .attr_mask = IBV_WQ_ATTR_STATE,
1449 .wq_state = IBV_WQS_RDY,
1451 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
1454 "port %u Rx queue %u WQ state to IBV_WQS_RDY"
1455 " failed", dev->data->port_id, idx);
1459 obj.rwq.in = tmpl->wq;
1461 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
1466 rxq_data->wqes = rwq.buf;
1467 rxq_data->rq_db = rwq.dbrec;
1468 } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
1469 struct mlx5_devx_modify_rq_attr rq_attr;
1471 memset(&rq_attr, 0, sizeof(rq_attr));
1472 tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
1474 DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
1475 dev->data->port_id, idx);
1479 /* Change queue state to ready. */
1480 rq_attr.rq_state = MLX5_RQC_STATE_RST;
1481 rq_attr.state = MLX5_RQC_STATE_RDY;
1482 ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
1486 /* Fill the rings. */
1487 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
1488 rxq_data->cq_db = cq_info.dbrec;
1489 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1490 rxq_data->cq_uar = cq_info.cq_uar;
1491 rxq_data->cqn = cq_info.cqn;
1492 rxq_data->cq_arm_sn = 0;
1493 mlx5_rxq_initialize(rxq_data);
1494 rxq_data->cq_ci = 0;
1495 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1496 idx, (void *)&tmpl);
1497 rte_atomic32_inc(&tmpl->refcnt);
1498 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1499 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1503 ret = rte_errno; /* Save rte_errno before cleanup. */
1504 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
1505 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1506 else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
1507 claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
1509 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1511 claim_zero(mlx5_glue->destroy_comp_channel
1514 rte_errno = ret; /* Restore rte_errno. */
1516 if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
1517 rxq_release_rq_resources(rxq_ctrl);
1518 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1523 * Verify the Rx queue objects list is empty
1526 * Pointer to Ethernet device.
1529 * The number of objects not released.
1532 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1534 struct mlx5_priv *priv = dev->data->dev_private;
1536 struct mlx5_rxq_obj *rxq_obj;
1538 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1539 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1540 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1547 * Callback function to initialize mbufs for Multi-Packet RQ.
1550 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1551 void *_m, unsigned int i __rte_unused)
1553 struct mlx5_mprq_buf *buf = _m;
1554 struct rte_mbuf_ext_shared_info *shinfo;
1555 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1558 memset(_m, 0, sizeof(*buf));
1560 rte_atomic16_set(&buf->refcnt, 1);
1561 for (j = 0; j != strd_n; ++j) {
1562 shinfo = &buf->shinfos[j];
1563 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1564 shinfo->fcb_opaque = buf;
1569 * Free mempool of Multi-Packet RQ.
1572 * Pointer to Ethernet device.
1575 * 0 on success, negative errno value on failure.
1578 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1580 struct mlx5_priv *priv = dev->data->dev_private;
1581 struct rte_mempool *mp = priv->mprq_mp;
1586 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1587 dev->data->port_id, mp->name);
1589 * If a buffer in the pool has been externally attached to a mbuf and it
1590 * is still in use by application, destroying the Rx queue can spoil
1591	 * the packet. It is unlikely, but if the application dynamically creates
1592	 * and destroys queues while holding Rx packets, this can happen.
1594 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1595	 * RQ isn't provided by the application but managed by the PMD.
1597 if (!rte_mempool_full(mp)) {
1599 "port %u mempool for Multi-Packet RQ is still in use",
1600 dev->data->port_id);
1604 rte_mempool_free(mp);
1605 /* Unset mempool for each Rx queue. */
1606 for (i = 0; i != priv->rxqs_n; ++i) {
1607 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1611 rxq->mprq_mp = NULL;
1613 priv->mprq_mp = NULL;
1618 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1619 * mempool. If already allocated, reuse it if there are enough elements.
1620 * Otherwise, resize it.
1623 * Pointer to Ethernet device.
1626 * 0 on success, negative errno value on failure.
1629 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1631 struct mlx5_priv *priv = dev->data->dev_private;
1632 struct rte_mempool *mp = priv->mprq_mp;
1633 char name[RTE_MEMPOOL_NAMESIZE];
1634 unsigned int desc = 0;
1635 unsigned int buf_len;
1636 unsigned int obj_num;
1637 unsigned int obj_size;
1638 unsigned int strd_num_n = 0;
1639 unsigned int strd_sz_n = 0;
1641 unsigned int n_ibv = 0;
1643 if (!mlx5_mprq_enabled(dev))
1645 /* Count the total number of descriptors configured. */
1646 for (i = 0; i != priv->rxqs_n; ++i) {
1647 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1648 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1649 (rxq, struct mlx5_rxq_ctrl, rxq);
1651 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1654 desc += 1 << rxq->elts_n;
1655 /* Get the max number of strides. */
1656 if (strd_num_n < rxq->strd_num_n)
1657 strd_num_n = rxq->strd_num_n;
1658 /* Get the max size of a stride. */
1659 if (strd_sz_n < rxq->strd_sz_n)
1660 strd_sz_n = rxq->strd_sz_n;
1662 MLX5_ASSERT(strd_num_n && strd_sz_n);
1663 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1664 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1665 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
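	/*
	 * For instance, 512 strides of 2 KiB each yield a 1 MiB data area per
	 * mempool object, plus one rte_mbuf_ext_shared_info per stride and the
	 * mbuf headroom.
	 */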
1667 * Received packets can be either memcpy'd or externally referenced. In
1668	 * case the packet is attached to an mbuf as an external buffer, it is not
1669	 * possible to predict how the buffers will be queued by the application,
1670	 * so the needed number of buffers cannot be pre-allocated exactly in
1671	 * advance; enough buffers must be prepared speculatively.
1673 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1674 * received packets to buffers provided by application (rxq->mp) until
1675 * this Mempool gets available again.
1678 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1680 * rte_mempool_create_empty() has sanity check to refuse large cache
1681 * size compared to the number of elements.
1682 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
1683 * constant number 2 instead.
1685 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1686	/* Check whether a mempool is already allocated and if it can be reused. */
1687 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1688 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1689 dev->data->port_id, mp->name);
1692 } else if (mp != NULL) {
1693 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1694 dev->data->port_id, mp->name);
1696		 * If freeing fails, the mempool may still be in use; there is no way
1697		 * but to keep using the existing one. On buffer underrun,
1698		 * packets will be memcpy'd instead of attached as external buffers.
1701 if (mlx5_mprq_free_mp(dev)) {
1702 if (mp->elt_size >= obj_size)
1708 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1709 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1710 0, NULL, NULL, mlx5_mprq_buf_init,
1711 (void *)(uintptr_t)(1 << strd_num_n),
1712 dev->device->numa_node, 0);
1715 "port %u failed to allocate a mempool for"
1716 " Multi-Packet RQ, count=%u, size=%u",
1717 dev->data->port_id, obj_num, obj_size);
1723 /* Set mempool for each Rx queue. */
1724 for (i = 0; i != priv->rxqs_n; ++i) {
1725 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1726 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1727 (rxq, struct mlx5_rxq_ctrl, rxq);
1729 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1733 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1734 dev->data->port_id);
1738 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1739 sizeof(struct rte_vlan_hdr) * 2 + \
1740 sizeof(struct rte_ipv6_hdr)))
1741 #define MAX_TCP_OPTION_SIZE 40u
1742 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1743 sizeof(struct rte_tcp_hdr) + \
1744 MAX_TCP_OPTION_SIZE))
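/*
 * With the standard header sizes (14 B Ethernet, 2 x 4 B VLAN, 40 B IPv6,
 * 20 B TCP and up to 40 B of TCP options) this amounts to a 62 B maximum
 * TCP header offset and a 122 B maximum LRO header size.
 */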
1747 * Adjust the maximum LRO message size.
1750 * Pointer to Ethernet device.
1753 * @param max_lro_size
1754 *   The maximum size of an LRO packet.
1757 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1758 uint32_t max_lro_size)
1760 struct mlx5_priv *priv = dev->data->dev_private;
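	/*
	 * When the device counts the LRO message size from the start of the L4
	 * header, the L2/L3 header bytes (up to MLX5_MAX_TCP_HDR_OFFSET) are
	 * excluded from the requested maximum below.
	 */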
1762 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1763 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1764 MLX5_MAX_TCP_HDR_OFFSET)
1765 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1766 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1767 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1768 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
1769 if (priv->max_lro_msg_size)
1770 priv->max_lro_msg_size =
1771 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1773 priv->max_lro_msg_size = max_lro_size;
1775 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1776 dev->data->port_id, idx,
1777 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1781 * Create a DPDK Rx queue.
1784 * Pointer to Ethernet device.
1788 * Number of descriptors to configure in queue.
1790 * NUMA socket on which memory must be allocated.
1793 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1795 struct mlx5_rxq_ctrl *
1796 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1797 unsigned int socket, const struct rte_eth_rxconf *conf,
1798 struct rte_mempool *mp)
1800 struct mlx5_priv *priv = dev->data->dev_private;
1801 struct mlx5_rxq_ctrl *tmpl;
1802 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1803 unsigned int mprq_stride_nums;
1804 unsigned int mprq_stride_size;
1805 unsigned int mprq_stride_cap;
1806 struct mlx5_dev_config *config = &priv->config;
1808 * Always allocate extra slots, even if eventually
1809 * the vector Rx will not be used.
1812 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1813 uint64_t offloads = conf->offloads |
1814 dev->data->dev_conf.rxmode.offloads;
1815 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1816 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1817 unsigned int max_rx_pkt_len = lro_on_queue ?
1818 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1819 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1820 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1821 RTE_PKTMBUF_HEADROOM;
1822 unsigned int max_lro_size = 0;
1823 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1825 if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1826 DEV_RX_OFFLOAD_SCATTER)) {
1827 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1828			" configured and not enough mbuf space (%u) to contain "
1829			"the maximum Rx packet length (%u) with head-room (%u)",
1830 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1831 RTE_PKTMBUF_HEADROOM);
1835 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1836 desc_n * sizeof(struct rte_mbuf *), 0, socket);
1841 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1842 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1843 MLX5_MR_BTREE_CACHE_N, socket)) {
1844 /* rte_errno is already set. */
1847 tmpl->socket = socket;
1848 if (dev->data->dev_conf.intr_conf.rxq)
1850 mprq_stride_nums = config->mprq.stride_num_n ?
1851 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1852 mprq_stride_size = non_scatter_min_mbuf_size <=
1853 (1U << config->mprq.max_stride_size_n) ?
1854 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1855 mprq_stride_cap = (config->mprq.stride_num_n ?
1856 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1857 (config->mprq.stride_size_n ?
1858 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
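	/*
	 * mprq_stride_cap is the number of bytes a single MPRQ WQE can hold:
	 * strides per WQE times stride size, using the devargs-provided values
	 * when set and the defaults computed above otherwise.
	 */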
1860 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1861 * following conditions are met:
1862 * - MPRQ is enabled.
1863 * - The number of descs is more than the number of strides.
1864 * - max_rx_pkt_len plus overhead is less than the max size
1865 * of a stride or mprq_stride_size is specified by a user.
1866	 *    Need to make sure that there are enough strides to encapsulate
1867 * the maximum packet size in case mprq_stride_size is set.
1868 * Otherwise, enable Rx scatter if necessary.
1870 if (mprq_en && desc > (1U << mprq_stride_nums) &&
1871 (non_scatter_min_mbuf_size <=
1872 (1U << config->mprq.max_stride_size_n) ||
1873 (config->mprq.stride_size_n &&
1874 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1875 /* TODO: Rx scatter isn't supported yet. */
1876 tmpl->rxq.sges_n = 0;
1877		/* Each MPRQ WQE holds up to 2^strd_num packets; trim the descriptor count. */
1878 desc >>= mprq_stride_nums;
1879 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1880 config->mprq.stride_num_n : mprq_stride_nums;
1881 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1882 config->mprq.stride_size_n : mprq_stride_size;
1883 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1884 tmpl->rxq.strd_scatter_en =
1885 !!(offloads & DEV_RX_OFFLOAD_SCATTER);
1886 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1887 config->mprq.max_memcpy_len);
1888 max_lro_size = RTE_MIN(max_rx_pkt_len,
1889 (1u << tmpl->rxq.strd_num_n) *
1890 (1u << tmpl->rxq.strd_sz_n));
1892 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1893 " strd_num_n = %u, strd_sz_n = %u",
1894 dev->data->port_id, idx,
1895 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1896 } else if (max_rx_pkt_len <= first_mb_free_size) {
1897 tmpl->rxq.sges_n = 0;
1898 max_lro_size = max_rx_pkt_len;
1899 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1900 unsigned int size = non_scatter_min_mbuf_size;
1901 unsigned int sges_n;
1903 if (lro_on_queue && first_mb_free_size <
1904 MLX5_MAX_LRO_HEADER_FIX) {
1905 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1906 " to include the max header size(%u) for LRO",
1907 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1908 rte_errno = ENOTSUP;
1912 * Determine the number of SGEs needed for a full packet
1913 * and round it to the next power of two.
1915 sges_n = log2above((size / mb_len) + !!(size % mb_len));
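		/*
		 * For example, a 9000 B maximum Rx packet with 2048 B mbufs and
		 * the default 128 B headroom needs ceil(9128 / 2048) = 5
		 * segments, rounded up to 8 SGEs (sges_n = 3).
		 */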
1916 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1918 "port %u too many SGEs (%u) needed to handle"
1919 " requested maximum packet size %u, the maximum"
1920				" supported is %u", dev->data->port_id,
1921 1 << sges_n, max_rx_pkt_len,
1922 1u << MLX5_MAX_LOG_RQ_SEGS);
1923 rte_errno = ENOTSUP;
1926 tmpl->rxq.sges_n = sges_n;
1927 max_lro_size = max_rx_pkt_len;
1929 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1931 "port %u MPRQ is requested but cannot be enabled\n"
1932 " (requested: pkt_sz = %u, desc_num = %u,"
1933 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1934 " supported: min_rxqs_num = %u,"
1935 " min_stride_sz = %u, max_stride_sz = %u).",
1936 dev->data->port_id, non_scatter_min_mbuf_size,
1938 config->mprq.stride_size_n ?
1939 (1U << config->mprq.stride_size_n) :
1940 (1U << mprq_stride_size),
1941 config->mprq.stride_num_n ?
1942 (1U << config->mprq.stride_num_n) :
1943 (1U << mprq_stride_nums),
1944 config->mprq.min_rxqs_num,
1945 (1U << config->mprq.min_stride_size_n),
1946 (1U << config->mprq.max_stride_size_n));
1947 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1948 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1949 if (desc % (1 << tmpl->rxq.sges_n)) {
1951 "port %u number of Rx queue descriptors (%u) is not a"
1952 " multiple of SGEs per packet (%u)",
1955 1 << tmpl->rxq.sges_n);
1959 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1960 /* Toggle RX checksum offload if hardware supports it. */
1961 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1962 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1963 /* Configure VLAN stripping. */
1964 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1965 /* By default, FCS (CRC) is stripped by hardware. */
1966 tmpl->rxq.crc_present = 0;
1967 tmpl->rxq.lro = lro_on_queue;
1968 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1969 if (config->hw_fcs_strip) {
1971 * RQs used for LRO-enabled TIRs should not be
1972 * configured to scatter the FCS.
1976 "port %u CRC stripping has been "
1977 "disabled but will still be performed "
1978 "by hardware, because LRO is enabled",
1979 dev->data->port_id);
1981 tmpl->rxq.crc_present = 1;
1984 "port %u CRC stripping has been disabled but will"
1985 " still be performed by hardware, make sure MLNX_OFED"
1986 " and firmware are up to date",
1987 dev->data->port_id);
1991 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1992 " incoming frames to hide it",
1994 tmpl->rxq.crc_present ? "disabled" : "enabled",
1995 tmpl->rxq.crc_present << 2);
1997 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1998 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1999 tmpl->rxq.port_id = dev->data->port_id;
2002 tmpl->rxq.elts_n = log2above(desc);
2003 tmpl->rxq.rq_repl_thresh =
2004 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
2006 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
2008 tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
2010 tmpl->rxq.idx = idx;
2011 rte_atomic32_inc(&tmpl->refcnt);
2012 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
2020 * Create a DPDK Rx hairpin queue.
2023 * Pointer to Ethernet device.
2027 * Number of descriptors to configure in queue.
2028 * @param hairpin_conf
2029 * The hairpin binding configuration.
2032 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
2034 struct mlx5_rxq_ctrl *
2035 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
2036 const struct rte_eth_hairpin_conf *hairpin_conf)
2038 struct mlx5_priv *priv = dev->data->dev_private;
2039 struct mlx5_rxq_ctrl *tmpl;
2041 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
2047 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
2048 tmpl->socket = SOCKET_ID_ANY;
2049 tmpl->rxq.rss_hash = 0;
2050 tmpl->rxq.port_id = dev->data->port_id;
2052 tmpl->rxq.mp = NULL;
2053 tmpl->rxq.elts_n = log2above(desc);
2054 tmpl->rxq.elts = NULL;
2055 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
2056 tmpl->hairpin_conf = *hairpin_conf;
2057 tmpl->rxq.idx = idx;
2058 rte_atomic32_inc(&tmpl->refcnt);
2059 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
2067 * Pointer to Ethernet device.
2072 * A pointer to the queue if it exists, NULL otherwise.
2074 struct mlx5_rxq_ctrl *
2075 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
2077 struct mlx5_priv *priv = dev->data->dev_private;
2078 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2080 if ((*priv->rxqs)[idx]) {
2081 rxq_ctrl = container_of((*priv->rxqs)[idx],
2082 struct mlx5_rxq_ctrl,
2084 mlx5_rxq_obj_get(dev, idx);
2085 rte_atomic32_inc(&rxq_ctrl->refcnt);
2091 * Release a Rx queue.
2094 * Pointer to Ethernet device.
2099 * 1 while a reference on it exists, 0 when freed.
2102 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2104 struct mlx5_priv *priv = dev->data->dev_private;
2105 struct mlx5_rxq_ctrl *rxq_ctrl;
2107 if (!(*priv->rxqs)[idx])
2109 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
2110 MLX5_ASSERT(rxq_ctrl->priv);
2111 if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
2112 rxq_ctrl->obj = NULL;
2113 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
2114 if (rxq_ctrl->dbr_umem_id_valid)
2115 claim_zero(mlx5_release_dbr(&priv->dbrpgs,
2116 rxq_ctrl->dbr_umem_id,
2117 rxq_ctrl->dbr_offset));
2118 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
2119 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2120 LIST_REMOVE(rxq_ctrl, next);
2121 mlx5_free(rxq_ctrl);
2122 (*priv->rxqs)[idx] = NULL;
2129 * Verify the Rx Queue list is empty
2132 * Pointer to Ethernet device.
2135 *   The number of objects not released.
2138 mlx5_rxq_verify(struct rte_eth_dev *dev)
2140 struct mlx5_priv *priv = dev->data->dev_private;
2141 struct mlx5_rxq_ctrl *rxq_ctrl;
2144 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2145 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2146 dev->data->port_id, rxq_ctrl->rxq.idx);
2153 * Get a Rx queue type.
2156 * Pointer to Ethernet device.
2161 * The Rx queue type.
2164 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
2166 struct mlx5_priv *priv = dev->data->dev_private;
2167 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2169 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
2170 rxq_ctrl = container_of((*priv->rxqs)[idx],
2171 struct mlx5_rxq_ctrl,
2173 return rxq_ctrl->type;
2175 return MLX5_RXQ_TYPE_UNDEFINED;
2179 * Create an indirection table.
2182 * Pointer to Ethernet device.
2184 *   Queues entering the indirection table.
2186 * Number of queues in the array.
2189 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2191 static struct mlx5_ind_table_obj *
2192 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2193 uint32_t queues_n, enum mlx5_ind_tbl_type type)
2195 struct mlx5_priv *priv = dev->data->dev_private;
2196 struct mlx5_ind_table_obj *ind_tbl;
2197 unsigned int i = 0, j = 0, k = 0;
2199 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
2200 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
2205 ind_tbl->type = type;
2206 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2207 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
2208 log2above(queues_n) :
2209 log2above(priv->config.ind_table_max_size);
2210 struct ibv_wq *wq[1 << wq_n];
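		/*
		 * When the queue count is not a power of two, the table is
		 * sized to the device maximum and the spare entries are filled
		 * by wrapping around the configured queues below.
		 */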
2212 for (i = 0; i != queues_n; ++i) {
2213 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2217 wq[i] = rxq->obj->wq;
2218 ind_tbl->queues[i] = queues[i];
2220 ind_tbl->queues_n = queues_n;
2221 /* Finalise indirection table. */
2222 k = i; /* Retain value of i for use in error case. */
2223 for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
2225 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
2227 &(struct ibv_rwq_ind_table_init_attr){
2228 .log_ind_tbl_size = wq_n,
2232 if (!ind_tbl->ind_table) {
2236 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2237 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
2238 const unsigned int rqt_n =
2239 1 << (rte_is_power_of_2(queues_n) ?
2240 log2above(queues_n) :
2241 log2above(priv->config.ind_table_max_size));
2243 rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
2244 rqt_n * sizeof(uint32_t), 0,
2247 DRV_LOG(ERR, "port %u cannot allocate RQT resources",
2248 dev->data->port_id);
2252 rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
2253 rqt_attr->rqt_actual_size = rqt_n;
2254 for (i = 0; i != queues_n; ++i) {
2255 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2259 rqt_attr->rq_list[i] = rxq->obj->rq->id;
2260 ind_tbl->queues[i] = queues[i];
2262 k = i; /* Retain value of i for use in error case. */
2263 for (j = 0; k != rqt_n; ++k, ++j)
2264 rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
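/*
 * Editor's note: rqt_actual_size is the padded power-of-two entry count
 * programmed into the RQT (rqt_n above), while rqt_max_size advertises the
 * limit taken from ind_table_max_size; the wrap-around fill mirrors the
 * Verbs branch above.
 */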
2265 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
2267 mlx5_free(rqt_attr);
2268 if (!ind_tbl->rqt) {
2269 DRV_LOG(ERR, "port %u cannot create DevX RQT",
2270 dev->data->port_id);
2274 ind_tbl->queues_n = queues_n;
2276 rte_atomic32_inc(&ind_tbl->refcnt);
2277 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2280 for (j = 0; j < i; j++)
2281 mlx5_rxq_release(dev, ind_tbl->queues[j]);
2283 DEBUG("port %u cannot create indirection table", dev->data->port_id);
2288 * Get an indirection table.
2291 * Pointer to Ethernet device.
2293 * Queues entering the indirection table.
2295 * Number of queues in the array.
2298 * An indirection table if found.
2300 static struct mlx5_ind_table_obj *
2301 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2304 struct mlx5_priv *priv = dev->data->dev_private;
2305 struct mlx5_ind_table_obj *ind_tbl;
2307 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2308 if ((ind_tbl->queues_n == queues_n) &&
2309 (memcmp(ind_tbl->queues, queues,
2310 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2317 rte_atomic32_inc(&ind_tbl->refcnt);
2318 for (i = 0; i != ind_tbl->queues_n; ++i)
2319 mlx5_rxq_get(dev, ind_tbl->queues[i]);
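/*
 * Editor's note: a successful lookup also takes one reference per Rx queue
 * through mlx5_rxq_get(), matching mlx5_ind_table_obj_new(), so
 * mlx5_ind_table_obj_release() can unconditionally drop those references.
 */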
2325 * Release an indirection table.
2328 * Pointer to Ethernet device.
2330 * Indirection table to release.
2333 * 1 while a reference on it exists, 0 when freed.
2336 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2337 struct mlx5_ind_table_obj *ind_tbl)
2341 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
2342 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
2343 claim_zero(mlx5_glue->destroy_rwq_ind_table
2344 (ind_tbl->ind_table));
2345 else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
2346 claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
2348 for (i = 0; i != ind_tbl->queues_n; ++i)
2349 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
2350 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
2351 LIST_REMOVE(ind_tbl, next);
2359 * Verify the indirection table list is empty.
2362 * Pointer to Ethernet device.
2365 * The number of objects not released.
2368 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2370 struct mlx5_priv *priv = dev->data->dev_private;
2371 struct mlx5_ind_table_obj *ind_tbl;
2374 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2376 "port %u indirection table obj %p still referenced",
2377 dev->data->port_id, (void *)ind_tbl);
2384 * Create an Rx Hash queue.
2387 * Pointer to Ethernet device.
2389 * RSS key for the Rx hash queue.
2390 * @param rss_key_len
2392 * @param hash_fields
2393 * Verbs protocol hash field to make the RSS on.
2395 * Queues entering the hash queue. If hash_fields is empty, only the
2396 * first queue index is used for the indirection table.
2403 * The index of the initialised Verbs/DevX object, 0 otherwise and rte_errno is set.
2406 mlx5_hrxq_new(struct rte_eth_dev *dev,
2407 const uint8_t *rss_key, uint32_t rss_key_len,
2408 uint64_t hash_fields,
2409 const uint16_t *queues, uint32_t queues_n,
2410 int tunnel __rte_unused)
2412 struct mlx5_priv *priv = dev->data->dev_private;
2413 struct mlx5_hrxq *hrxq;
2414 uint32_t hrxq_idx = 0;
2415 struct ibv_qp *qp = NULL;
2416 struct mlx5_ind_table_obj *ind_tbl;
2418 struct mlx5_devx_obj *tir = NULL;
2419 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
2420 struct mlx5_rxq_ctrl *rxq_ctrl =
2421 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
2423 queues_n = hash_fields ? queues_n : 1;
2424 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2426 enum mlx5_ind_tbl_type type;
2428 type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
2429 MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
2430 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
2436 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2437 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2438 struct mlx5dv_qp_init_attr qp_init_attr;
2440 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2442 qp_init_attr.comp_mask =
2443 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2444 qp_init_attr.create_flags =
2445 MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2447 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2448 if (dev->data->dev_conf.lpbk_mode) {
2450 * Allow packets sent from the NIC to loop back
2451 * without source MAC check.
2453 qp_init_attr.comp_mask |=
2454 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2455 qp_init_attr.create_flags |=
2456 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2459 qp = mlx5_glue->dv_create_qp
2461 &(struct ibv_qp_init_attr_ex){
2462 .qp_type = IBV_QPT_RAW_PACKET,
2464 IBV_QP_INIT_ATTR_PD |
2465 IBV_QP_INIT_ATTR_IND_TABLE |
2466 IBV_QP_INIT_ATTR_RX_HASH,
2467 .rx_hash_conf = (struct ibv_rx_hash_conf){
2469 IBV_RX_HASH_FUNC_TOEPLITZ,
2470 .rx_hash_key_len = rss_key_len,
2472 (void *)(uintptr_t)rss_key,
2473 .rx_hash_fields_mask = hash_fields,
2475 .rwq_ind_tbl = ind_tbl->ind_table,
2480 qp = mlx5_glue->create_qp_ex
2482 &(struct ibv_qp_init_attr_ex){
2483 .qp_type = IBV_QPT_RAW_PACKET,
2485 IBV_QP_INIT_ATTR_PD |
2486 IBV_QP_INIT_ATTR_IND_TABLE |
2487 IBV_QP_INIT_ATTR_RX_HASH,
2488 .rx_hash_conf = (struct ibv_rx_hash_conf){
2490 IBV_RX_HASH_FUNC_TOEPLITZ,
2491 .rx_hash_key_len = rss_key_len,
2493 (void *)(uintptr_t)rss_key,
2494 .rx_hash_fields_mask = hash_fields,
2496 .rwq_ind_tbl = ind_tbl->ind_table,
2504 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2505 struct mlx5_devx_tir_attr tir_attr;
2509 /* Enable TIR LRO only if all the queues were configured for it. */
2510 for (i = 0; i < queues_n; ++i) {
2511 if (!(*priv->rxqs)[queues[i]]->lro) {
2516 memset(&tir_attr, 0, sizeof(tir_attr));
2517 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2518 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2519 tir_attr.tunneled_offload_en = !!tunnel;
2520 /* If needed, translate hash_fields bitmap to PRM format. */
2522 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2523 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2524 hash_fields & IBV_RX_HASH_INNER ?
2525 &tir_attr.rx_hash_field_selector_inner :
2526 &tir_attr.rx_hash_field_selector_outer;
2528 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2529 &tir_attr.rx_hash_field_selector_outer;
2532 /* 1 bit: 0: IPv4, 1: IPv6. */
2533 rx_hash_field_select->l3_prot_type =
2534 !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
2535 /* 1 bit: 0: TCP, 1: UDP. */
2536 rx_hash_field_select->l4_prot_type =
2537 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
2538 /* Bitmask selecting which fields to use in the Rx hash. */
2539 rx_hash_field_select->selected_fields =
2540 ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
2541 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
2542 ((!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
2543 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP) |
2544 ((!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
2545 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
2546 ((!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
2547 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
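/*
 * Editor's worked example (assuming the usual PRM bit positions SRC_IP = 0,
 * DST_IP = 1, L4_SPORT = 2, L4_DPORT = 3): for outer IPv4/UDP RSS on both
 * addresses and both ports, l3_prot_type = 0 (IPv4), l4_prot_type = 1 (UDP)
 * and selected_fields = 0xf, i.e. all four fields are hashed.
 */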
2549 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
2550 tir_attr.transport_domain = priv->sh->td->id;
2551 else
2552 tir_attr.transport_domain = priv->sh->tdn;
2553 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key,
2554 MLX5_RSS_HASH_KEY_LEN);
2555 tir_attr.indirect_table = ind_tbl->rqt->id;
2556 if (dev->data->dev_conf.lpbk_mode)
2557 tir_attr.self_lb_block =
2558 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
2560 tir_attr.lro_timeout_period_usecs =
2561 priv->config.lro.timeout;
2562 tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
2563 tir_attr.lro_enable_mask =
2564 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2565 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
2567 tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
2569 DRV_LOG(ERR, "port %u cannot create DevX TIR",
2570 dev->data->port_id);
2575 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2578 hrxq->ind_table = ind_tbl;
2579 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2581 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2583 mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2584 if (!hrxq->action) {
2589 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2591 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2592 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
2594 if (!hrxq->action) {
2600 hrxq->rss_key_len = rss_key_len;
2601 hrxq->hash_fields = hash_fields;
2602 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2603 rte_atomic32_inc(&hrxq->refcnt);
2604 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
2608 err = rte_errno; /* Save rte_errno before cleanup. */
2609 mlx5_ind_table_obj_release(dev, ind_tbl);
2611 claim_zero(mlx5_glue->destroy_qp(qp));
2613 claim_zero(mlx5_devx_cmd_destroy(tir));
2614 rte_errno = err; /* Restore rte_errno. */
2619 * Get an Rx Hash queue.
2622 * Pointer to Ethernet device.
2624 * RSS configuration for the Rx hash queue.
2626 * Queues entering the hash queue. If hash_fields is empty, only the
2627 * first queue index is used for the indirection table.
2632 * A hash Rx queue index on success.
2635 mlx5_hrxq_get(struct rte_eth_dev *dev,
2636 const uint8_t *rss_key, uint32_t rss_key_len,
2637 uint64_t hash_fields,
2638 const uint16_t *queues, uint32_t queues_n)
2640 struct mlx5_priv *priv = dev->data->dev_private;
2641 struct mlx5_hrxq *hrxq;
2644 queues_n = hash_fields ? queues_n : 1;
2645 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2647 struct mlx5_ind_table_obj *ind_tbl;
2649 if (hrxq->rss_key_len != rss_key_len)
2651 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2653 if (hrxq->hash_fields != hash_fields)
2655 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2658 if (ind_tbl != hrxq->ind_table) {
2659 mlx5_ind_table_obj_release(dev, ind_tbl);
2662 rte_atomic32_inc(&hrxq->refcnt);
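/*
 * Editor's note (typical call pattern, sketch only): flow creation looks up
 * an existing hash Rx queue first and only builds a new one on a miss:
 *
 *	uint32_t idx = mlx5_hrxq_get(dev, rss_key, MLX5_RSS_HASH_KEY_LEN,
 *				     hash_fields, queues, queues_n);
 *
 *	if (!idx)
 *		idx = mlx5_hrxq_new(dev, rss_key, MLX5_RSS_HASH_KEY_LEN,
 *				    hash_fields, queues, queues_n, tunnel);
 */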
2669 * Release the hash Rx queue.
2672 * Pointer to Ethernet device.
2674 * Index to Hash Rx queue to release.
2677 * 1 while a reference on it exists, 0 when freed.
2680 mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2682 struct mlx5_priv *priv = dev->data->dev_private;
2683 struct mlx5_hrxq *hrxq;
2685 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2688 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2689 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2690 mlx5_glue->destroy_flow_action(hrxq->action);
2692 if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
2693 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2694 else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
2695 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
2696 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2697 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
2698 hrxq_idx, hrxq, next);
2699 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2702 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
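/*
 * Editor's note: the release above balances the extra indirection table
 * reference taken through mlx5_ind_table_obj_get() in mlx5_hrxq_get() when
 * an existing hash Rx queue is reused.
 */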
2707 * Verify the hash Rx queue list is empty.
2710 * Pointer to Ethernet device.
2713 * The number of objects not released.
2716 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2718 struct mlx5_priv *priv = dev->data->dev_private;
2719 struct mlx5_hrxq *hrxq;
2723 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2726 "port %u hash Rx queue %p still referenced",
2727 dev->data->port_id, (void *)hrxq);
2734 * Create a drop Rx queue Verbs/DevX object.
2737 * Pointer to Ethernet device.
2740 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2742 static struct mlx5_rxq_obj *
2743 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
2745 struct mlx5_priv *priv = dev->data->dev_private;
2746 struct ibv_context *ctx = priv->sh->ctx;
2748 struct ibv_wq *wq = NULL;
2749 struct mlx5_rxq_obj *rxq;
2751 if (priv->drop_queue.rxq)
2752 return priv->drop_queue.rxq;
2753 cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2755 DEBUG("port %u cannot allocate CQ for drop queue",
2756 dev->data->port_id);
2760 wq = mlx5_glue->create_wq(ctx,
2761 &(struct ibv_wq_init_attr){
2762 .wq_type = IBV_WQT_RQ,
2769 DEBUG("port %u cannot allocate WQ for drop queue",
2770 dev->data->port_id);
2774 rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
2776 DEBUG("port %u cannot allocate drop Rx queue memory",
2777 dev->data->port_id);
2783 priv->drop_queue.rxq = rxq;
2787 claim_zero(mlx5_glue->destroy_wq(wq));
2789 claim_zero(mlx5_glue->destroy_cq(cq));
2794 * Release a drop Rx queue Verbs/DevX object.
2797 * Pointer to Ethernet device.
2800 * Nothing is returned; the drop Rx queue Verbs/DevX resources are released.
2803 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2805 struct mlx5_priv *priv = dev->data->dev_private;
2806 struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2809 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2811 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2813 priv->drop_queue.rxq = NULL;
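/*
 * Editor's note: the drop Rx queue object is a lazily created singleton (see
 * the early return at the top of mlx5_rxq_obj_drop_new()); it backs the drop
 * indirection table and is torn down here when that table is released.
 */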
2817 * Create a drop indirection table.
2820 * Pointer to Ethernet device.
2823 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2825 static struct mlx5_ind_table_obj *
2826 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2828 struct mlx5_priv *priv = dev->data->dev_private;
2829 struct mlx5_ind_table_obj *ind_tbl;
2830 struct mlx5_rxq_obj *rxq;
2831 struct mlx5_ind_table_obj tmpl;
2833 rxq = mlx5_rxq_obj_drop_new(dev);
2836 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2838 &(struct ibv_rwq_ind_table_init_attr){
2839 .log_ind_tbl_size = 0,
2840 .ind_tbl = &rxq->wq,
2843 if (!tmpl.ind_table) {
2844 DEBUG("port %u cannot allocate indirection table for drop"
2846 dev->data->port_id);
2850 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,
2856 ind_tbl->ind_table = tmpl.ind_table;
2859 mlx5_rxq_obj_drop_release(dev);
2864 * Release a drop indirection table.
2867 * Pointer to Ethernet device.
2870 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2872 struct mlx5_priv *priv = dev->data->dev_private;
2873 struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2875 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2876 mlx5_rxq_obj_drop_release(dev);
2878 priv->drop_queue.hrxq->ind_table = NULL;
2882 * Create a drop Rx Hash queue.
2885 * Pointer to Ethernet device.
2888 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2891 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2893 struct mlx5_priv *priv = dev->data->dev_private;
2894 struct mlx5_ind_table_obj *ind_tbl = NULL;
2895 struct ibv_qp *qp = NULL;
2896 struct mlx5_hrxq *hrxq = NULL;
2898 if (priv->drop_queue.hrxq) {
2899 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2900 return priv->drop_queue.hrxq;
2902 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2905 "port %u cannot allocate memory for drop queue",
2906 dev->data->port_id);
2910 priv->drop_queue.hrxq = hrxq;
2911 ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2914 hrxq->ind_table = ind_tbl;
2915 qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2916 &(struct ibv_qp_init_attr_ex){
2917 .qp_type = IBV_QPT_RAW_PACKET,
2919 IBV_QP_INIT_ATTR_PD |
2920 IBV_QP_INIT_ATTR_IND_TABLE |
2921 IBV_QP_INIT_ATTR_RX_HASH,
2922 .rx_hash_conf = (struct ibv_rx_hash_conf){
2924 IBV_RX_HASH_FUNC_TOEPLITZ,
2925 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2926 .rx_hash_key = rss_hash_default_key,
2927 .rx_hash_fields_mask = 0,
2929 .rwq_ind_tbl = ind_tbl->ind_table,
2933 DEBUG("port %u cannot allocate QP for drop queue",
2934 dev->data->port_id);
2939 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2940 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2941 if (!hrxq->action) {
2946 rte_atomic32_set(&hrxq->refcnt, 1);
2949 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2950 if (hrxq && hrxq->action)
2951 mlx5_glue->destroy_flow_action(hrxq->action);
2954 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2956 mlx5_ind_table_obj_drop_release(dev);
2958 priv->drop_queue.hrxq = NULL;
2965 * Release a drop hash Rx queue.
2968 * Pointer to Ethernet device.
2971 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2973 struct mlx5_priv *priv = dev->data->dev_private;
2974 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2976 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2977 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2978 mlx5_glue->destroy_flow_action(hrxq->action);
2980 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2981 mlx5_ind_table_obj_drop_release(dev);
2983 priv->drop_queue.hrxq = NULL;
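/*
 * Editor's note (usage sketch): flows carrying a DROP action share the
 * single drop hash Rx queue:
 *
 *	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
 *
 *	if (drop != NULL) {
 *		... attach drop->action or drop->qp to the flow rule ...
 *	}
 *
 * and call mlx5_hrxq_drop_release(dev) when the flow is destroyed.
 */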
2989 * Set the Rx queue timestamp conversion parameters.
2992 * Pointer to the Ethernet device structure.
2995 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2997 struct mlx5_priv *priv = dev->data->dev_private;
2998 struct mlx5_dev_ctx_shared *sh = priv->sh;
2999 struct mlx5_rxq_data *data;
3002 for (i = 0; i != priv->rxqs_n; ++i) {
3003 if (!(*priv->rxqs)[i])
3004 continue;
3005 data = (*priv->rxqs)[i];
3007 data->rt_timestamp = priv->config.rt_timestamp;