1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
12 #include <sys/queue.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42 0x2c, 0xc6, 0x81, 0xd1,
43 0x5b, 0xdb, 0xf4, 0xf7,
44 0xfc, 0xa2, 0x83, 0x19,
45 0xdb, 0x1a, 0x3e, 0x94,
46 0x6b, 0x9e, 0x38, 0xd9,
47 0x2c, 0x9c, 0x03, 0xd1,
48 0xad, 0x99, 0x44, 0xa7,
49 0xd9, 0x56, 0x3d, 0x59,
50 0x06, 0x3c, 0x25, 0xf3,
51 0xfc, 0x1f, 0xdc, 0x2a,
54 /* Length of the default RSS hash key. */
55 static_assert(MLX5_RSS_HASH_KEY_LEN ==
56 (unsigned int)sizeof(rss_hash_default_key),
57 "wrong RSS default key size.");
60 * Check whether Multi-Packet RQ can be enabled for the device.
63 * Pointer to Ethernet device.
66 * 1 if supported, negative errno value if not.
69 mlx5_check_mprq_support(struct rte_eth_dev *dev)
71 struct mlx5_priv *priv = dev->data->dev_private;
73 if (priv->config.mprq.enabled &&
74 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
80 * Check whether Multi-Packet RQ is enabled for the Rx queue.
83 * Pointer to receive queue structure.
86 * 0 if disabled, otherwise enabled.
89 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
91 return rxq->strd_num_n > 0;
95 * Check whether Multi-Packet RQ is enabled for the device.
96 * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
99 * Pointer to Ethernet device.
102 * 0 if disabled, otherwise enabled.
105 mlx5_mprq_enabled(struct rte_eth_dev *dev)
107 struct mlx5_priv *priv = dev->data->dev_private;
111 if (mlx5_check_mprq_support(dev) < 0)
113 /* All the configured queues should be enabled. */
114 for (i = 0; i < priv->rxqs_n; ++i) {
115 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
119 if (mlx5_rxq_mprq_enabled(rxq))
122 /* Multi-Packet RQ can't be partially configured. */
123 assert(n == 0 || n == priv->rxqs_n);
124 return n == priv->rxqs_n;
128 * Check whether LRO is supported and enabled for the device.
131 * Pointer to Ethernet device.
134 * 0 if disabled, 1 if enabled.
137 mlx5_lro_on(struct rte_eth_dev *dev)
139 return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
143 * Allocate RX queue elements for Multi-Packet RQ.
146 * Pointer to RX queue structure.
149 * 0 on success, a negative errno value otherwise and rte_errno is set.
152 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
154 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
155 unsigned int wqe_n = 1 << rxq->elts_n;
159 /* Iterate on segments. */
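/*
 * wqe_n buffers fill the ring; the extra iteration below allocates one
 * spare buffer that is kept in rxq->mprq_repl for replenishment.
 */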
160 for (i = 0; i <= wqe_n; ++i) {
161 struct mlx5_mprq_buf *buf;
163 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
164 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
169 (*rxq->mprq_bufs)[i] = buf;
171 rxq->mprq_repl = buf;
174 "port %u Rx queue %u allocated and configured %u segments",
175 rxq->port_id, rxq->idx, wqe_n);
178 err = rte_errno; /* Save rte_errno before cleanup. */
180 for (i = 0; (i != wqe_n); ++i) {
181 if ((*rxq->mprq_bufs)[i] != NULL)
182 rte_mempool_put(rxq->mprq_mp,
183 (*rxq->mprq_bufs)[i]);
184 (*rxq->mprq_bufs)[i] = NULL;
186 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
187 rxq->port_id, rxq->idx);
188 rte_errno = err; /* Restore rte_errno. */
193 * Allocate RX queue elements for Single-Packet RQ.
196 * Pointer to RX queue structure.
199 * 0 on success, errno value on failure.
202 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
204 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
205 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
209 /* Iterate on segments. */
210 for (i = 0; (i != elts_n); ++i) {
211 struct rte_mbuf *buf;
213 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
215 DRV_LOG(ERR, "port %u empty mbuf pool",
216 PORT_ID(rxq_ctrl->priv));
220 /* Headroom is reserved by rte_pktmbuf_alloc(). */
221 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
222 /* Buffer is supposed to be empty. */
223 assert(rte_pktmbuf_data_len(buf) == 0);
224 assert(rte_pktmbuf_pkt_len(buf) == 0);
226 /* Only the first segment keeps headroom. */
228 SET_DATA_OFF(buf, 0);
229 PORT(buf) = rxq_ctrl->rxq.port_id;
230 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
231 PKT_LEN(buf) = DATA_LEN(buf);
233 (*rxq_ctrl->rxq.elts)[i] = buf;
235 /* If Rx vector is activated. */
236 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
237 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
238 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
241 /* Initialize default rearm_data for vPMD. */
242 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
243 rte_mbuf_refcnt_set(mbuf_init, 1);
244 mbuf_init->nb_segs = 1;
245 mbuf_init->port = rxq->port_id;
247 * prevent compiler reordering:
248 * rearm_data covers previous fields.
250 rte_compiler_barrier();
251 rxq->mbuf_initializer =
252 *(uint64_t *)&mbuf_init->rearm_data;
253 /* Padding with a fake mbuf for vectorized Rx. */
254 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
255 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
258 "port %u Rx queue %u allocated and configured %u segments"
260 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
261 elts_n / (1 << rxq_ctrl->rxq.sges_n));
264 err = rte_errno; /* Save rte_errno before cleanup. */
266 for (i = 0; (i != elts_n); ++i) {
267 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
268 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
269 (*rxq_ctrl->rxq.elts)[i] = NULL;
271 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
272 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
273 rte_errno = err; /* Restore rte_errno. */
278 * Allocate RX queue elements.
281 * Pointer to RX queue structure.
284 * 0 on success, errno value on failure.
287 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
289 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
290 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
294 * Free RX queue elements for Multi-Packet RQ.
297 * Pointer to RX queue structure.
300 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
302 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
305 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
306 rxq->port_id, rxq->idx);
307 if (rxq->mprq_bufs == NULL)
309 assert(mlx5_rxq_check_vec_support(rxq) < 0);
310 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
311 if ((*rxq->mprq_bufs)[i] != NULL)
312 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
313 (*rxq->mprq_bufs)[i] = NULL;
315 if (rxq->mprq_repl != NULL) {
316 mlx5_mprq_buf_free(rxq->mprq_repl);
317 rxq->mprq_repl = NULL;
322 * Free RX queue elements for Single-Packet RQ.
325 * Pointer to RX queue structure.
328 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
330 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
331 const uint16_t q_n = (1 << rxq->elts_n);
332 const uint16_t q_mask = q_n - 1;
333 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
336 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
337 PORT_ID(rxq_ctrl->priv), rxq->idx);
338 if (rxq->elts == NULL)
341 * Some mbufs in the ring belong to the application; they cannot be freed.
344 if (mlx5_rxq_check_vec_support(rxq) > 0) {
345 for (i = 0; i < used; ++i)
346 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
347 rxq->rq_pi = rxq->rq_ci;
349 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
350 if ((*rxq->elts)[i] != NULL)
351 rte_pktmbuf_free_seg((*rxq->elts)[i]);
352 (*rxq->elts)[i] = NULL;
357 * Free RX queue elements.
360 * Pointer to RX queue structure.
363 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
365 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
366 rxq_free_elts_mprq(rxq_ctrl);
368 rxq_free_elts_sprq(rxq_ctrl);
372 * Returns the per-queue supported offloads.
375 * Pointer to Ethernet device.
378 * Supported Rx offloads.
381 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
383 struct mlx5_priv *priv = dev->data->dev_private;
384 struct mlx5_dev_config *config = &priv->config;
385 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
386 DEV_RX_OFFLOAD_TIMESTAMP |
387 DEV_RX_OFFLOAD_JUMBO_FRAME);
389 if (config->hw_fcs_strip)
390 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
393 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
394 DEV_RX_OFFLOAD_UDP_CKSUM |
395 DEV_RX_OFFLOAD_TCP_CKSUM);
396 if (config->hw_vlan_strip)
397 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
403 * Returns the per-port supported offloads.
406 * Pointer to Ethernet device.
409 * Supported Rx offloads.
412 mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
414 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
416 if (MLX5_LRO_SUPPORTED(dev))
417 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
422 * Verify if the queue can be released.
425 * Pointer to Ethernet device.
430 * 1 if the queue can be released
431 * 0 if the queue cannot be released because there are references to it.
432 * Negative errno and rte_errno is set if queue doesn't exist.
435 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
437 struct mlx5_priv *priv = dev->data->dev_private;
438 struct mlx5_rxq_ctrl *rxq_ctrl;
440 if (!(*priv->rxqs)[idx]) {
444 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
445 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
451 * Pointer to Ethernet device structure.
455 * Number of descriptors to configure in queue.
457 * NUMA socket on which memory must be allocated.
459 * Thresholds parameters.
461 * Memory pool for buffer allocations.
464 * 0 on success, a negative errno value otherwise and rte_errno is set.
467 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
468 unsigned int socket, const struct rte_eth_rxconf *conf,
469 struct rte_mempool *mp)
471 struct mlx5_priv *priv = dev->data->dev_private;
472 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
473 struct mlx5_rxq_ctrl *rxq_ctrl =
474 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
476 if (!rte_is_power_of_2(desc)) {
477 desc = 1 << log2above(desc);
479 "port %u increased number of descriptors in Rx queue %u"
480 " to the next power of two (%d)",
481 dev->data->port_id, idx, desc);
483 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
484 dev->data->port_id, idx, desc);
485 if (idx >= priv->rxqs_n) {
486 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
487 dev->data->port_id, idx, priv->rxqs_n);
488 rte_errno = EOVERFLOW;
491 if (!mlx5_rxq_releasable(dev, idx)) {
492 DRV_LOG(ERR, "port %u unable to release queue index %u",
493 dev->data->port_id, idx);
497 mlx5_rxq_release(dev, idx);
498 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
500 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
501 dev->data->port_id, idx);
505 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
506 dev->data->port_id, idx);
507 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
512 * DPDK callback to release a RX queue.
515 * Generic RX queue pointer.
518 mlx5_rx_queue_release(void *dpdk_rxq)
520 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
521 struct mlx5_rxq_ctrl *rxq_ctrl;
522 struct mlx5_priv *priv;
526 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
527 priv = rxq_ctrl->priv;
528 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
529 rte_panic("port %u Rx queue %u is still used by a flow and"
530 " cannot be removed\n",
531 PORT_ID(priv), rxq->idx);
532 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
536 * Get an Rx queue Verbs/DevX object.
539 * Pointer to Ethernet device.
541 * Queue index in DPDK Rx queue array
544 * The Verbs/DevX object if it exists.
546 static struct mlx5_rxq_obj *
547 mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
549 struct mlx5_priv *priv = dev->data->dev_private;
550 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
551 struct mlx5_rxq_ctrl *rxq_ctrl;
553 if (idx >= priv->rxqs_n)
557 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
559 rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
560 return rxq_ctrl->obj;
564 * Release an Rx verbs/DevX queue object.
567 * Verbs/DevX Rx queue object.
570 * 1 while a reference on it exists, 0 when freed.
573 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
578 if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
579 rxq_free_elts(rxq_obj->rxq_ctrl);
580 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
581 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
582 if (rxq_obj->channel)
583 claim_zero(mlx5_glue->destroy_comp_channel
585 LIST_REMOVE(rxq_obj, next);
593 * Allocate queue vector and fill epoll fd list for Rx interrupts.
596 * Pointer to Ethernet device.
599 * 0 on success, a negative errno value otherwise and rte_errno is set.
602 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
604 struct mlx5_priv *priv = dev->data->dev_private;
606 unsigned int rxqs_n = priv->rxqs_n;
607 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
608 unsigned int count = 0;
609 struct rte_intr_handle *intr_handle = dev->intr_handle;
611 if (!dev->data->dev_conf.intr_conf.rxq)
613 mlx5_rx_intr_vec_disable(dev);
614 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
615 if (intr_handle->intr_vec == NULL) {
617 "port %u failed to allocate memory for interrupt"
618 " vector, Rx interrupts will not be supported",
623 intr_handle->type = RTE_INTR_HANDLE_EXT;
624 for (i = 0; i != n; ++i) {
625 /* This rxq obj must not be released in this function. */
626 struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
631 /* Skip queues that cannot request interrupts. */
632 if (!rxq_obj || !rxq_obj->channel) {
633 /* Use invalid intr_vec[] index to disable entry. */
634 intr_handle->intr_vec[i] =
635 RTE_INTR_VEC_RXTX_OFFSET +
636 RTE_MAX_RXTX_INTR_VEC_ID;
639 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
641 "port %u too many Rx queues for interrupt"
642 " vector size (%d), Rx interrupts cannot be"
644 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
645 mlx5_rx_intr_vec_disable(dev);
649 fd = rxq_obj->channel->fd;
650 flags = fcntl(fd, F_GETFL);
651 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
655 "port %u failed to make Rx interrupt file"
656 " descriptor %d non-blocking for queue index"
658 dev->data->port_id, fd, i);
659 mlx5_rx_intr_vec_disable(dev);
662 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
663 intr_handle->efds[count] = fd;
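/*
 * Map Rx queue i to interrupt vector 'count' and expose the completion
 * channel fd so the epoll-based interrupt handler can wait on it.
 */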
667 mlx5_rx_intr_vec_disable(dev);
669 intr_handle->nb_efd = count;
674 * Clean up Rx interrupts handler.
677 * Pointer to Ethernet device.
680 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
682 struct mlx5_priv *priv = dev->data->dev_private;
683 struct rte_intr_handle *intr_handle = dev->intr_handle;
685 unsigned int rxqs_n = priv->rxqs_n;
686 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
688 if (!dev->data->dev_conf.intr_conf.rxq)
690 if (!intr_handle->intr_vec)
692 for (i = 0; i != n; ++i) {
693 struct mlx5_rxq_ctrl *rxq_ctrl;
694 struct mlx5_rxq_data *rxq_data;
696 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
697 RTE_MAX_RXTX_INTR_VEC_ID)
700 * Need to access the queue directly to release the reference
701 * kept in mlx5_rx_intr_vec_enable().
703 rxq_data = (*priv->rxqs)[i];
704 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
706 mlx5_rxq_obj_release(rxq_ctrl->obj);
709 rte_intr_free_epoll_fd(intr_handle);
710 if (intr_handle->intr_vec)
711 free(intr_handle->intr_vec);
712 intr_handle->nb_efd = 0;
713 intr_handle->intr_vec = NULL;
717 * MLX5 CQ notification.
720 * Pointer to receive queue structure.
722 * Sequence number per receive queue.
725 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
728 uint32_t doorbell_hi;
730 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
732 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
733 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
734 doorbell = (uint64_t)doorbell_hi << 32;
735 doorbell |= rxq->cqn;
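/*
 * Update the CQ doorbell record with the arm sequence and consumer index,
 * then ring the UAR register with the full 64-bit doorbell (high word plus
 * CQN) so the HCA raises an event on the next completion.
 */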
736 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
737 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
738 cq_db_reg, rxq->uar_lock_cq);
742 * DPDK callback for Rx queue interrupt enable.
745 * Pointer to Ethernet device structure.
750 * 0 on success, a negative errno value otherwise and rte_errno is set.
753 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
755 struct mlx5_priv *priv = dev->data->dev_private;
756 struct mlx5_rxq_data *rxq_data;
757 struct mlx5_rxq_ctrl *rxq_ctrl;
759 rxq_data = (*priv->rxqs)[rx_queue_id];
764 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
766 struct mlx5_rxq_obj *rxq_obj;
768 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
773 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
774 mlx5_rxq_obj_release(rxq_obj);
780 * DPDK callback for Rx queue interrupt disable.
783 * Pointer to Ethernet device structure.
788 * 0 on success, a negative errno value otherwise and rte_errno is set.
791 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
793 struct mlx5_priv *priv = dev->data->dev_private;
794 struct mlx5_rxq_data *rxq_data;
795 struct mlx5_rxq_ctrl *rxq_ctrl;
796 struct mlx5_rxq_obj *rxq_obj = NULL;
797 struct ibv_cq *ev_cq;
801 rxq_data = (*priv->rxqs)[rx_queue_id];
806 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
809 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
814 ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
815 if (ret || ev_cq != rxq_obj->cq) {
819 rxq_data->cq_arm_sn++;
820 mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
821 mlx5_rxq_obj_release(rxq_obj);
824 ret = rte_errno; /* Save rte_errno before cleanup. */
826 mlx5_rxq_obj_release(rxq_obj);
827 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
828 dev->data->port_id, rx_queue_id);
829 rte_errno = ret; /* Restore rte_errno. */
834 * Create the Rx queue Verbs/DevX object.
837 * Pointer to Ethernet device.
839 * Queue index in DPDK Rx queue array
842 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
844 struct mlx5_rxq_obj *
845 mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
847 struct mlx5_priv *priv = dev->data->dev_private;
848 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
849 struct mlx5_rxq_ctrl *rxq_ctrl =
850 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
851 struct ibv_wq_attr mod;
854 struct ibv_cq_init_attr_ex ibv;
855 struct mlx5dv_cq_init_attr mlx5;
858 struct ibv_wq_init_attr ibv;
859 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
860 struct mlx5dv_wq_init_attr mlx5;
865 unsigned int wqe_n = 1 << rxq_data->elts_n;
866 struct mlx5_rxq_obj *tmpl = NULL;
867 struct mlx5dv_cq cq_info;
868 struct mlx5dv_rwq rwq;
870 struct mlx5dv_obj obj;
871 struct mlx5_dev_config *config = &priv->config;
872 const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
875 assert(!rxq_ctrl->ibv);
876 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
877 priv->verbs_alloc_ctx.obj = rxq_ctrl;
878 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
882 "port %u Rx queue %u cannot allocate verbs resources",
883 dev->data->port_id, rxq_data->idx);
887 tmpl->rxq_ctrl = rxq_ctrl;
889 tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
890 if (!tmpl->channel) {
891 DRV_LOG(ERR, "port %u: comp channel creation failure",
898 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
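/*
 * With Multi-Packet RQ a single WQE carries up to 2^strd_num_n strides,
 * each of which may complete as its own CQE, so size the CQ accordingly.
 */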
901 attr.cq.ibv = (struct ibv_cq_init_attr_ex){
903 .channel = tmpl->channel,
906 attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
909 if (config->cqe_comp && !rxq_data->hw_timestamp) {
910 attr.cq.mlx5.comp_mask |=
911 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
912 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
913 attr.cq.mlx5.cqe_comp_res_format =
914 mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
915 MLX5DV_CQE_RES_FORMAT_HASH;
917 attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
920 * For vectorized Rx, it must not be doubled in order to
921 * make cq_ci and rq_ci aligned.
923 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
924 attr.cq.ibv.cqe *= 2;
925 } else if (config->cqe_comp && rxq_data->hw_timestamp) {
927 "port %u Rx CQE compression is disabled for HW"
931 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
932 if (config->cqe_pad) {
933 attr.cq.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
934 attr.cq.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
937 tmpl->cq = mlx5_glue->cq_ex_to_cq
938 (mlx5_glue->dv_create_cq(priv->sh->ctx, &attr.cq.ibv,
940 if (tmpl->cq == NULL) {
941 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
942 dev->data->port_id, idx);
946 DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
947 dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
948 DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
949 dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
950 attr.wq.ibv = (struct ibv_wq_init_attr){
951 .wq_context = NULL, /* Could be useful in the future. */
952 .wq_type = IBV_WQT_RQ,
953 /* Max number of outstanding WRs. */
954 .max_wr = wqe_n >> rxq_data->sges_n,
955 /* Max number of scatter/gather elements in a WR. */
956 .max_sge = 1 << rxq_data->sges_n,
960 IBV_WQ_FLAGS_CVLAN_STRIPPING |
962 .create_flags = (rxq_data->vlan_strip ?
963 IBV_WQ_FLAGS_CVLAN_STRIPPING :
966 /* By default, FCS (CRC) is stripped by hardware. */
967 if (rxq_data->crc_present) {
968 attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
969 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
971 if (config->hw_padding) {
972 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
973 attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
974 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
975 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
976 attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
977 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
980 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
981 attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
985 struct mlx5dv_striding_rq_init_attr *mprq_attr =
986 &attr.wq.mlx5.striding_rq_attrs;
988 attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
989 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
990 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
991 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
992 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
995 tmpl->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &attr.wq.ibv,
998 tmpl->wq = mlx5_glue->create_wq(priv->sh->ctx, &attr.wq.ibv);
1000 if (tmpl->wq == NULL) {
1001 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
1002 dev->data->port_id, idx);
1007 * Make sure the number of WRs*SGEs matches expectations since a queue
1008 * cannot allocate more than "desc" buffers.
1010 if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
1011 attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
1013 "port %u Rx queue %u requested %u*%u but got %u*%u"
1015 dev->data->port_id, idx,
1016 wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
1017 attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
1021 /* Change queue state to ready. */
1022 mod = (struct ibv_wq_attr){
1023 .attr_mask = IBV_WQ_ATTR_STATE,
1024 .wq_state = IBV_WQS_RDY,
1026 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
1029 "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
1030 dev->data->port_id, idx);
1034 obj.cq.in = tmpl->cq;
1035 obj.cq.out = &cq_info;
1036 obj.rwq.in = tmpl->wq;
1038 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
1043 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1045 "port %u wrong MLX5_CQE_SIZE environment variable"
1046 " value: it should be set to %u",
1047 dev->data->port_id, RTE_CACHE_LINE_SIZE);
1051 /* Fill the rings. */
1052 rxq_data->wqes = rwq.buf;
1053 rxq_data->rq_db = rwq.dbrec;
1054 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
1055 rxq_data->cq_db = cq_info.dbrec;
1056 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1057 rxq_data->cq_uar = cq_info.cq_uar;
1058 rxq_data->cqn = cq_info.cqn;
1059 rxq_data->cq_arm_sn = 0;
1060 mlx5_rxq_initialize(rxq_data);
1061 rxq_data->cq_ci = 0;
1062 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1063 idx, (void *)&tmpl);
1064 rte_atomic32_inc(&tmpl->refcnt);
1065 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1066 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1070 ret = rte_errno; /* Save rte_errno before cleanup. */
1072 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1074 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1076 claim_zero(mlx5_glue->destroy_comp_channel
1079 rte_errno = ret; /* Restore rte_errno. */
1081 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1086 * Verify the Rx queue objects list is empty
1089 * Pointer to Ethernet device.
1092 * The number of objects not released.
1095 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1097 struct mlx5_priv *priv = dev->data->dev_private;
1099 struct mlx5_rxq_obj *rxq_obj;
1101 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1102 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1103 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1110 * Callback function to initialize mbufs for Multi-Packet RQ.
1113 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
1114 void *_m, unsigned int i __rte_unused)
1116 struct mlx5_mprq_buf *buf = _m;
1118 memset(_m, 0, sizeof(*buf));
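/*
 * The buffer starts with one reference owned by the PMD; strides handed to
 * the application as external mbuf buffers take additional references.
 */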
1120 rte_atomic16_set(&buf->refcnt, 1);
1124 * Free mempool of Multi-Packet RQ.
1127 * Pointer to Ethernet device.
1130 * 0 on success, negative errno value on failure.
1133 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1135 struct mlx5_priv *priv = dev->data->dev_private;
1136 struct rte_mempool *mp = priv->mprq_mp;
1141 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1142 dev->data->port_id, mp->name);
1144 * If a buffer in the pool has been externally attached to an mbuf and is
1145 * still in use by the application, destroying the Rx queue can corrupt
1146 * the packet. This is unlikely, but it can happen if the application
1147 * dynamically creates and destroys queues while holding Rx packets.
1149 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1150 * RQ isn't provided by the application but is managed by the PMD.
1152 if (!rte_mempool_full(mp)) {
1154 "port %u mempool for Multi-Packet RQ is still in use",
1155 dev->data->port_id);
1159 rte_mempool_free(mp);
1160 /* Unset mempool for each Rx queue. */
1161 for (i = 0; i != priv->rxqs_n; ++i) {
1162 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1166 rxq->mprq_mp = NULL;
1168 priv->mprq_mp = NULL;
1173 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1174 * mempool. If already allocated, reuse it if there are enough elements;
1175 * otherwise, resize it.
1178 * Pointer to Ethernet device.
1181 * 0 on success, negative errno value on failure.
1184 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1186 struct mlx5_priv *priv = dev->data->dev_private;
1187 struct rte_mempool *mp = priv->mprq_mp;
1188 char name[RTE_MEMPOOL_NAMESIZE];
1189 unsigned int desc = 0;
1190 unsigned int buf_len;
1191 unsigned int obj_num;
1192 unsigned int obj_size;
1193 unsigned int strd_num_n = 0;
1194 unsigned int strd_sz_n = 0;
1197 if (!mlx5_mprq_enabled(dev))
1199 /* Count the total number of descriptors configured. */
1200 for (i = 0; i != priv->rxqs_n; ++i) {
1201 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1205 desc += 1 << rxq->elts_n;
1206 /* Get the max number of strides. */
1207 if (strd_num_n < rxq->strd_num_n)
1208 strd_num_n = rxq->strd_num_n;
1209 /* Get the max size of a stride. */
1210 if (strd_sz_n < rxq->strd_sz_n)
1211 strd_sz_n = rxq->strd_sz_n;
1213 assert(strd_num_n && strd_sz_n);
1214 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1215 obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
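/*
 * Each pool object must hold a full WQE worth of strides plus the leading
 * mlx5_mprq_buf descriptor.
 */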
1217 * Received packets can be either memcpy'd or externally referenced. When
1218 * a packet is attached to an mbuf as an external buffer, it isn't
1219 * possible to predict how the application will queue the buffers, so
1220 * there is no way to pre-allocate exactly the needed number of buffers;
1221 * instead, speculatively prepare enough of them.
1223 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1224 * received packets to buffers provided by application (rxq->mp) until
1225 * this Mempool gets available again.
1228 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
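/*
 * One buffer per configured descriptor across all queues, plus per-queue
 * cache headroom so the shared mempool cache cannot starve any queue.
 */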
1230 * rte_mempool_create_empty() has a sanity check that refuses a cache size
1231 * that is too large compared to the number of elements.
1232 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so use a constant
1233 * of 2 instead.
1235 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1236 /* Check whether a mempool is already allocated and whether it can be reused. */
1237 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1238 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1239 dev->data->port_id, mp->name);
1242 } else if (mp != NULL) {
1243 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1244 dev->data->port_id, mp->name);
1246 * If the free fails, the mempool may still be in use; there is no choice
1247 * but to keep using the existing one. On buffer underrun, packets will
1248 * be memcpy'd instead of attached as external buffers.
1251 if (mlx5_mprq_free_mp(dev)) {
1252 if (mp->elt_size >= obj_size)
1258 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1259 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1260 0, NULL, NULL, mlx5_mprq_buf_init, NULL,
1261 dev->device->numa_node, 0);
1264 "port %u failed to allocate a mempool for"
1265 " Multi-Packet RQ, count=%u, size=%u",
1266 dev->data->port_id, obj_num, obj_size);
1272 /* Set mempool for each Rx queue. */
1273 for (i = 0; i != priv->rxqs_n; ++i) {
1274 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1280 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1281 dev->data->port_id);
1286 * Create a DPDK Rx queue.
1289 * Pointer to Ethernet device.
1293 * Number of descriptors to configure in queue.
1295 * NUMA socket on which memory must be allocated.
1298 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1300 struct mlx5_rxq_ctrl *
1301 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1302 unsigned int socket, const struct rte_eth_rxconf *conf,
1303 struct rte_mempool *mp)
1305 struct mlx5_priv *priv = dev->data->dev_private;
1306 struct mlx5_rxq_ctrl *tmpl;
1307 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1308 unsigned int mprq_stride_size;
1309 struct mlx5_dev_config *config = &priv->config;
1311 * Always allocate extra slots, even if the vector Rx path
1312 * ends up not being used.
1315 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1316 uint64_t offloads = conf->offloads |
1317 dev->data->dev_conf.rxmode.offloads;
1318 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1320 tmpl = rte_calloc_socket("RXQ", 1,
1322 desc_n * sizeof(struct rte_mbuf *),
1328 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1329 MLX5_MR_BTREE_CACHE_N, socket)) {
1330 /* rte_errno is already set. */
1333 tmpl->socket = socket;
1334 if (dev->data->dev_conf.intr_conf.rxq)
1337 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1338 * following conditions are met:
1339 * - MPRQ is enabled.
1340 * - The number of descs is more than the number of strides.
1341 * - max_rx_pkt_len plus overhead is less than the max size of a stride.
1343 * Otherwise, enable Rx scatter if necessary.
1345 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
1347 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1348 sizeof(struct rte_mbuf_ext_shared_info) +
1349 RTE_PKTMBUF_HEADROOM;
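/*
 * A stride must fit the largest Rx frame plus the shared-info footer used
 * for external buffer attachment and the mbuf headroom.
 */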
1351 desc > (1U << config->mprq.stride_num_n) &&
1352 mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
1353 /* TODO: Rx scatter isn't supported yet. */
1354 tmpl->rxq.sges_n = 0;
1355 /* Trim the number of descs needed. */
1356 desc >>= config->mprq.stride_num_n;
1357 tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
1358 tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1359 config->mprq.min_stride_size_n);
1360 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1361 tmpl->rxq.mprq_max_memcpy_len =
1362 RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
1363 config->mprq.max_memcpy_len);
1365 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1366 " strd_num_n = %u, strd_sz_n = %u",
1367 dev->data->port_id, idx,
1368 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1369 } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
1370 (mb_len - RTE_PKTMBUF_HEADROOM)) {
1371 tmpl->rxq.sges_n = 0;
1372 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1374 RTE_PKTMBUF_HEADROOM +
1375 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1376 unsigned int sges_n;
1379 * Determine the number of SGEs needed for a full packet
1380 * and round it to the next power of two.
1382 sges_n = log2above((size / mb_len) + !!(size % mb_len));
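/*
 * E.g. (hypothetical sizes) a 9000-byte max_rx_pkt_len with 2176-byte mbuf
 * data rooms gives size = 9128, i.e. ceil(9128 / 2176) = 5 segments, which
 * log2above() rounds up to 8 SGEs (sges_n = 3).
 */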
1383 tmpl->rxq.sges_n = sges_n;
1384 /* Make sure rxq.sges_n did not overflow. */
1385 size = mb_len * (1 << tmpl->rxq.sges_n);
1386 size -= RTE_PKTMBUF_HEADROOM;
1387 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
1389 "port %u too many SGEs (%u) needed to handle"
1390 " requested maximum packet size %u",
1393 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1394 rte_errno = EOVERFLOW;
1399 "port %u the requested maximum Rx packet size (%u) is"
1400 " larger than a single mbuf (%u) and scattered mode has"
1401 " not been requested",
1403 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1404 mb_len - RTE_PKTMBUF_HEADROOM);
1406 if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1408 "port %u MPRQ is requested but cannot be enabled"
1409 " (requested: desc = %u, stride_sz = %u,"
1410 " supported: min_stride_num = %u, max_stride_sz = %u).",
1411 dev->data->port_id, desc, mprq_stride_size,
1412 (1 << config->mprq.stride_num_n),
1413 (1 << config->mprq.max_stride_size_n));
1414 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1415 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1416 if (desc % (1 << tmpl->rxq.sges_n)) {
1418 "port %u number of Rx queue descriptors (%u) is not a"
1419 " multiple of SGEs per packet (%u)",
1422 1 << tmpl->rxq.sges_n);
1426 /* Toggle RX checksum offload if hardware supports it. */
1427 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1428 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1429 /* Configure VLAN stripping. */
1430 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1431 /* By default, FCS (CRC) is stripped by hardware. */
1432 tmpl->rxq.crc_present = 0;
1433 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1434 if (config->hw_fcs_strip) {
1436 * RQs used for LRO-enabled TIRs should not be
1437 * configured to scatter the FCS.
1439 if (mlx5_lro_on(dev))
1441 "port %u CRC stripping has been "
1442 "disabled but will still be performed "
1443 "by hardware, because LRO is enabled",
1444 dev->data->port_id);
1446 tmpl->rxq.crc_present = 1;
1449 "port %u CRC stripping has been disabled but will"
1450 " still be performed by hardware, make sure MLNX_OFED"
1451 " and firmware are up to date",
1452 dev->data->port_id);
1456 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1457 " incoming frames to hide it",
1459 tmpl->rxq.crc_present ? "disabled" : "enabled",
1460 tmpl->rxq.crc_present << 2);
1462 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1463 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1464 tmpl->rxq.port_id = dev->data->port_id;
1467 tmpl->rxq.elts_n = log2above(desc);
1468 tmpl->rxq.rq_repl_thresh =
1469 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
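/*
 * Replenishment threshold used by the vectorized Rx path to decide when to
 * refill the RQ with new mbufs.
 */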
1471 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1473 tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
1475 tmpl->rxq.idx = idx;
1476 rte_atomic32_inc(&tmpl->refcnt);
1477 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1488 * Pointer to Ethernet device.
1493 * A pointer to the queue if it exists, NULL otherwise.
1495 struct mlx5_rxq_ctrl *
1496 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1498 struct mlx5_priv *priv = dev->data->dev_private;
1499 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1501 if ((*priv->rxqs)[idx]) {
1502 rxq_ctrl = container_of((*priv->rxqs)[idx],
1503 struct mlx5_rxq_ctrl,
1505 mlx5_rxq_obj_get(dev, idx);
1506 rte_atomic32_inc(&rxq_ctrl->refcnt);
1512 * Release a Rx queue.
1515 * Pointer to Ethernet device.
1520 * 1 while a reference on it exists, 0 when freed.
1523 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1525 struct mlx5_priv *priv = dev->data->dev_private;
1526 struct mlx5_rxq_ctrl *rxq_ctrl;
1528 if (!(*priv->rxqs)[idx])
1530 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1531 assert(rxq_ctrl->priv);
1532 if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
1533 rxq_ctrl->obj = NULL;
1534 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1535 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1536 LIST_REMOVE(rxq_ctrl, next);
1538 (*priv->rxqs)[idx] = NULL;
1545 * Verify the Rx Queue list is empty
1548 * Pointer to Ethernet device.
1551 * The number of objects not released.
1554 mlx5_rxq_verify(struct rte_eth_dev *dev)
1556 struct mlx5_priv *priv = dev->data->dev_private;
1557 struct mlx5_rxq_ctrl *rxq_ctrl;
1560 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1561 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1562 dev->data->port_id, rxq_ctrl->rxq.idx);
1569 * Create an indirection table.
1572 * Pointer to Ethernet device.
1574 * Queues entering in the indirection table.
1576 * Number of queues in the array.
1579 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1581 static struct mlx5_ind_table_obj *
1582 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
1585 struct mlx5_priv *priv = dev->data->dev_private;
1586 struct mlx5_ind_table_obj *ind_tbl;
1587 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1588 log2above(queues_n) :
1589 log2above(priv->config.ind_table_max_size);
1590 struct ibv_wq *wq[1 << wq_n];
1594 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1595 queues_n * sizeof(uint16_t), 0);
1600 for (i = 0; i != queues_n; ++i) {
1601 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1605 wq[i] = rxq->obj->wq;
1606 ind_tbl->queues[i] = queues[i];
1608 ind_tbl->queues_n = queues_n;
1609 /* Finalise indirection table. */
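/*
 * The table size must be a power of two; pad any remaining slots by
 * repeating the already configured WQs.
 */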
1610 for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
1612 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1614 &(struct ibv_rwq_ind_table_init_attr){
1615 .log_ind_tbl_size = wq_n,
1619 if (!ind_tbl->ind_table) {
1623 rte_atomic32_inc(&ind_tbl->refcnt);
1624 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1628 DEBUG("port %u cannot create indirection table", dev->data->port_id);
1633 * Get an indirection table.
1636 * Pointer to Ethernet device.
1638 * Queues entering in the indirection table.
1640 * Number of queues in the array.
1643 * An indirection table if found.
1645 static struct mlx5_ind_table_obj *
1646 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1649 struct mlx5_priv *priv = dev->data->dev_private;
1650 struct mlx5_ind_table_obj *ind_tbl;
1652 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1653 if ((ind_tbl->queues_n == queues_n) &&
1654 (memcmp(ind_tbl->queues, queues,
1655 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1662 rte_atomic32_inc(&ind_tbl->refcnt);
1663 for (i = 0; i != ind_tbl->queues_n; ++i)
1664 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1670 * Release an indirection table.
1673 * Pointer to Ethernet device.
1675 * Indirection table to release.
1678 * 1 while a reference on it exists, 0 when freed.
1681 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
1682 struct mlx5_ind_table_obj *ind_tbl)
1686 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1687 claim_zero(mlx5_glue->destroy_rwq_ind_table
1688 (ind_tbl->ind_table));
1689 for (i = 0; i != ind_tbl->queues_n; ++i)
1690 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1691 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1692 LIST_REMOVE(ind_tbl, next);
1700 * Verify the indirection table list is empty
1703 * Pointer to Ethernet device.
1706 * The number of objects not released.
1709 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
1711 struct mlx5_priv *priv = dev->data->dev_private;
1712 struct mlx5_ind_table_obj *ind_tbl;
1715 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1717 "port %u indirection table obj %p still referenced",
1718 dev->data->port_id, (void *)ind_tbl);
1725 * Create an Rx Hash queue.
1728 * Pointer to Ethernet device.
1730 * RSS key for the Rx hash queue.
1731 * @param rss_key_len
1733 * @param hash_fields
1734 * Verbs protocol hash field to make the RSS on.
1736 * Queues included in the hash queue. If hash_fields is empty, only the
1737 * first queue index is used for the indirection table.
1744 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1747 mlx5_hrxq_new(struct rte_eth_dev *dev,
1748 const uint8_t *rss_key, uint32_t rss_key_len,
1749 uint64_t hash_fields,
1750 const uint16_t *queues, uint32_t queues_n,
1751 int tunnel __rte_unused)
1753 struct mlx5_priv *priv = dev->data->dev_private;
1754 struct mlx5_hrxq *hrxq;
1755 struct mlx5_ind_table_obj *ind_tbl;
1757 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1758 struct mlx5dv_qp_init_attr qp_init_attr;
1762 queues_n = hash_fields ? queues_n : 1;
1763 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
1765 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
1770 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1771 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
1773 qp_init_attr.comp_mask =
1774 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
1775 qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
1777 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1778 if (dev->data->dev_conf.lpbk_mode) {
1779 /* Allow packet sent from NIC loop back w/o source MAC check. */
1780 qp_init_attr.comp_mask |=
1781 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
1782 qp_init_attr.create_flags |=
1783 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
1786 qp = mlx5_glue->dv_create_qp
1788 &(struct ibv_qp_init_attr_ex){
1789 .qp_type = IBV_QPT_RAW_PACKET,
1791 IBV_QP_INIT_ATTR_PD |
1792 IBV_QP_INIT_ATTR_IND_TABLE |
1793 IBV_QP_INIT_ATTR_RX_HASH,
1794 .rx_hash_conf = (struct ibv_rx_hash_conf){
1795 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1796 .rx_hash_key_len = rss_key_len,
1797 .rx_hash_key = (void *)(uintptr_t)rss_key,
1798 .rx_hash_fields_mask = hash_fields,
1800 .rwq_ind_tbl = ind_tbl->ind_table,
1805 qp = mlx5_glue->create_qp_ex
1807 &(struct ibv_qp_init_attr_ex){
1808 .qp_type = IBV_QPT_RAW_PACKET,
1810 IBV_QP_INIT_ATTR_PD |
1811 IBV_QP_INIT_ATTR_IND_TABLE |
1812 IBV_QP_INIT_ATTR_RX_HASH,
1813 .rx_hash_conf = (struct ibv_rx_hash_conf){
1814 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1815 .rx_hash_key_len = rss_key_len,
1816 .rx_hash_key = (void *)(uintptr_t)rss_key,
1817 .rx_hash_fields_mask = hash_fields,
1819 .rwq_ind_tbl = ind_tbl->ind_table,
1827 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
1830 hrxq->ind_table = ind_tbl;
1832 hrxq->rss_key_len = rss_key_len;
1833 hrxq->hash_fields = hash_fields;
1834 memcpy(hrxq->rss_key, rss_key, rss_key_len);
1835 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1836 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
1837 if (!hrxq->action) {
1842 rte_atomic32_inc(&hrxq->refcnt);
1843 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1846 err = rte_errno; /* Save rte_errno before cleanup. */
1847 mlx5_ind_table_obj_release(dev, ind_tbl);
1849 claim_zero(mlx5_glue->destroy_qp(qp));
1850 rte_errno = err; /* Restore rte_errno. */
1855 * Get an Rx Hash queue.
1858 * Pointer to Ethernet device.
1860 * RSS configuration for the Rx hash queue.
1862 * Queues included in the hash queue. If hash_fields is empty, only the
1863 * first queue index is used for the indirection table.
1868 * A hash Rx queue on success.
1871 mlx5_hrxq_get(struct rte_eth_dev *dev,
1872 const uint8_t *rss_key, uint32_t rss_key_len,
1873 uint64_t hash_fields,
1874 const uint16_t *queues, uint32_t queues_n)
1876 struct mlx5_priv *priv = dev->data->dev_private;
1877 struct mlx5_hrxq *hrxq;
1879 queues_n = hash_fields ? queues_n : 1;
1880 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1881 struct mlx5_ind_table_obj *ind_tbl;
1883 if (hrxq->rss_key_len != rss_key_len)
1885 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1887 if (hrxq->hash_fields != hash_fields)
1889 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
1892 if (ind_tbl != hrxq->ind_table) {
1893 mlx5_ind_table_obj_release(dev, ind_tbl);
1896 rte_atomic32_inc(&hrxq->refcnt);
1903 * Release the hash Rx queue.
1906 * Pointer to Ethernet device.
1908 * Pointer to Hash Rx queue to release.
1911 * 1 while a reference on it exists, 0 when freed.
1914 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
1916 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1917 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1918 mlx5_glue->destroy_flow_action(hrxq->action);
1920 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1921 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
1922 LIST_REMOVE(hrxq, next);
1926 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
1931 * Verify the hash Rx queue list is empty
1934 * Pointer to Ethernet device.
1937 * The number of objects not released.
1940 mlx5_hrxq_verify(struct rte_eth_dev *dev)
1942 struct mlx5_priv *priv = dev->data->dev_private;
1943 struct mlx5_hrxq *hrxq;
1946 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1948 "port %u hash Rx queue %p still referenced",
1949 dev->data->port_id, (void *)hrxq);
1956 * Create a drop Rx queue Verbs/DevX object.
1959 * Pointer to Ethernet device.
1962 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1964 static struct mlx5_rxq_obj *
1965 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
1967 struct mlx5_priv *priv = dev->data->dev_private;
1968 struct ibv_context *ctx = priv->sh->ctx;
1970 struct ibv_wq *wq = NULL;
1971 struct mlx5_rxq_obj *rxq;
1973 if (priv->drop_queue.rxq)
1974 return priv->drop_queue.rxq;
1975 cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
1977 DEBUG("port %u cannot allocate CQ for drop queue",
1978 dev->data->port_id);
1982 wq = mlx5_glue->create_wq(ctx,
1983 &(struct ibv_wq_init_attr){
1984 .wq_type = IBV_WQT_RQ,
1991 DEBUG("port %u cannot allocate WQ for drop queue",
1992 dev->data->port_id);
1996 rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
1998 DEBUG("port %u cannot allocate drop Rx queue memory",
1999 dev->data->port_id);
2005 priv->drop_queue.rxq = rxq;
2009 claim_zero(mlx5_glue->destroy_wq(wq));
2011 claim_zero(mlx5_glue->destroy_cq(cq));
2016 * Release a drop Rx queue Verbs/DevX object.
2019 * Pointer to Ethernet device.
2022 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2025 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2027 struct mlx5_priv *priv = dev->data->dev_private;
2028 struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2031 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2033 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2035 priv->drop_queue.rxq = NULL;
2039 * Create a drop indirection table.
2042 * Pointer to Ethernet device.
2045 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2047 static struct mlx5_ind_table_obj *
2048 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2050 struct mlx5_priv *priv = dev->data->dev_private;
2051 struct mlx5_ind_table_obj *ind_tbl;
2052 struct mlx5_rxq_obj *rxq;
2053 struct mlx5_ind_table_obj tmpl;
2055 rxq = mlx5_rxq_obj_drop_new(dev);
2058 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2060 &(struct ibv_rwq_ind_table_init_attr){
2061 .log_ind_tbl_size = 0,
2062 .ind_tbl = &rxq->wq,
2065 if (!tmpl.ind_table) {
2066 DEBUG("port %u cannot allocate indirection table for drop"
2068 dev->data->port_id);
2072 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2077 ind_tbl->ind_table = tmpl.ind_table;
2080 mlx5_rxq_obj_drop_release(dev);
2085 * Release a drop indirection table.
2088 * Pointer to Ethernet device.
2091 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2093 struct mlx5_priv *priv = dev->data->dev_private;
2094 struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2096 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2097 mlx5_rxq_obj_drop_release(dev);
2099 priv->drop_queue.hrxq->ind_table = NULL;
2103 * Create a drop Rx Hash queue.
2106 * Pointer to Ethernet device.
2109 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2112 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2114 struct mlx5_priv *priv = dev->data->dev_private;
2115 struct mlx5_ind_table_obj *ind_tbl;
2117 struct mlx5_hrxq *hrxq;
2119 if (priv->drop_queue.hrxq) {
2120 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2121 return priv->drop_queue.hrxq;
2123 ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2126 qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2127 &(struct ibv_qp_init_attr_ex){
2128 .qp_type = IBV_QPT_RAW_PACKET,
2130 IBV_QP_INIT_ATTR_PD |
2131 IBV_QP_INIT_ATTR_IND_TABLE |
2132 IBV_QP_INIT_ATTR_RX_HASH,
2133 .rx_hash_conf = (struct ibv_rx_hash_conf){
2135 IBV_RX_HASH_FUNC_TOEPLITZ,
2136 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2137 .rx_hash_key = rss_hash_default_key,
2138 .rx_hash_fields_mask = 0,
2140 .rwq_ind_tbl = ind_tbl->ind_table,
2144 DEBUG("port %u cannot allocate QP for drop queue",
2145 dev->data->port_id);
2149 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2152 "port %u cannot allocate memory for drop queue",
2153 dev->data->port_id);
2157 hrxq->ind_table = ind_tbl;
2159 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2160 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2161 if (!hrxq->action) {
2166 priv->drop_queue.hrxq = hrxq;
2167 rte_atomic32_set(&hrxq->refcnt, 1);
2171 mlx5_ind_table_obj_drop_release(dev);
2176 * Release a drop hash Rx queue.
2179 * Pointer to Ethernet device.
2182 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2184 struct mlx5_priv *priv = dev->data->dev_private;
2185 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2187 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2188 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2189 mlx5_glue->destroy_flow_action(hrxq->action);
2191 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2192 mlx5_ind_table_obj_drop_release(dev);
2194 priv->drop_queue.hrxq = NULL;