1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox.
12 #include <sys/queue.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42 0x2c, 0xc6, 0x81, 0xd1,
43 0x5b, 0xdb, 0xf4, 0xf7,
44 0xfc, 0xa2, 0x83, 0x19,
45 0xdb, 0x1a, 0x3e, 0x94,
46 0x6b, 0x9e, 0x38, 0xd9,
47 0x2c, 0x9c, 0x03, 0xd1,
48 0xad, 0x99, 0x44, 0xa7,
49 0xd9, 0x56, 0x3d, 0x59,
50 0x06, 0x3c, 0x25, 0xf3,
51 0xfc, 0x1f, 0xdc, 0x2a,
54 /* Length of the default RSS hash key. */
55 const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
58 * Allocate RX queue elements.
61 * Pointer to RX queue structure.
64 * 0 on success, errno value on failure.
67 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
69 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
70 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
74 /* Iterate on segments. */
75 for (i = 0; (i != elts_n); ++i) {
78 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
80 ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
84 /* Headroom is reserved by rte_pktmbuf_alloc(). */
85 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
86 /* Buffer is supposed to be empty. */
87 assert(rte_pktmbuf_data_len(buf) == 0);
88 assert(rte_pktmbuf_pkt_len(buf) == 0);
90 /* Only the first segment keeps headroom. */
93 PORT(buf) = rxq_ctrl->rxq.port_id;
94 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
95 PKT_LEN(buf) = DATA_LEN(buf);
97 (*rxq_ctrl->rxq.elts)[i] = buf;
99 /* If Rx vector is activated. */
100 if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
101 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
102 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
105 /* Initialize default rearm_data for vPMD. */
106 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
107 rte_mbuf_refcnt_set(mbuf_init, 1);
108 mbuf_init->nb_segs = 1;
109 mbuf_init->port = rxq->port_id;
111 * prevent compiler reordering:
112 * rearm_data covers previous fields.
114 rte_compiler_barrier();
115 rxq->mbuf_initializer =
116 *(uint64_t *)&mbuf_init->rearm_data;
117 /* Padding with a fake mbuf for vectorized Rx. */
118 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
119 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
121 DEBUG("%p: allocated and configured %u segments (max %u packets)",
122 (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
127 for (i = 0; (i != elts_n); ++i) {
128 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
129 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
130 (*rxq_ctrl->rxq.elts)[i] = NULL;
132 DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
138 * Free RX queue elements.
141 * Pointer to RX queue structure.
144 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
146 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
147 const uint16_t q_n = (1 << rxq->elts_n);
148 const uint16_t q_mask = q_n - 1;
149 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
152 DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
153 if (rxq->elts == NULL)
156 * Some mbufs in the ring belong to the application. They cannot be
159 if (rxq_check_vec_support(rxq) > 0) {
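/* Mbufs in these slots belong to the application; clear the entries so the
 * generic free loop below does not touch them. */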
160 for (i = 0; i < used; ++i)
161 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
162 rxq->rq_pi = rxq->rq_ci;
164 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
165 if ((*rxq->elts)[i] != NULL)
166 rte_pktmbuf_free_seg((*rxq->elts)[i]);
167 (*rxq->elts)[i] = NULL;
172 * Clean up an RX queue.
174 * Destroy objects, free allocated memory and reset the structure for reuse.
177 * Pointer to RX queue structure.
180 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
182 DEBUG("cleaning up %p", (void *)rxq_ctrl);
184 mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
185 memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
189 * Returns the per-queue supported offloads.
192 * Pointer to private structure.
195 * Supported Rx offloads.
198 mlx5_priv_get_rx_queue_offloads(struct priv *priv)
200 struct mlx5_dev_config *config = &priv->config;
201 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
202 DEV_RX_OFFLOAD_TIMESTAMP |
203 DEV_RX_OFFLOAD_JUMBO_FRAME);
205 if (config->hw_fcs_strip)
206 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
208 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
209 DEV_RX_OFFLOAD_UDP_CKSUM |
210 DEV_RX_OFFLOAD_TCP_CKSUM);
211 if (config->hw_vlan_strip)
212 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
218 * Returns the per-port supported offloads.
221 * Pointer to private structure.
223 * Supported Rx offloads.
226 mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
228 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
234 * Checks if the per-queue offload configuration is valid.
237 * Pointer to private structure.
239 * Per-queue offloads configuration.
242 * 1 if the configuration is valid, 0 otherwise.
245 priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
247 uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
248 uint64_t queue_supp_offloads =
249 mlx5_priv_get_rx_queue_offloads(priv);
250 uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv);
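/* Every requested offload must be supported, and offloads that apply at the
 * port level must match the current port configuration. */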
252 if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
255 if (((port_offloads ^ offloads) & port_supp_offloads))
263 * Pointer to Ethernet device structure.
267 * Number of descriptors to configure in queue.
269 * NUMA socket on which memory must be allocated.
271 * Thresholds parameters.
273 * Memory pool for buffer allocations.
276 * 0 on success, negative errno value on failure.
279 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
280 unsigned int socket, const struct rte_eth_rxconf *conf,
281 struct rte_mempool *mp)
283 struct priv *priv = dev->data->dev_private;
284 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
285 struct mlx5_rxq_ctrl *rxq_ctrl =
286 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
290 if (!rte_is_power_of_2(desc)) {
291 desc = 1 << log2above(desc);
292 WARN("%p: increased number of descriptors in RX queue %u"
293 " to the next power of two (%d)",
294 (void *)dev, idx, desc);
296 DEBUG("%p: configuring queue %u for %u descriptors",
297 (void *)dev, idx, desc);
298 if (idx >= priv->rxqs_n) {
299 ERROR("%p: queue index out of range (%u >= %u)",
300 (void *)dev, idx, priv->rxqs_n);
304 if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
306 ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
307 "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
308 (void *)dev, conf->offloads,
309 dev->data->dev_conf.rxmode.offloads,
310 (mlx5_priv_get_rx_port_offloads(priv) |
311 mlx5_priv_get_rx_queue_offloads(priv)));
314 if (!mlx5_priv_rxq_releasable(priv, idx)) {
316 ERROR("%p: unable to release queue index %u",
320 mlx5_priv_rxq_release(priv, idx);
321 rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp);
323 ERROR("%p: unable to allocate queue index %u",
328 DEBUG("%p: adding RX queue %p to list",
329 (void *)dev, (void *)rxq_ctrl);
330 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
337 * DPDK callback to release a RX queue.
340 * Generic RX queue pointer.
343 mlx5_rx_queue_release(void *dpdk_rxq)
345 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
346 struct mlx5_rxq_ctrl *rxq_ctrl;
351 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
352 priv = rxq_ctrl->priv;
354 if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx))
355 rte_panic("Rx queue %p is still used by a flow and cannot be"
356 " removed\n", (void *)rxq_ctrl);
357 mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx);
362 * Allocate queue vector and fill epoll fd list for Rx interrupts.
365 * Pointer to private structure.
368 * 0 on success, negative on failure.
371 priv_rx_intr_vec_enable(struct priv *priv)
374 unsigned int rxqs_n = priv->rxqs_n;
375 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
376 unsigned int count = 0;
377 struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
379 if (!priv->dev->data->dev_conf.intr_conf.rxq)
381 priv_rx_intr_vec_disable(priv);
382 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
383 if (intr_handle->intr_vec == NULL) {
384 ERROR("failed to allocate memory for interrupt vector,"
385 " Rx interrupts will not be supported");
388 intr_handle->type = RTE_INTR_HANDLE_EXT;
389 for (i = 0; i != n; ++i) {
390 /* This rxq ibv must not be released in this function. */
391 struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i);
396 /* Skip queues that cannot request interrupts. */
397 if (!rxq_ibv || !rxq_ibv->channel) {
398 /* Use invalid intr_vec[] index to disable entry. */
399 intr_handle->intr_vec[i] =
400 RTE_INTR_VEC_RXTX_OFFSET +
401 RTE_MAX_RXTX_INTR_VEC_ID;
404 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
405 ERROR("too many Rx queues for interrupt vector size"
406 " (%d), Rx interrupts cannot be enabled",
407 RTE_MAX_RXTX_INTR_VEC_ID);
408 priv_rx_intr_vec_disable(priv);
411 fd = rxq_ibv->channel->fd;
412 flags = fcntl(fd, F_GETFL);
413 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
415 ERROR("failed to make Rx interrupt file descriptor"
416 " %d non-blocking for queue index %d", fd, i);
417 priv_rx_intr_vec_disable(priv);
420 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
421 intr_handle->efds[count] = fd;
425 priv_rx_intr_vec_disable(priv);
427 intr_handle->nb_efd = count;
432 * Clean up the Rx interrupts handler.
435 * Pointer to private structure.
438 priv_rx_intr_vec_disable(struct priv *priv)
440 struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
442 unsigned int rxqs_n = priv->rxqs_n;
443 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
445 if (!priv->dev->data->dev_conf.intr_conf.rxq)
447 if (!intr_handle->intr_vec)
449 for (i = 0; i != n; ++i) {
450 struct mlx5_rxq_ctrl *rxq_ctrl;
451 struct mlx5_rxq_data *rxq_data;
453 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
454 RTE_MAX_RXTX_INTR_VEC_ID)
457 * Need to access the queue directly to release the reference
458 * kept in priv_rx_intr_vec_enable().
460 rxq_data = (*priv->rxqs)[i];
461 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
462 mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
465 rte_intr_free_epoll_fd(intr_handle);
466 if (intr_handle->intr_vec)
467 free(intr_handle->intr_vec);
468 intr_handle->nb_efd = 0;
469 intr_handle->intr_vec = NULL;
473 * MLX5 CQ notification.
476 * Pointer to receive queue structure.
478 * Sequence number per receive queue.
481 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
484 uint32_t doorbell_hi;
486 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
488 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
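/* The 64-bit doorbell holds the arm sequence number and CQ consumer index in
 * its high word and the CQ number in its low word; the CQ doorbell record is
 * updated first, then the UAR register is rung. */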
489 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
490 doorbell = (uint64_t)doorbell_hi << 32;
491 doorbell |= rxq->cqn;
492 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
493 rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
497 * DPDK callback for Rx queue interrupt enable.
500 * Pointer to Ethernet device structure.
505 * 0 on success, negative on failure.
508 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
510 struct priv *priv = dev->data->dev_private;
511 struct mlx5_rxq_data *rxq_data;
512 struct mlx5_rxq_ctrl *rxq_ctrl;
516 rxq_data = (*priv->rxqs)[rx_queue_id];
521 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
523 struct mlx5_rxq_ibv *rxq_ibv;
525 rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
530 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
531 mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
536 WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
541 * DPDK callback for Rx queue interrupt disable.
544 * Pointer to Ethernet device structure.
549 * 0 on success, negative on failure.
552 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
554 struct priv *priv = dev->data->dev_private;
555 struct mlx5_rxq_data *rxq_data;
556 struct mlx5_rxq_ctrl *rxq_ctrl;
557 struct mlx5_rxq_ibv *rxq_ibv = NULL;
558 struct ibv_cq *ev_cq;
563 rxq_data = (*priv->rxqs)[rx_queue_id];
568 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
571 rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
576 ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
577 if (ret || ev_cq != rxq_ibv->cq) {
581 rxq_data->cq_arm_sn++;
582 mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
585 mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
588 WARN("unable to disable interrupt on rx queue %d",
594 * Create the Rx queue Verbs object.
597 * Pointer to private structure.
599 * Queue index in DPDK Rx queue array.
602 * The Verbs object initialised if it can be created.
604 struct mlx5_rxq_ibv *
605 mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
607 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
608 struct mlx5_rxq_ctrl *rxq_ctrl =
609 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
610 struct ibv_wq_attr mod;
613 struct ibv_cq_init_attr_ex ibv;
614 struct mlx5dv_cq_init_attr mlx5;
616 struct ibv_wq_init_attr wq;
617 struct ibv_cq_ex cq_attr;
619 unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
620 struct mlx5_rxq_ibv *tmpl;
621 struct mlx5dv_cq cq_info;
622 struct mlx5dv_rwq rwq;
625 struct mlx5dv_obj obj;
626 struct mlx5_dev_config *config = &priv->config;
629 assert(!rxq_ctrl->ibv);
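/* Record this queue as the current Verbs allocation context so that Verbs
 * buffers are allocated on its NUMA socket. */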
630 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
631 priv->verbs_alloc_ctx.obj = rxq_ctrl;
632 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
635 ERROR("%p: cannot allocate verbs resources",
639 tmpl->rxq_ctrl = rxq_ctrl;
640 /* Use the entire RX mempool as the memory region. */
641 tmpl->mr = priv_mr_get(priv, rxq_data->mp);
643 tmpl->mr = priv_mr_new(priv, rxq_data->mp);
645 ERROR("%p: MR creation failure", (void *)rxq_ctrl);
650 tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
651 if (!tmpl->channel) {
652 ERROR("%p: Comp Channel creation failure",
657 attr.cq.ibv = (struct ibv_cq_init_attr_ex){
659 .channel = tmpl->channel,
662 attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
665 if (config->cqe_comp && !rxq_data->hw_timestamp) {
666 attr.cq.mlx5.comp_mask |=
667 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
668 attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
670 * For vectorized Rx, it must not be doubled in order to
671 * make cq_ci and rq_ci aligned.
673 if (rxq_check_vec_support(rxq_data) < 0)
674 attr.cq.ibv.cqe *= 2;
675 } else if (config->cqe_comp && rxq_data->hw_timestamp) {
676 DEBUG("Rx CQE compression is disabled for HW timestamp");
678 tmpl->cq = mlx5_glue->cq_ex_to_cq
679 (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
681 if (tmpl->cq == NULL) {
682 ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
685 DEBUG("priv->device_attr.max_qp_wr is %d",
686 priv->device_attr.orig_attr.max_qp_wr);
687 DEBUG("priv->device_attr.max_sge is %d",
688 priv->device_attr.orig_attr.max_sge);
689 attr.wq = (struct ibv_wq_init_attr){
690 .wq_context = NULL, /* Could be useful in the future. */
691 .wq_type = IBV_WQT_RQ,
692 /* Max number of outstanding WRs. */
693 .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
694 /* Max number of scatter/gather elements in a WR. */
695 .max_sge = 1 << rxq_data->sges_n,
699 IBV_WQ_FLAGS_CVLAN_STRIPPING |
701 .create_flags = (rxq_data->vlan_strip ?
702 IBV_WQ_FLAGS_CVLAN_STRIPPING :
705 /* By default, FCS (CRC) is stripped by hardware. */
706 if (rxq_data->crc_present) {
707 attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
708 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
710 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
711 if (config->hw_padding) {
712 attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
713 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
716 tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
717 if (tmpl->wq == NULL) {
718 ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
722 * Make sure the number of WRs*SGEs matches expectations since a queue
723 * cannot allocate more than "desc" buffers.
725 if (((int)attr.wq.max_wr !=
726 ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
727 ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
728 ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
730 ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
731 (1 << rxq_data->sges_n),
732 attr.wq.max_wr, attr.wq.max_sge);
735 /* Change queue state to ready. */
736 mod = (struct ibv_wq_attr){
737 .attr_mask = IBV_WQ_ATTR_STATE,
738 .wq_state = IBV_WQS_RDY,
740 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
742 ERROR("%p: WQ state to IBV_WQS_RDY failed",
746 obj.cq.in = tmpl->cq;
747 obj.cq.out = &cq_info;
748 obj.rwq.in = tmpl->wq;
750 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
753 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
754 ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
755 "it should be set to %u", RTE_CACHE_LINE_SIZE);
758 /* Fill the rings. */
759 rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
761 for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
762 struct rte_mbuf *buf = (*rxq_data->elts)[i];
763 volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];
765 /* scat->addr must be able to store a pointer. */
766 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
767 *scat = (struct mlx5_wqe_data_seg){
768 .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
770 .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
771 .lkey = tmpl->mr->lkey,
774 rxq_data->rq_db = rwq.dbrec;
775 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
779 rxq_data->zip = (struct rxq_zip){
782 rxq_data->cq_db = cq_info.dbrec;
783 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
784 rxq_data->cq_uar = cq_info.cq_uar;
785 rxq_data->cqn = cq_info.cqn;
786 rxq_data->cq_arm_sn = 0;
787 /* Update doorbell counter. */
788 rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
790 *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
791 DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
792 rte_atomic32_inc(&tmpl->refcnt);
793 DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
794 (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
795 LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
796 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
800 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
802 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
804 claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
806 priv_mr_release(priv, tmpl->mr);
807 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
812 * Get an Rx queue Verbs object.
815 * Pointer to private structure.
817 * Queue index in DPDK Rx queue array.
820 * The Verbs object if it exists.
822 struct mlx5_rxq_ibv *
823 mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
825 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
826 struct mlx5_rxq_ctrl *rxq_ctrl;
828 if (idx >= priv->rxqs_n)
832 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
834 priv_mr_get(priv, rxq_data->mp);
835 rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
836 DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
837 (void *)rxq_ctrl->ibv,
838 rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
840 return rxq_ctrl->ibv;
844 * Release an Rx Verbs queue object.
847 * Pointer to private structure.
849 * Verbs Rx queue object.
852 * 0 on success, errno value on failure.
855 mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
863 ret = priv_mr_release(priv, rxq_ibv->mr);
866 DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
867 (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt));
868 if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
869 rxq_free_elts(rxq_ibv->rxq_ctrl);
870 claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
871 claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
872 if (rxq_ibv->channel)
873 claim_zero(mlx5_glue->destroy_comp_channel
875 LIST_REMOVE(rxq_ibv, next);
883 * Verify the Verbs Rx queue list is empty.
886 * Pointer to private structure.
888 * @return the number of objects not released.
891 mlx5_priv_rxq_ibv_verify(struct priv *priv)
894 struct mlx5_rxq_ibv *rxq_ibv;
896 LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
897 DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv,
905 * Return true if a single reference exists on the object.
908 * Pointer to private structure.
910 * Verbs Rx queue object.
913 mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused,
914 struct mlx5_rxq_ibv *rxq_ibv)
917 return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
921 * Create a DPDK Rx queue.
924 * Pointer to private structure.
928 * Number of descriptors to configure in queue.
930 * NUMA socket on which memory must be allocated.
933 * A DPDK queue object on success.
935 struct mlx5_rxq_ctrl *
936 mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
937 unsigned int socket, const struct rte_eth_rxconf *conf,
938 struct rte_mempool *mp)
940 struct rte_eth_dev *dev = priv->dev;
941 struct mlx5_rxq_ctrl *tmpl;
942 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
943 struct mlx5_dev_config *config = &priv->config;
945 * Always allocate extra slots, even if eventually
946 * the vector Rx will not be used.
948 const uint16_t desc_n =
949 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
951 tmpl = rte_calloc_socket("RXQ", 1,
953 desc_n * sizeof(struct rte_mbuf *),
957 tmpl->socket = socket;
958 if (priv->dev->data->dev_conf.intr_conf.rxq)
960 /* Enable scattered packets support for this queue if necessary. */
961 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
962 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
963 (mb_len - RTE_PKTMBUF_HEADROOM)) {
964 tmpl->rxq.sges_n = 0;
965 } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
967 RTE_PKTMBUF_HEADROOM +
968 dev->data->dev_conf.rxmode.max_rx_pkt_len;
972 * Determine the number of SGEs needed for a full packet
973 * and round it to the next power of two.
975 sges_n = log2above((size / mb_len) + !!(size % mb_len));
976 tmpl->rxq.sges_n = sges_n;
977 /* Make sure rxq.sges_n did not overflow. */
978 size = mb_len * (1 << tmpl->rxq.sges_n);
979 size -= RTE_PKTMBUF_HEADROOM;
980 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
981 ERROR("%p: too many SGEs (%u) needed to handle"
982 " requested maximum packet size %u",
985 dev->data->dev_conf.rxmode.max_rx_pkt_len);
989 WARN("%p: the requested maximum Rx packet size (%u) is"
990 " larger than a single mbuf (%u) and scattered"
991 " mode has not been requested",
993 dev->data->dev_conf.rxmode.max_rx_pkt_len,
994 mb_len - RTE_PKTMBUF_HEADROOM);
996 DEBUG("%p: maximum number of segments per packet: %u",
997 (void *)dev, 1 << tmpl->rxq.sges_n);
998 if (desc % (1 << tmpl->rxq.sges_n)) {
999 ERROR("%p: number of RX queue descriptors (%u) is not a"
1000 " multiple of SGEs per packet (%u)",
1003 1 << tmpl->rxq.sges_n);
1006 /* Toggle RX checksum offload if hardware supports it. */
1007 tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
1008 tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
1009 priv->config.tunnel_en);
1010 tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1011 /* Configure VLAN stripping. */
1012 tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1013 /* By default, FCS (CRC) is stripped by hardware. */
1014 if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
1015 tmpl->rxq.crc_present = 0;
1016 } else if (config->hw_fcs_strip) {
1017 tmpl->rxq.crc_present = 1;
1019 WARN("%p: CRC stripping has been disabled but will still"
1020 " be performed by hardware, make sure MLNX_OFED and"
1021 " firmware are up to date",
1023 tmpl->rxq.crc_present = 0;
1025 DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
1026 " incoming frames to hide it",
1028 tmpl->rxq.crc_present ? "disabled" : "enabled",
1029 tmpl->rxq.crc_present << 2);
1031 tmpl->rxq.rss_hash = priv->rxqs_n > 1;
1032 tmpl->rxq.port_id = dev->data->port_id;
1035 tmpl->rxq.stats.idx = idx;
1036 tmpl->rxq.elts_n = log2above(desc);
1038 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1039 rte_atomic32_inc(&tmpl->refcnt);
1040 DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
1041 (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
1042 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1053 * Pointer to private structure.
1058 * A pointer to the queue if it exists.
1060 struct mlx5_rxq_ctrl *
1061 mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
1063 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1065 if ((*priv->rxqs)[idx]) {
1066 rxq_ctrl = container_of((*priv->rxqs)[idx],
1067 struct mlx5_rxq_ctrl,
1070 mlx5_priv_rxq_ibv_get(priv, idx);
1071 rte_atomic32_inc(&rxq_ctrl->refcnt);
1072 DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
1073 (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
1079 * Release an Rx queue.
1082 * Pointer to private structure.
1087 * 0 on success, errno value on failure.
1090 mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)
1092 struct mlx5_rxq_ctrl *rxq_ctrl;
1094 if (!(*priv->rxqs)[idx])
1096 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1097 assert(rxq_ctrl->priv);
1098 if (rxq_ctrl->ibv) {
1101 ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
1103 rxq_ctrl->ibv = NULL;
1105 DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
1106 (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
1107 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1108 LIST_REMOVE(rxq_ctrl, next);
1110 (*priv->rxqs)[idx] = NULL;
1117 * Verify if the queue can be released.
1120 * Pointer to private structure.
1125 * 1 if the queue can be released.
1128 mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
1130 struct mlx5_rxq_ctrl *rxq_ctrl;
1132 if (!(*priv->rxqs)[idx])
1134 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1135 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
1139 * Verify the Rx queue list is empty.
1142 * Pointer to private structure.
1144 * @return the number of objects not released.
1147 mlx5_priv_rxq_verify(struct priv *priv)
1149 struct mlx5_rxq_ctrl *rxq_ctrl;
1152 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1153 DEBUG("%p: Rx Queue %p still referenced", (void *)priv,
1161 * Create an indirection table.
1164 * Pointer to private structure.
1166 * Queues to include in the indirection table.
1168 * Number of queues in the array.
1171 * A new indirection table.
1173 struct mlx5_ind_table_ibv *
1174 mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
1177 struct mlx5_ind_table_ibv *ind_tbl;
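/* The Verbs indirection table size must be a power of two; when queues_n is
 * not, the maximum size supported by the device is used instead. */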
1178 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1179 log2above(queues_n) :
1180 log2above(priv->config.ind_table_max_size);
1181 struct ibv_wq *wq[1 << wq_n];
1185 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1186 queues_n * sizeof(uint16_t), 0);
1189 for (i = 0; i != queues_n; ++i) {
1190 struct mlx5_rxq_ctrl *rxq =
1191 mlx5_priv_rxq_get(priv, queues[i]);
1195 wq[i] = rxq->ibv->wq;
1196 ind_tbl->queues[i] = queues[i];
1198 ind_tbl->queues_n = queues_n;
1199 /* Finalise indirection table: pad to the power-of-two size by reusing the existing WQs. */
1200 for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
1202 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1204 &(struct ibv_rwq_ind_table_init_attr){
1205 .log_ind_tbl_size = wq_n,
1209 if (!ind_tbl->ind_table)
1211 rte_atomic32_inc(&ind_tbl->refcnt);
1212 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1213 DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
1214 (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
1218 DEBUG("%p cannot create indirection table", (void *)priv);
1223 * Get an indirection table.
1226 * Pointer to private structure.
1228 * Queues to include in the indirection table.
1230 * Number of queues in the array.
1233 * An indirection table if found.
1235 struct mlx5_ind_table_ibv *
1236 mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
1239 struct mlx5_ind_table_ibv *ind_tbl;
1241 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1242 if ((ind_tbl->queues_n == queues_n) &&
1243 (memcmp(ind_tbl->queues, queues,
1244 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1251 rte_atomic32_inc(&ind_tbl->refcnt);
1252 DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
1253 (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
1254 for (i = 0; i != ind_tbl->queues_n; ++i)
1255 mlx5_priv_rxq_get(priv, ind_tbl->queues[i]);
1261 * Release an indirection table.
1264 * Pointer to private structure.
1266 * Indirection table to release.
1269 * 0 on success, errno value on failure.
1272 mlx5_priv_ind_table_ibv_release(struct priv *priv,
1273 struct mlx5_ind_table_ibv *ind_tbl)
1277 DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
1278 (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
1279 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1280 claim_zero(mlx5_glue->destroy_rwq_ind_table
1281 (ind_tbl->ind_table));
1282 for (i = 0; i != ind_tbl->queues_n; ++i)
1283 claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
1284 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1285 LIST_REMOVE(ind_tbl, next);
1293 * Verify the Verbs indirection table list is empty.
1296 * Pointer to private structure.
1298 * @return the number of objects not released.
1301 mlx5_priv_ind_table_ibv_verify(struct priv *priv)
1303 struct mlx5_ind_table_ibv *ind_tbl;
1306 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1307 DEBUG("%p: Verbs indirection table %p still referenced",
1308 (void *)priv, (void *)ind_tbl);
1315 * Create an Rx Hash queue.
1318 * Pointer to private structure.
1320 * RSS key for the Rx hash queue.
1321 * @param rss_key_len
1323 * @param hash_fields
1324 * Verbs protocol hash fields to apply RSS on.
1326 * Queues entering the hash queue. When hash_fields is empty, only the
1327 * first queue index is used for the indirection table.
1332 * A hash Rx queue on success.
1335 mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
1336 uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
1338 struct mlx5_hrxq *hrxq;
1339 struct mlx5_ind_table_ibv *ind_tbl;
1342 queues_n = hash_fields ? queues_n : 1;
1343 ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
1345 ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
1348 qp = mlx5_glue->create_qp_ex
1350 &(struct ibv_qp_init_attr_ex){
1351 .qp_type = IBV_QPT_RAW_PACKET,
1353 IBV_QP_INIT_ATTR_PD |
1354 IBV_QP_INIT_ATTR_IND_TABLE |
1355 IBV_QP_INIT_ATTR_RX_HASH,
1356 .rx_hash_conf = (struct ibv_rx_hash_conf){
1357 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1358 .rx_hash_key_len = rss_key_len,
1359 .rx_hash_key = rss_key,
1360 .rx_hash_fields_mask = hash_fields,
1362 .rwq_ind_tbl = ind_tbl->ind_table,
1367 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
1370 hrxq->ind_table = ind_tbl;
1372 hrxq->rss_key_len = rss_key_len;
1373 hrxq->hash_fields = hash_fields;
1374 memcpy(hrxq->rss_key, rss_key, rss_key_len);
1375 rte_atomic32_inc(&hrxq->refcnt);
1376 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1377 DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
1378 (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
1381 mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
1383 claim_zero(mlx5_glue->destroy_qp(qp));
1388 * Get an Rx Hash queue.
1391 * Pointer to private structure.
1393 * RSS configuration for the Rx hash queue.
1395 * Queues entering the hash queue. When hash_fields is empty, only the
1396 * first queue index is used for the indirection table.
1401 * A hash Rx queue on success.
1404 mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
1405 uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
1407 struct mlx5_hrxq *hrxq;
1409 queues_n = hash_fields ? queues_n : 1;
1410 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1411 struct mlx5_ind_table_ibv *ind_tbl;
1413 if (hrxq->rss_key_len != rss_key_len)
1415 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1417 if (hrxq->hash_fields != hash_fields)
1419 ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
1422 if (ind_tbl != hrxq->ind_table) {
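/* mlx5_priv_ind_table_ibv_get() took a reference on a different table;
 * drop it and keep looking. */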
1423 mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
1426 rte_atomic32_inc(&hrxq->refcnt);
1427 DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
1428 (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
1435 * Release the hash Rx queue.
1438 * Pointer to private structure.
1440 * Pointer to Hash Rx queue to release.
1443 * 0 on success, errno value on failure.
1446 mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
1448 DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
1449 (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
1450 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1451 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1452 mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
1453 LIST_REMOVE(hrxq, next);
1457 claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table));
1462 * Verify the hash Rx queue list is empty.
1465 * Pointer to private structure.
1467 * @return the number of objects not released.
1470 mlx5_priv_hrxq_ibv_verify(struct priv *priv)
1472 struct mlx5_hrxq *hrxq;
1475 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1476 DEBUG("%p: Verbs Hash Rx queue %p still referenced",
1477 (void *)priv, (void *)hrxq);