/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/queue.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_io.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};

/* Length of the default RSS hash key. */
const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
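
/*
 * Note: the key above is 40 bytes long (10 rows of 4 bytes), the usual
 * Toeplitz key size, so rss_hash_default_key_len below evaluates to 40.
 * Presumably it is the key used by mlx5_priv_hrxq_new() when the
 * application does not provide its own RSS key.
 */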

/**
 * Allocate RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;

	/* Iterate on segments. */
	for (i = 0; (i != elts_n); ++i) {
		struct rte_mbuf *buf;

		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
			ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* Only the first segment keeps headroom. */
			SET_DATA_OFF(buf, 0);
		PORT(buf) = rxq_ctrl->rxq.port_id;
		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
		PKT_LEN(buf) = DATA_LEN(buf);
		(*rxq_ctrl->rxq.elts)[i] = buf;
	/* If Rx vector is activated. */
	if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;

		/* Initialize default rearm_data for vPMD. */
		mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
		rte_mbuf_refcnt_set(mbuf_init, 1);
		mbuf_init->nb_segs = 1;
		mbuf_init->port = rxq->port_id;
		/*
		 * Prevent compiler reordering:
		 * rearm_data covers previous fields.
		 */
		rte_compiler_barrier();
		rxq->mbuf_initializer =
			*(uint64_t *)&mbuf_init->rearm_data;
		/* Padding with a fake mbuf for vectorized Rx. */
		for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
			(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
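		/*
		 * The block above snapshots the rearm_data fields (data_off,
		 * refcnt, nb_segs, port) of the fake mbuf into
		 * rxq->mbuf_initializer, so the vectorized Rx path can reset
		 * an mbuf with a single 64-bit store, and pads elts[] with
		 * MLX5_VPMD_DESCS_PER_LOOP extra pointers to that fake mbuf;
		 * presumably this lets the last vector iteration read past
		 * the ring end without touching real buffers.
		 */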
	DEBUG("%p: allocated and configured %u segments (max %u packets)",
	      (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
	for (i = 0; (i != elts_n); ++i) {
		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
		(*rxq_ctrl->rxq.elts)[i] = NULL;
	DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);

/**
 * Free RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	const uint16_t q_n = (1 << rxq->elts_n);
	const uint16_t q_mask = q_n - 1;
	uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);

	DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
	if (rxq->elts == NULL)
	/*
	 * Some mbufs in the ring still belong to the application;
	 * they cannot be freed.
	 */
	if (rxq_check_vec_support(rxq) > 0) {
		for (i = 0; i < used; ++i)
			(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
		rxq->rq_pi = rxq->rq_ci;
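		/*
		 * Entries whose mbufs were already handed over to the
		 * application are only cleared (not freed) above, so that the
		 * loop below never frees application-owned buffers.
		 */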
	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
		if ((*rxq->elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq->elts)[i]);
		(*rxq->elts)[i] = NULL;

/**
 * Clean up a RX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
	DEBUG("cleaning up %p", (void *)rxq_ctrl);
	if (rxq_ctrl->ibv)
		mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));

/**
 * Returns the per-queue supported offloads.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Supported Rx offloads.
 */
mlx5_priv_get_rx_queue_offloads(struct priv *priv)
	struct mlx5_dev_config *config = &priv->config;
	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
			     DEV_RX_OFFLOAD_TIMESTAMP |
			     DEV_RX_OFFLOAD_JUMBO_FRAME);

	if (config->hw_fcs_strip)
		offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
	if (config->hw_csum)
		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
			     DEV_RX_OFFLOAD_UDP_CKSUM |
			     DEV_RX_OFFLOAD_TCP_CKSUM);
	if (config->hw_vlan_strip)
		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
/**
 * Returns the per-port supported offloads.
 *
 * @param priv
 *   Pointer to private structure.
 * @return
 *   Supported Rx offloads.
 */
mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;

/**
 * Checks if the per-queue offload configuration is valid.
 *
 * @param priv
 *   Pointer to private structure.
 * @param offloads
 *   Per-queue offloads configuration.
 * @return
 *   1 if the configuration is valid, 0 otherwise.
 */
priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
	uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
	uint64_t queue_supp_offloads =
		mlx5_priv_get_rx_queue_offloads(priv);
	uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv);

	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	    offloads)
		return 0;
	if (((port_offloads ^ offloads) & port_supp_offloads))
		return 0;
	return 1;
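	/*
	 * The first check above rejects any requested offload that is
	 * supported neither per queue nor per port; the second rejects a
	 * queue whose port-level offloads (the bits covered by
	 * port_supp_offloads) differ from what was configured on the port,
	 * since those cannot vary from queue to queue.
	 */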

/**
 * DPDK callback to configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

	if (!rte_is_power_of_2(desc)) {
		desc = 1 << log2above(desc);
		WARN("%p: increased number of descriptors in RX queue %u"
		     " to the next power of two (%d)",
		     (void *)dev, idx, desc);
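		/*
		 * Example: a request for 1000 descriptors is raised to 1024
		 * here, since log2above(1000) is 10 and 1 << 10 == 1024.
		 */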
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->rxqs_n) {
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->rxqs_n);
	if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
		ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
		      (void *)dev, conf->offloads,
		      dev->data->dev_conf.rxmode.offloads,
		      (mlx5_priv_get_rx_port_offloads(priv) |
		       mlx5_priv_get_rx_queue_offloads(priv)));
	if (!mlx5_priv_rxq_releasable(priv, idx)) {
		ERROR("%p: unable to release queue index %u",
	mlx5_priv_rxq_release(priv, idx);
	rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp);
		ERROR("%p: unable to allocate queue index %u",
	DEBUG("%p: adding RX queue %p to list",
	      (void *)dev, (void *)rxq_ctrl);
	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;

/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 */
mlx5_rx_queue_release(void *dpdk_rxq)
	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl;

	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	priv = rxq_ctrl->priv;
	if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx))
		rte_panic("Rx queue %p is still used by a flow and cannot be"
			  " removed\n", (void *)rxq_ctrl);
	mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx);

/**
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative on failure.
 */
priv_rx_intr_vec_enable(struct priv *priv)
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	unsigned int count = 0;
	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;

	if (!priv->dev->data->dev_conf.intr_conf.rxq)
	priv_rx_intr_vec_disable(priv);
	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
	if (intr_handle->intr_vec == NULL) {
		ERROR("failed to allocate memory for interrupt vector,"
		      " Rx interrupts will not be supported");
	intr_handle->type = RTE_INTR_HANDLE_EXT;
	for (i = 0; i != n; ++i) {
		/* This rxq ibv must not be released in this function. */
		struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i);

		/* Skip queues that cannot request interrupts. */
		if (!rxq_ibv || !rxq_ibv->channel) {
			/* Use invalid intr_vec[] index to disable entry. */
			intr_handle->intr_vec[i] =
				RTE_INTR_VEC_RXTX_OFFSET +
				RTE_MAX_RXTX_INTR_VEC_ID;
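			/*
			 * RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID
			 * acts as an out-of-range sentinel marking the entry
			 * as having no interrupt; priv_rx_intr_vec_disable()
			 * below skips entries carrying this value.
			 */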
		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
			ERROR("too many Rx queues for interrupt vector size"
			      " (%d), Rx interrupts cannot be enabled",
			      RTE_MAX_RXTX_INTR_VEC_ID);
			priv_rx_intr_vec_disable(priv);
		fd = rxq_ibv->channel->fd;
		flags = fcntl(fd, F_GETFL);
		rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
			ERROR("failed to make Rx interrupt file descriptor"
			      " %d non-blocking for queue index %d", fd, i);
			priv_rx_intr_vec_disable(priv);
		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
		intr_handle->efds[count] = fd;
		priv_rx_intr_vec_disable(priv);
		intr_handle->nb_efd = count;

/**
 * Clean up Rx interrupts handler.
 *
 * @param priv
 *   Pointer to private structure.
 */
priv_rx_intr_vec_disable(struct priv *priv)
	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	if (!priv->dev->data->dev_conf.intr_conf.rxq)
	if (!intr_handle->intr_vec)
	for (i = 0; i != n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		struct mlx5_rxq_data *rxq_data;

		if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
		    RTE_MAX_RXTX_INTR_VEC_ID)
		/*
		 * Need to access the queue directly in order to release the
		 * reference kept in priv_rx_intr_vec_enable().
		 */
		rxq_data = (*priv->rxqs)[i];
		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
		mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
	rte_intr_free_epoll_fd(intr_handle);
	if (intr_handle->intr_vec)
		free(intr_handle->intr_vec);
	intr_handle->nb_efd = 0;
	intr_handle->intr_vec = NULL;

/**
 * MLX5 CQ notification: arm the CQ to get an event on the next completion.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 * @param sq_n_rxq
 *   Sequence number per receive queue.
 */
mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
	uint32_t doorbell_hi;
	uint64_t doorbell;
	void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;

	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
	doorbell = (uint64_t)doorbell_hi << 32;
	doorbell |= rxq->cqn;
	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
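	/*
	 * Layout of the 64-bit doorbell written above: the high word carries
	 * the arm sequence number (shifted by MLX5_CQ_SQN_OFFSET) together
	 * with the current CQ consumer index, the low word carries the CQ
	 * number.  The arm doorbell record (cq_db[MLX5_CQ_ARM_DB]) is
	 * updated first, then the value is written to the UAR register so
	 * the HCA raises an event on the completion channel for the next
	 * completion.
	 */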

/**
 * DPDK callback for Rx queue interrupt enable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, negative on failure.
 */
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_ctrl *rxq_ctrl;

	rxq_data = (*priv->rxqs)[rx_queue_id];
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
		struct mlx5_rxq_ibv *rxq_ibv;

		rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
		mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
		mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
	WARN("unable to arm interrupt on rx queue %d", rx_queue_id);

/**
 * DPDK callback for Rx queue interrupt disable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, negative on failure.
 */
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_rxq_ibv *rxq_ibv = NULL;
	struct ibv_cq *ev_cq;

	rxq_data = (*priv->rxqs)[rx_queue_id];
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
	ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
	if (ret || ev_cq != rxq_ibv->cq) {
	rxq_data->cq_arm_sn++;
	mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
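	/*
	 * The pending completion event is consumed with get_cq_event() and
	 * acknowledged with ack_cq_events(); bumping cq_arm_sn ensures the
	 * next mlx5_rx_intr_enable() call arms the CQ with a fresh sequence
	 * number.
	 */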
	mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
	WARN("unable to disable interrupt on rx queue %d",
	     rx_queue_id);

/**
 * Create the Rx queue Verbs object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs object initialised if it can be created.
 */
mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct ibv_wq_attr mod;
		struct ibv_cq_init_attr_ex ibv;
		struct mlx5dv_cq_init_attr mlx5;
		struct ibv_wq_init_attr wq;
		struct ibv_cq_ex cq_attr;
	unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
	struct mlx5_rxq_ibv *tmpl;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_rwq rwq;
	struct mlx5dv_obj obj;
	struct mlx5_dev_config *config = &priv->config;

	assert(!rxq_ctrl->ibv);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
	priv->verbs_alloc_ctx.obj = rxq_ctrl;
	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
		ERROR("%p: cannot allocate verbs resources",
	tmpl->rxq_ctrl = rxq_ctrl;
	/* Use the entire RX mempool as the memory region. */
	tmpl->mr = priv_mr_get(priv, rxq_data->mp);
		tmpl->mr = priv_mr_new(priv, rxq_data->mp);
			ERROR("%p: MR creation failure", (void *)rxq_ctrl);
		tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
		if (!tmpl->channel) {
			ERROR("%p: Comp Channel creation failure",
	attr.cq.ibv = (struct ibv_cq_init_attr_ex){
		.channel = tmpl->channel,
	attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
	if (config->cqe_comp && !rxq_data->hw_timestamp) {
		attr.cq.mlx5.comp_mask |=
			MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
		attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (rxq_check_vec_support(rxq_data) < 0)
			attr.cq.ibv.cqe *= 2;
	} else if (config->cqe_comp && rxq_data->hw_timestamp) {
		DEBUG("Rx CQE compression is disabled for HW timestamp");
	tmpl->cq = mlx5_glue->cq_ex_to_cq
		(mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
	if (tmpl->cq == NULL) {
		ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.orig_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.orig_attr.max_sge);
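	/*
	 * In the WQ attributes below, max_wr is the number of packets the
	 * ring can hold ((1 << elts_n) mbuf segments divided by the
	 * 2^sges_n segments used per packet) and max_sge is the number of
	 * scatter/gather entries posted for each of those packets.
	 */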
	attr.wq = (struct ibv_wq_init_attr){
		.wq_context = NULL, /* Could be useful in the future. */
		.wq_type = IBV_WQT_RQ,
		/* Max number of outstanding WRs. */
		.max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
		/* Max number of scatter/gather elements in a WR. */
		.max_sge = 1 << rxq_data->sges_n,
			IBV_WQ_FLAGS_CVLAN_STRIPPING |
		.create_flags = (rxq_data->vlan_strip ?
				 IBV_WQ_FLAGS_CVLAN_STRIPPING :
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq_data->crc_present) {
		attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
	if (config->hw_padding) {
		attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
		attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
	if (tmpl->wq == NULL) {
		ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
	/*
	 * Make sure number of WRs*SGEs match expectations since a queue
	 * cannot allocate more than "desc" buffers.
	 */
	if (((int)attr.wq.max_wr !=
	     ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
	    ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
		ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
		      ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
		      (1 << rxq_data->sges_n),
		      attr.wq.max_wr, attr.wq.max_sge);
	/* Change queue state to ready. */
	mod = (struct ibv_wq_attr){
		.attr_mask = IBV_WQ_ATTR_STATE,
		.wq_state = IBV_WQS_RDY,
	ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
		ERROR("%p: WQ state to IBV_WQS_RDY failed",
	obj.cq.in = tmpl->cq;
	obj.cq.out = &cq_info;
	obj.rwq.in = tmpl->wq;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
	/* Fill the rings. */
	rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
	for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
		struct rte_mbuf *buf = (*rxq_data->elts)[i];
		volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];

		/* scat->addr must be able to store a pointer. */
		assert(sizeof(scat->addr) >= sizeof(uintptr_t));
		*scat = (struct mlx5_wqe_data_seg){
			.addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
			.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
			.lkey = tmpl->mr->lkey,
	rxq_data->rq_db = rwq.dbrec;
	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
	rxq_data->zip = (struct rxq_zip){
	rxq_data->cq_db = cq_info.dbrec;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
	rxq_data->cq_uar = cq_info.cq_uar;
	rxq_data->cqn = cq_info.cqn;
	rxq_data->cq_arm_sn = 0;
	/* Update doorbell counter. */
	rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
	*rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
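	/*
	 * rq_ci is set to the number of WRs pre-posted above (one per
	 * packet, i.e. (1 << elts_n) >> sges_n) and published through the
	 * RQ doorbell record so the HCA sees the freshly filled ring.
	 */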
	DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
	rte_atomic32_inc(&tmpl->refcnt);
	DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
	      (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
	LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
		claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
		claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
		claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
		priv_mr_release(priv, tmpl->mr);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;

/**
 * Get an Rx queue Verbs object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs object if it exists.
 */
mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (idx >= priv->rxqs_n)
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
		priv_mr_get(priv, rxq_data->mp);
		rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
		DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
		      (void *)rxq_ctrl->ibv,
		      rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
	return rxq_ctrl->ibv;

/**
 * Release an Rx verbs queue object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rxq_ibv
 *   Verbs Rx queue object.
 *
 * @return
 *   0 on success, errno value on failure.
 */
mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
	ret = priv_mr_release(priv, rxq_ibv->mr);
	DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
	      (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt));
	if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
		rxq_free_elts(rxq_ibv->rxq_ctrl);
		claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
		claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
		if (rxq_ibv->channel)
			claim_zero(mlx5_glue->destroy_comp_channel
				   (rxq_ibv->channel));
		LIST_REMOVE(rxq_ibv, next);

/**
 * Verify the Verbs Rx queue list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return the number of objects not released.
 */
mlx5_priv_rxq_ibv_verify(struct priv *priv)
	struct mlx5_rxq_ibv *rxq_ibv;
	LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
		DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv,

/**
 * Return true if a single reference exists on the object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rxq_ibv
 *   Verbs Rx queue object.
 */
mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
	return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
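
/*
 * mlx5_priv_rxq_ibv_get(), mlx5_priv_rxq_ibv_release() and
 * mlx5_priv_rxq_ibv_releasable() implement a simple reference count on the
 * Verbs Rx queue object: _get() bumps the counter, _release() drops it and
 * only destroys the WQ/CQ/channel once it reaches zero, and _releasable()
 * reports whether the caller holds the last reference.
 */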

/**
 * Create a DPDK Rx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   A DPDK queue object on success.
 */
struct mlx5_rxq_ctrl*
mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
		  unsigned int socket, const struct rte_eth_rxconf *conf,
		  struct rte_mempool *mp)
	struct rte_eth_dev *dev = priv->dev;
	struct mlx5_rxq_ctrl *tmpl;
	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
	struct mlx5_dev_config *config = &priv->config;
	/*
	 * Always allocate extra slots, even if eventually
	 * the vector Rx will not be used.
	 */
	const uint16_t desc_n =
		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;

	tmpl = rte_calloc_socket("RXQ", 1,
				 desc_n * sizeof(struct rte_mbuf *),
	tmpl->socket = socket;
	if (priv->dev->data->dev_conf.intr_conf.rxq)
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
		tmpl->rxq.sges_n = 0;
	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
			RTE_PKTMBUF_HEADROOM +
			dev->data->dev_conf.rxmode.max_rx_pkt_len;
		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = log2above((size / mb_len) + !!(size % mb_len));
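		/*
		 * Worked example (assumed values): with mb_len = 2048,
		 * RTE_PKTMBUF_HEADROOM = 128 and max_rx_pkt_len = 9000,
		 * size = 9128, which needs 5 mbufs (4 full ones plus a
		 * remainder), so log2above(5) = 3 and each packet will use
		 * 1 << 3 = 8 SGEs.
		 */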
		tmpl->rxq.sges_n = sges_n;
		/* Make sure rxq.sges_n did not overflow. */
		size = mb_len * (1 << tmpl->rxq.sges_n);
		size -= RTE_PKTMBUF_HEADROOM;
		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
			ERROR("%p: too many SGEs (%u) needed to handle"
			      " requested maximum packet size %u",
			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	DEBUG("%p: maximum number of segments per packet: %u",
	      (void *)dev, 1 << tmpl->rxq.sges_n);
	if (desc % (1 << tmpl->rxq.sges_n)) {
		ERROR("%p: number of RX queue descriptors (%u) is not a"
		      " multiple of SGEs per packet (%u)",
		      1 << tmpl->rxq.sges_n);
	/* Toggle RX checksum offload if hardware supports it. */
	tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
	tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
				priv->config.hw_csum_l2tun);
	tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
	/* Configure VLAN stripping. */
	tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
	/* By default, FCS (CRC) is stripped by hardware. */
	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
		tmpl->rxq.crc_present = 0;
	} else if (config->hw_fcs_strip) {
		tmpl->rxq.crc_present = 1;
		WARN("%p: CRC stripping has been disabled but will still"
		     " be performed by hardware, make sure MLNX_OFED and"
		     " firmware are up to date",
		tmpl->rxq.crc_present = 0;
	DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
	      " incoming frames to hide it",
	      tmpl->rxq.crc_present ? "disabled" : "enabled",
	      tmpl->rxq.crc_present << 2);
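	/*
	 * crc_present records whether the 4-byte Ethernet FCS is still part
	 * of received frames; "crc_present << 2" therefore prints 4 when the
	 * CRC is kept and 0 when it is stripped by hardware.
	 */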
	tmpl->rxq.rss_hash = priv->rxqs_n > 1;
	tmpl->rxq.port_id = dev->data->port_id;
	tmpl->rxq.stats.idx = idx;
	tmpl->rxq.elts_n = log2above(desc);
		(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
	rte_atomic32_inc(&tmpl->refcnt);
	DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
	      (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);

/**
 * Get a Rx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   RX queue index.
 * @return
 *   A pointer to the queue if it exists.
 */
struct mlx5_rxq_ctrl*
mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;

	if ((*priv->rxqs)[idx]) {
		rxq_ctrl = container_of((*priv->rxqs)[idx],
					struct mlx5_rxq_ctrl,
					rxq);
		mlx5_priv_rxq_ibv_get(priv, idx);
		rte_atomic32_inc(&rxq_ctrl->refcnt);
		DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
		      (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));

/**
 * Release a Rx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, errno value on failure.
 */
mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (!(*priv->rxqs)[idx])
	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
	assert(rxq_ctrl->priv);
	if (rxq_ctrl->ibv) {
		ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
		rxq_ctrl->ibv = NULL;
	DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
	      (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
		LIST_REMOVE(rxq_ctrl, next);
		(*priv->rxqs)[idx] = NULL;
/**
 * Verify if the queue can be released.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   RX queue index.
 * @return
 *   1 if the queue can be released.
 */
mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (!(*priv->rxqs)[idx])
	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
	return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);

/**
 * Verify the Rx queue list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return the number of objects not released.
 */
mlx5_priv_rxq_verify(struct priv *priv)
	struct mlx5_rxq_ctrl *rxq_ctrl;

	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
		DEBUG("%p: Rx Queue %p still referenced", (void *)priv,

/**
 * Create an indirection table.
 *
 * @param priv
 *   Pointer to private structure.
 * @param queues
 *   Queues entering in the indirection table.
 * @param queues_n
 *   Number of queues in the array.
 *
 * @return
 *   A new indirection table.
 */
struct mlx5_ind_table_ibv*
mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
			    uint16_t queues_n)
	struct mlx5_ind_table_ibv *ind_tbl;
	const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
		log2above(queues_n) :
		log2above(priv->config.ind_table_max_size);
	struct ibv_wq *wq[1 << wq_n];

	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
			     queues_n * sizeof(uint16_t), 0);
	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_ctrl *rxq =
			mlx5_priv_rxq_get(priv, queues[i]);

		wq[i] = rxq->ibv->wq;
		ind_tbl->queues[i] = queues[i];
	ind_tbl->queues_n = queues_n;
	/* Finalise indirection table. */
	for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
		wq[i] = wq[j];
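	/*
	 * The Verbs indirection table holds 1 << wq_n entries, so when
	 * queues_n is not a power of two the size is taken from
	 * ind_table_max_size and the loop above pads wq[] by cycling over
	 * the provided queues; RSS then spreads traffic across all entries.
	 */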
	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
		 &(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = wq_n,
	if (!ind_tbl->ind_table)
	rte_atomic32_inc(&ind_tbl->refcnt);
	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
	DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
	DEBUG("%p cannot create indirection table", (void *)priv);

/**
 * Get an indirection table.
 *
 * @param priv
 *   Pointer to private structure.
 * @param queues
 *   Queues entering in the indirection table.
 * @param queues_n
 *   Number of queues in the array.
 *
 * @return
 *   An indirection table if found.
 */
struct mlx5_ind_table_ibv*
mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
			    uint16_t queues_n)
	struct mlx5_ind_table_ibv *ind_tbl;

	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
		if ((ind_tbl->queues_n == queues_n) &&
		    (memcmp(ind_tbl->queues, queues,
			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
		rte_atomic32_inc(&ind_tbl->refcnt);
		DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
		      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
		for (i = 0; i != ind_tbl->queues_n; ++i)
			mlx5_priv_rxq_get(priv, ind_tbl->queues[i]);

/**
 * Release an indirection table.
 *
 * @param priv
 *   Pointer to private structure.
 * @param ind_tbl
 *   Indirection table to release.
 *
 * @return
 *   0 on success, errno value on failure.
 */
mlx5_priv_ind_table_ibv_release(struct priv *priv,
				struct mlx5_ind_table_ibv *ind_tbl)
	DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
		claim_zero(mlx5_glue->destroy_rwq_ind_table
			   (ind_tbl->ind_table));
	for (i = 0; i != ind_tbl->queues_n; ++i)
		claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
		LIST_REMOVE(ind_tbl, next);

/**
 * Verify the indirection table list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return the number of objects not released.
 */
mlx5_priv_ind_table_ibv_verify(struct priv *priv)
	struct mlx5_ind_table_ibv *ind_tbl;

	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
		DEBUG("%p: Verbs indirection table %p still referenced",
		      (void *)priv, (void *)ind_tbl);

/**
 * Create an Rx Hash queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param rss_key_len
 *   RSS key length.
 * @param hash_fields
 *   Verbs protocol hash fields to perform RSS on.
 * @param queues
 *   Queues entering in hash queue. In case of empty hash_fields only the
 *   first queue index will be taken for the indirection table.
 * @param queues_n
 *   Number of queues.
 *
 * @return
 *   A hash Rx queue on success.
 */
mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
	struct mlx5_hrxq *hrxq;
	struct mlx5_ind_table_ibv *ind_tbl;

	queues_n = hash_fields ? queues_n : 1;
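	/*
	 * When hash_fields is zero no RSS hashing is requested, so only the
	 * first queue is used and the indirection table below degenerates to
	 * a single entry.
	 */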
	ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
		ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
	qp = mlx5_glue->create_qp_ex
		 &(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
				IBV_QP_INIT_ATTR_PD |
				IBV_QP_INIT_ATTR_IND_TABLE |
				IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = rss_key_len,
				.rx_hash_key = rss_key,
				.rx_hash_fields_mask = hash_fields,
			.rwq_ind_tbl = ind_tbl->ind_table,
	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
	hrxq->ind_table = ind_tbl;
	hrxq->rss_key_len = rss_key_len;
	hrxq->hash_fields = hash_fields;
	memcpy(hrxq->rss_key, rss_key, rss_key_len);
	rte_atomic32_inc(&hrxq->refcnt);
	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
	mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
	claim_zero(mlx5_glue->destroy_qp(qp));

/**
 * Get an Rx Hash queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rss_key
 *   RSS configuration for the Rx hash queue.
 * @param queues
 *   Queues entering in hash queue. In case of empty hash_fields only the
 *   first queue index will be taken for the indirection table.
 *
 * @return
 *   A hash Rx queue on success.
 */
mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
	struct mlx5_hrxq *hrxq;

	queues_n = hash_fields ? queues_n : 1;
	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
		struct mlx5_ind_table_ibv *ind_tbl;

		if (hrxq->rss_key_len != rss_key_len)
		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
		if (hrxq->hash_fields != hash_fields)
		ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
		if (ind_tbl != hrxq->ind_table) {
			mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
		rte_atomic32_inc(&hrxq->refcnt);
		DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
		      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));

/**
 * Release the hash Rx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param hrxq
 *   Pointer to Hash Rx queue to release.
 *
 * @return
 *   0 on success, errno value on failure.
 */
mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
		mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
		LIST_REMOVE(hrxq, next);
	claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table));

/**
 * Verify the hash Rx queue list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return the number of objects not released.
 */
mlx5_priv_hrxq_ibv_verify(struct priv *priv)
	struct mlx5_hrxq *hrxq;

	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
		DEBUG("%p: Verbs Hash Rx queue %p still referenced",
		      (void *)priv, (void *)hrxq);