/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_io.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"

/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};

/* Length of the default RSS hash key. */
const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);

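/*
 * Illustrative sketch (not part of the original file): how an application
 * could hand this default key back through the generic ethdev RSS API.
 * The rte_eth_rss_conf layout is the standard DPDK one; selecting
 * ETH_RSS_IP as the hash type is an assumption made for the example.
 */
static inline void
example_fill_rss_conf(struct rte_eth_rss_conf *rss_conf)
{
	rss_conf->rss_key = rss_hash_default_key;
	rss_conf->rss_key_len = (uint8_t)rss_hash_default_key_len;
	rss_conf->rss_hf = ETH_RSS_IP; /* hash on IPv4/IPv6 header fields */
}
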
/**
 * Allocate RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
	unsigned int i;
	int ret = 0;

	/* Iterate on segments. */
	for (i = 0; (i != elts_n); ++i) {
		struct rte_mbuf *buf;

		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
		if (buf == NULL) {
			ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
			ret = ENOMEM;
			goto error;
		}
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		assert(!buf->next);
		/* Only the first segment keeps headroom. */
		if (i % sges_n)
			SET_DATA_OFF(buf, 0);
		PORT(buf) = rxq_ctrl->rxq.port_id;
		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
		PKT_LEN(buf) = DATA_LEN(buf);
		NB_SEGS(buf) = 1;
		(*rxq_ctrl->rxq.elts)[i] = buf;
	}
	/* If Rx vector is activated. */
	if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
		int j;

		/* Initialize default rearm_data for vPMD. */
		mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
		rte_mbuf_refcnt_set(mbuf_init, 1);
		mbuf_init->nb_segs = 1;
		mbuf_init->port = rxq->port_id;
		/*
		 * prevent compiler reordering:
		 * rearm_data covers previous fields.
		 */
		rte_compiler_barrier();
		rxq->mbuf_initializer = *(uint64_t *)&mbuf_init->rearm_data;
		/* Padding with a fake mbuf for vectorized Rx. */
		for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
			(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
	}
	DEBUG("%p: allocated and configured %u segments (max %u packets)",
	      (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
	return 0;
error:
	elts_n = i;
	for (i = 0; (i != elts_n); ++i) {
		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
		(*rxq_ctrl->rxq.elts)[i] = NULL;
	}
	DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
	assert(ret > 0);
	return ret;
}

/**
 * Free RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	const uint16_t q_n = (1 << rxq->elts_n);
	const uint16_t q_mask = q_n - 1;
	uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
	uint16_t i;

	DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
	if (rxq->elts == NULL)
		return;
	/*
	 * Some mbufs in the ring belong to the application; they cannot be
	 * freed.
	 */
	if (rxq_check_vec_support(rxq) > 0) {
		for (i = 0; i < used; ++i)
			(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
		rxq->rq_pi = rxq->rq_ci;
	}
	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
		if ((*rxq->elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq->elts)[i]);
		(*rxq->elts)[i] = NULL;
	}
}

/**
 * Clean up a RX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
void
mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	DEBUG("cleaning up %p", (void *)rxq_ctrl);
	if (rxq_ctrl->ibv)
		mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
}

/**
 * DPDK callback to configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	int ret = 0;

	(void)conf; /* Thresholds configuration (ignored). */
	priv_lock(priv);
	if (!rte_is_power_of_2(desc)) {
		desc = 1 << log2above(desc);
		WARN("%p: increased number of descriptors in RX queue %u"
		     " to the next power of two (%d)",
		     (void *)dev, idx, desc);
	}
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->rxqs_n) {
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->rxqs_n);
		priv_unlock(priv);
		return -EOVERFLOW;
	}
	if (!mlx5_priv_rxq_releasable(priv, idx)) {
		ret = EBUSY;
		ERROR("%p: unable to release queue index %u",
		      (void *)dev, idx);
		goto out;
	}
	mlx5_priv_rxq_release(priv, idx);
	rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, mp);
	if (!rxq_ctrl) {
		ERROR("%p: unable to allocate queue index %u",
		      (void *)dev, idx);
		ret = ENOMEM;
		goto out;
	}
	DEBUG("%p: adding RX queue %p to list",
	      (void *)dev, (void *)rxq_ctrl);
	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
out:
	priv_unlock(priv);
	return -ret;
}

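/*
 * Illustrative usage sketch (not part of the original file): this callback
 * is reached through the generic ethdev API, e.g. from an application:
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				     NULL, mbuf_pool);
 *
 * A non-power-of-two descriptor count such as 500 would be rounded up to
 * 512 by the WARN() path above.
 */
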
/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 */
void
mlx5_rx_queue_release(void *dpdk_rxq)
{
	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct priv *priv;

	if (rxq == NULL)
		return;
	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	priv = rxq_ctrl->priv;
	priv_lock(priv);
	if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx))
		rte_panic("Rx queue %p is still used by a flow and cannot be"
			  " removed\n", (void *)rxq_ctrl);
	mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx);
	priv_unlock(priv);
}

/**
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative on failure.
 */
int
priv_rx_intr_vec_enable(struct priv *priv)
{
	unsigned int i;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	unsigned int count = 0;
	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;

	if (!priv->dev->data->dev_conf.intr_conf.rxq)
		return 0;
	priv_rx_intr_vec_disable(priv);
	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
	if (intr_handle->intr_vec == NULL) {
		ERROR("failed to allocate memory for interrupt vector,"
		      " Rx interrupts will not be supported");
		return -ENOMEM;
	}
	intr_handle->type = RTE_INTR_HANDLE_EXT;
	for (i = 0; i != n; ++i) {
		/* This rxq ibv must not be released in this function. */
		struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i);
		int fd;
		int flags;
		int rc;

		/* Skip queues that cannot request interrupts. */
		if (!rxq_ibv || !rxq_ibv->channel) {
			/* Use invalid intr_vec[] index to disable entry. */
			intr_handle->intr_vec[i] =
				RTE_INTR_VEC_RXTX_OFFSET +
				RTE_MAX_RXTX_INTR_VEC_ID;
			continue;
		}
		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
			ERROR("too many Rx queues for interrupt vector size"
			      " (%d), Rx interrupts cannot be enabled",
			      RTE_MAX_RXTX_INTR_VEC_ID);
			priv_rx_intr_vec_disable(priv);
			return -1;
		}
		fd = rxq_ibv->channel->fd;
		flags = fcntl(fd, F_GETFL);
		rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
		if (rc < 0) {
			ERROR("failed to make Rx interrupt file descriptor"
			      " %d non-blocking for queue index %d", fd, i);
			priv_rx_intr_vec_disable(priv);
			return -1;
		}
		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
		intr_handle->efds[count] = fd;
		count++;
	}
	if (!count)
		priv_rx_intr_vec_disable(priv);
	else
		intr_handle->nb_efd = count;
	return 0;
}

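/*
 * Illustrative sketch (not part of the original file): once the vector is
 * filled, a DPDK application sleeps on a queue through the generic ethdev
 * and EAL interrupt APIs. The calls below are the standard ones; blocking
 * with an infinite timeout is an assumption made for the example.
 */
static inline void
example_rx_intr_wait(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;

	/* Register the queue interrupt in the per-thread epoll instance. */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);
	/* Arm the CQ (ends up in mlx5_rx_intr_enable() below). */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	/* Block until the device raises the Rx interrupt. */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}
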
/**
 * Clean up Rx interrupts handler.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
priv_rx_intr_vec_disable(struct priv *priv)
{
	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
	unsigned int i;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	if (!priv->dev->data->dev_conf.intr_conf.rxq)
		return;
	if (!intr_handle->intr_vec)
		goto free;
	for (i = 0; i != n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		struct mlx5_rxq_data *rxq_data;

		if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
		    RTE_MAX_RXTX_INTR_VEC_ID)
			continue;
		/*
		 * The queue must be accessed directly to release the
		 * reference kept in priv_rx_intr_vec_enable().
		 */
		rxq_data = (*priv->rxqs)[i];
		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
		mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
	}
free:
	rte_intr_free_epoll_fd(intr_handle);
	if (intr_handle->intr_vec)
		free(intr_handle->intr_vec);
	intr_handle->nb_efd = 0;
	intr_handle->intr_vec = NULL;
}

/**
 * MLX5 CQ notification.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 * @param sq_n_rxq
 *   Sequence number per receive queue.
 */
static inline void
mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
{
	int sq_n = 0;
	uint32_t doorbell_hi;
	uint64_t doorbell;
	void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;

	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
	doorbell = (uint64_t)doorbell_hi << 32;
	doorbell |= rxq->cqn;
	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_wmb();
	rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
}

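/*
 * Worked example (added for clarity, not in the original file), assuming
 * the mlx5_prm.h values MLX5_CQ_SQN_OFFSET == 28 and MLX5_CI_MASK ==
 * 0xffffff: with sq_n_rxq == 1, cq_ci == 0x1234 and cqn == 0xabcd,
 *
 *	doorbell_hi = (1 << 28) | 0x1234;                     32-bit arm word
 *	doorbell    = ((uint64_t)doorbell_hi << 32) | 0xabcd; 64-bit UAR word
 *
 * i.e. the high half of the UAR write repeats the arm word while the low
 * half carries the CQ number, both converted to big-endian for the device.
 */
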
/**
 * DPDK callback for Rx queue interrupt enable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, negative on failure.
 */
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	int ret = 0;

	priv_lock(priv);
	rxq_data = (*priv->rxqs)[rx_queue_id];
	if (!rxq_data) {
		ret = EINVAL;
		goto exit;
	}
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (rxq_ctrl->irq) {
		struct mlx5_rxq_ibv *rxq_ibv;

		rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
		if (!rxq_ibv) {
			ret = EINVAL;
			goto exit;
		}
		mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
		mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
	}
exit:
	priv_unlock(priv);
	if (ret)
		WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
	return -ret;
}

/**
 * DPDK callback for Rx queue interrupt disable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, negative on failure.
 */
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_rxq_ibv *rxq_ibv = NULL;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret = 0;

	priv_lock(priv);
	rxq_data = (*priv->rxqs)[rx_queue_id];
	if (!rxq_data) {
		ret = EINVAL;
		goto exit;
	}
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (!rxq_ctrl->irq)
		goto exit;
	rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
	if (!rxq_ibv) {
		ret = EINVAL;
		goto exit;
	}
	ret = ibv_get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
	if (ret || ev_cq != rxq_ibv->cq) {
		ret = EINVAL;
		goto exit;
	}
	rxq_data->cq_arm_sn++;
	ibv_ack_cq_events(rxq_ibv->cq, 1);
exit:
	if (rxq_ibv)
		mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
	priv_unlock(priv);
	if (ret)
		WARN("unable to disable interrupt on rx queue %d",
		     rx_queue_id);
	return -ret;
}

/**
 * Create the Rx queue Verbs object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs object initialised if it can be created, NULL otherwise.
 */
struct mlx5_rxq_ibv*
mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
{
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct ibv_wq_attr mod;
	union {
		struct {
			struct ibv_cq_init_attr_ex ibv;
			struct mlx5dv_cq_init_attr mlx5;
		} cq;
		struct ibv_wq_init_attr wq;
		struct ibv_cq_ex cq_attr;
	} attr;
	unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
	struct mlx5_rxq_ibv *tmpl;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_rwq rwq;
	unsigned int i;
	int ret = 0;
	struct mlx5dv_obj obj;

	assert(rxq_data);
	assert(!rxq_ctrl->ibv);
	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
				 rxq_ctrl->socket);
	if (!tmpl) {
		ERROR("%p: cannot allocate verbs resources",
		      (void *)rxq_ctrl);
		goto error;
	}
	tmpl->rxq_ctrl = rxq_ctrl;
	/* Use the entire RX mempool as the memory region. */
	tmpl->mr = priv_mr_get(priv, rxq_data->mp);
	if (!tmpl->mr) {
		tmpl->mr = priv_mr_new(priv, rxq_data->mp);
		if (!tmpl->mr) {
			ERROR("%p: MR creation failure", (void *)rxq_ctrl);
			goto error;
		}
	}
	if (rxq_ctrl->irq) {
		tmpl->channel = ibv_create_comp_channel(priv->ctx);
		if (!tmpl->channel) {
			ERROR("%p: Comp Channel creation failure",
			      (void *)rxq_ctrl);
			goto error;
		}
	}
	attr.cq.ibv = (struct ibv_cq_init_attr_ex){
		.cqe = cqe_n,
		.channel = tmpl->channel,
		.comp_mask = 0,
	};
	attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
		.comp_mask = 0,
	};
	if (priv->cqe_comp && !rxq_data->hw_timestamp) {
		attr.cq.mlx5.comp_mask |=
			MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
		attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (rxq_check_vec_support(rxq_data) < 0)
			attr.cq.ibv.cqe *= 2;
	} else if (priv->cqe_comp && rxq_data->hw_timestamp) {
		DEBUG("Rx CQE compression is disabled for HW timestamp");
	}
	tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
						    &attr.cq.mlx5));
	if (tmpl->cq == NULL) {
		ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
		goto error;
	}
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.orig_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.orig_attr.max_sge);
	attr.wq = (struct ibv_wq_init_attr){
		.wq_context = NULL, /* Could be useful in the future. */
		.wq_type = IBV_WQT_RQ,
		/* Max number of outstanding WRs. */
		.max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
		/* Max number of scatter/gather elements in a WR. */
		.max_sge = 1 << rxq_data->sges_n,
		.pd = priv->pd,
		.cq = tmpl->cq,
		.comp_mask =
			IBV_WQ_FLAGS_CVLAN_STRIPPING |
			0,
		.create_flags = (rxq_data->vlan_strip ?
				 IBV_WQ_FLAGS_CVLAN_STRIPPING :
				 0),
	};
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq_data->crc_present) {
		attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
	if (priv->hw_padding) {
		attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
		attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
#endif
	tmpl->wq = ibv_create_wq(priv->ctx, &attr.wq);
	if (tmpl->wq == NULL) {
		ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
		goto error;
	}
	/*
	 * Make sure number of WRs*SGEs match expectations since a queue
	 * cannot allocate more than "desc" buffers.
	 */
	if (((int)attr.wq.max_wr !=
	     ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
	    ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
		ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
		      (void *)rxq_ctrl,
		      ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
		      (1 << rxq_data->sges_n),
		      attr.wq.max_wr, attr.wq.max_sge);
		goto error;
	}
	/* Change queue state to ready. */
	mod = (struct ibv_wq_attr){
		.attr_mask = IBV_WQ_ATTR_STATE,
		.wq_state = IBV_WQS_RDY,
	};
	ret = ibv_modify_wq(tmpl->wq, &mod);
	if (ret) {
		ERROR("%p: WQ state to IBV_WQS_RDY failed",
		      (void *)rxq_ctrl);
		goto error;
	}
	obj.cq.in = tmpl->cq;
	obj.cq.out = &cq_info;
	obj.rwq.in = tmpl->wq;
	obj.rwq.out = &rwq;
	ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
	if (ret != 0)
		goto error;
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
		goto error;
	}
	/* Fill the rings. */
	rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
		(uintptr_t)rwq.buf;
	for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
		struct rte_mbuf *buf = (*rxq_data->elts)[i];
		volatile struct mlx5_wqe_data_seg *scat =
			&(*rxq_data->wqes)[i];

		/* scat->addr must be able to store a pointer. */
		assert(sizeof(scat->addr) >= sizeof(uintptr_t));
		*scat = (struct mlx5_wqe_data_seg){
			.addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
								  uintptr_t)),
			.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
			.lkey = tmpl->mr->lkey,
		};
	}
	rxq_data->rq_db = rwq.dbrec;
	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
	rxq_data->cq_ci = 0;
	rxq_data->rq_ci = 0;
	rxq_data->rq_pi = 0;
	rxq_data->zip = (struct rxq_zip){
		.ai = 0,
	};
	rxq_data->cq_db = cq_info.dbrec;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
	rxq_data->cq_uar = cq_info.cq_uar;
	rxq_data->cqn = cq_info.cqn;
	rxq_data->cq_arm_sn = 0;
	/* Update doorbell counter. */
	rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
	rte_wmb();
	*rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
	DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
	rte_atomic32_inc(&tmpl->refcnt);
	DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
	      (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
	LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
	return tmpl;
error:
	if (tmpl->wq)
		claim_zero(ibv_destroy_wq(tmpl->wq));
	if (tmpl->cq)
		claim_zero(ibv_destroy_cq(tmpl->cq));
	if (tmpl->channel)
		claim_zero(ibv_destroy_comp_channel(tmpl->channel));
	if (tmpl->mr)
		priv_mr_release(priv, tmpl->mr);
	return NULL;
}

/**
 * Get an Rx queue Verbs object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs object if it exists.
 */
struct mlx5_rxq_ibv*
mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
{
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (idx >= priv->rxqs_n)
		return NULL;
	if (!rxq_data)
		return NULL;
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (rxq_ctrl->ibv) {
		priv_mr_get(priv, rxq_data->mp);
		rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
		DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
		      (void *)rxq_ctrl->ibv,
		      rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
	}
	return rxq_ctrl->ibv;
}

/**
 * Release an Rx verbs queue object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rxq_ibv
 *   Verbs Rx queue object.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
{
	int ret;

	assert(rxq_ibv);
	assert(rxq_ibv->wq);
	assert(rxq_ibv->cq);
	assert(rxq_ibv->mr);
	ret = priv_mr_release(priv, rxq_ibv->mr);
	if (!ret)
		rxq_ibv->mr = NULL;
	DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
	      (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt));
	if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
		rxq_free_elts(rxq_ibv->rxq_ctrl);
		claim_zero(ibv_destroy_wq(rxq_ibv->wq));
		claim_zero(ibv_destroy_cq(rxq_ibv->cq));
		if (rxq_ibv->channel)
			claim_zero(ibv_destroy_comp_channel(rxq_ibv->channel));
		LIST_REMOVE(rxq_ibv, next);
		rte_free(rxq_ibv);
		return 0;
	}
	return EBUSY;
}

/**
 * Verify the Verbs Rx queue list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_priv_rxq_ibv_verify(struct priv *priv)
{
	int ret = 0;
	struct mlx5_rxq_ibv *rxq_ibv;

	LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
		DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv,
		      (void *)rxq_ibv);
		++ret;
	}
	return ret;
}

/**
 * Return true if a single reference exists on the object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rxq_ibv
 *   Verbs Rx queue object.
 */
int
mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
{
	(void)priv;
	assert(rxq_ibv);
	return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
}

/**
 * Create a DPDK Rx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   A DPDK queue object on success.
 */
struct mlx5_rxq_ctrl*
mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
		  unsigned int socket, struct rte_mempool *mp)
{
	struct rte_eth_dev *dev = priv->dev;
	struct mlx5_rxq_ctrl *tmpl;
	const uint16_t desc_n =
		desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);

	tmpl = rte_calloc_socket("RXQ", 1,
				 sizeof(*tmpl) +
				 desc_n * sizeof(struct rte_mbuf *),
				 0, socket);
	if (!tmpl)
		return NULL;
	tmpl->socket = socket;
	if (priv->dev->data->dev_conf.intr_conf.rxq)
		tmpl->irq = 1;
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
		tmpl->rxq.sges_n = 0;
	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
		unsigned int size =
			RTE_PKTMBUF_HEADROOM +
			dev->data->dev_conf.rxmode.max_rx_pkt_len;
		unsigned int sges_n;

		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = log2above((size / mb_len) + !!(size % mb_len));
		tmpl->rxq.sges_n = sges_n;
		/* Make sure rxq.sges_n did not overflow. */
		size = mb_len * (1 << tmpl->rxq.sges_n);
		size -= RTE_PKTMBUF_HEADROOM;
		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
			ERROR("%p: too many SGEs (%u) needed to handle"
			      " requested maximum packet size %u",
			      (void *)dev,
			      1 << sges_n,
			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
			goto error;
		}
	} else {
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev,
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	}
	DEBUG("%p: maximum number of segments per packet: %u",
	      (void *)dev, 1 << tmpl->rxq.sges_n);
	if (desc % (1 << tmpl->rxq.sges_n)) {
		ERROR("%p: number of RX queue descriptors (%u) is not a"
		      " multiple of SGEs per packet (%u)",
		      (void *)dev,
		      desc,
		      1 << tmpl->rxq.sges_n);
		goto error;
	}
	/* Toggle RX checksum offload if hardware supports it. */
	if (priv->hw_csum)
		tmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
	if (priv->hw_csum_l2tun)
		tmpl->rxq.csum_l2tun =
			!!dev->data->dev_conf.rxmode.hw_ip_checksum;
	tmpl->rxq.hw_timestamp =
			!!dev->data->dev_conf.rxmode.hw_timestamp;
	/* Configure VLAN stripping. */
	tmpl->rxq.vlan_strip = (priv->hw_vlan_strip &&
			       !!dev->data->dev_conf.rxmode.hw_vlan_strip);
	/* By default, FCS (CRC) is stripped by hardware. */
	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
		tmpl->rxq.crc_present = 0;
	} else if (priv->hw_fcs_strip) {
		tmpl->rxq.crc_present = 1;
	} else {
		WARN("%p: CRC stripping has been disabled but will still"
		     " be performed by hardware, make sure MLNX_OFED and"
		     " firmware are up to date",
		     (void *)dev);
		tmpl->rxq.crc_present = 0;
	}
	DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
	      " incoming frames to hide it",
	      (void *)dev,
	      tmpl->rxq.crc_present ? "disabled" : "enabled",
	      tmpl->rxq.crc_present << 2);
	/* Save port ID. */
	tmpl->rxq.rss_hash = priv->rxqs_n > 1;
	tmpl->rxq.port_id = dev->data->port_id;
	tmpl->priv = priv;
	tmpl->rxq.mp = mp;
	tmpl->rxq.stats.idx = idx;
	tmpl->rxq.elts_n = log2above(desc);
	tmpl->rxq.elts =
		(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
	rte_atomic32_inc(&tmpl->refcnt);
	DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
	      (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
	return tmpl;
error:
	rte_free(tmpl);
	return NULL;
}

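/*
 * Worked example (added for clarity, not in the original file): with a
 * 2048-byte mbuf data room, 128 bytes of headroom and
 * max_rx_pkt_len == 9000, size = 128 + 9000 = 9128, which needs
 * ceil(9128 / 2048) = 5 buffers, rounded up to the next power of two:
 * sges_n = log2above(5) = 3, i.e. 8 segments per packet. The descriptor
 * count must then be a multiple of 8.
 */
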
/**
 * Get a Rx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   A pointer to the queue if it exists.
 */
struct mlx5_rxq_ctrl*
mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
{
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;

	if ((*priv->rxqs)[idx]) {
		rxq_ctrl = container_of((*priv->rxqs)[idx],
					struct mlx5_rxq_ctrl,
					rxq);

		mlx5_priv_rxq_ibv_get(priv, idx);
		rte_atomic32_inc(&rxq_ctrl->refcnt);
		DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
		      (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
	}
	return rxq_ctrl;
}

/**
 * Release a Rx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)
{
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (!(*priv->rxqs)[idx])
		return 0;
	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
	assert(rxq_ctrl->priv);
	if (rxq_ctrl->ibv) {
		int ret;

		ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
		if (!ret)
			rxq_ctrl->ibv = NULL;
	}
	DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
	      (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
		LIST_REMOVE(rxq_ctrl, next);
		rte_free(rxq_ctrl);
		(*priv->rxqs)[idx] = NULL;
		return 0;
	}
	return EBUSY;
}

/**
 * Verify if the queue can be released.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   1 if the queue can be released.
 */
int
mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
{
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (!(*priv->rxqs)[idx])
		return -1;
	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
	return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
}

/**
 * Verify the Rx queue list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_priv_rxq_verify(struct priv *priv)
{
	struct mlx5_rxq_ctrl *rxq_ctrl;
	int ret = 0;

	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
		DEBUG("%p: Rx Queue %p still referenced", (void *)priv,
		      (void *)rxq_ctrl);
		++ret;
	}
	return ret;
}

/**
 * Create an indirection table.
 *
 * @param priv
 *   Pointer to private structure.
 * @param queues
 *   Queues entering in the indirection table.
 * @param queues_n
 *   Number of queues in the array.
 *
 * @return
 *   A new indirection table.
 */
struct mlx5_ind_table_ibv*
mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
			    uint16_t queues_n)
{
	struct mlx5_ind_table_ibv *ind_tbl;
	const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
		log2above(queues_n) :
		log2above(priv->ind_table_max_size);
	struct ibv_wq *wq[1 << wq_n];
	unsigned int i;
	unsigned int j;

	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
			     queues_n * sizeof(uint16_t), 0);
	if (!ind_tbl)
		return NULL;
	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_ctrl *rxq =
			mlx5_priv_rxq_get(priv, queues[i]);

		if (!rxq)
			goto error;
		wq[i] = rxq->ibv->wq;
		ind_tbl->queues[i] = queues[i];
	}
	ind_tbl->queues_n = queues_n;
	/* Finalise indirection table. */
	for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
		wq[i] = wq[j];
	ind_tbl->ind_table = ibv_create_rwq_ind_table(
		priv->ctx,
		&(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = wq_n,
			.ind_tbl = wq,
			.comp_mask = 0,
		});
	if (!ind_tbl->ind_table)
		goto error;
	rte_atomic32_inc(&ind_tbl->refcnt);
	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
	DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
	return ind_tbl;
error:
	rte_free(ind_tbl);
	DEBUG("%p cannot create indirection table", (void *)priv);
	return NULL;
}

/**
 * Get an indirection table.
 *
 * @param priv
 *   Pointer to private structure.
 * @param queues
 *   Queues entering in the indirection table.
 * @param queues_n
 *   Number of queues in the array.
 *
 * @return
 *   An indirection table if found.
 */
struct mlx5_ind_table_ibv*
mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
			    uint16_t queues_n)
{
	struct mlx5_ind_table_ibv *ind_tbl;

	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
		if ((ind_tbl->queues_n == queues_n) &&
		    (memcmp(ind_tbl->queues, queues,
			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
		     == 0))
			break;
	}
	if (ind_tbl) {
		unsigned int i;

		rte_atomic32_inc(&ind_tbl->refcnt);
		DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
		      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
		for (i = 0; i != ind_tbl->queues_n; ++i)
			mlx5_priv_rxq_get(priv, ind_tbl->queues[i]);
	}
	return ind_tbl;
}

/**
 * Release an indirection table.
 *
 * @param priv
 *   Pointer to private structure.
 * @param ind_tbl
 *   Indirection table to release.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_priv_ind_table_ibv_release(struct priv *priv,
				struct mlx5_ind_table_ibv *ind_tbl)
{
	unsigned int i;

	DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
		claim_zero(ibv_destroy_rwq_ind_table(ind_tbl->ind_table));
	for (i = 0; i != ind_tbl->queues_n; ++i)
		claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
		LIST_REMOVE(ind_tbl, next);
		rte_free(ind_tbl);
		return 0;
	}
	return EBUSY;
}

/**
 * Verify the indirection table list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_priv_ind_table_ibv_verify(struct priv *priv)
{
	struct mlx5_ind_table_ibv *ind_tbl;
	int ret = 0;

	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
		DEBUG("%p: Verbs indirection table %p still referenced",
		      (void *)priv, (void *)ind_tbl);
		++ret;
	}
	return ret;
}

/**
 * Create a hash Rx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param rss_key_len
 *   RSS key length.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param queues
 *   Queues entering in hash queue. In case of empty hash_fields only the
 *   first queue index will be taken for the indirection table.
 * @param queues_n
 *   Number of queues.
 *
 * @return
 *   A hash Rx queue on success.
 */
struct mlx5_hrxq*
mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
{
	struct mlx5_hrxq *hrxq;
	struct mlx5_ind_table_ibv *ind_tbl;
	struct ibv_qp *qp;

	queues_n = hash_fields ? queues_n : 1;
	ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
	if (!ind_tbl)
		ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
	if (!ind_tbl)
		return NULL;
	qp = ibv_create_qp_ex(
		priv->ctx,
		&(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask =
				IBV_QP_INIT_ATTR_PD |
				IBV_QP_INIT_ATTR_IND_TABLE |
				IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = rss_key_len,
				.rx_hash_key = rss_key,
				.rx_hash_fields_mask = hash_fields,
			},
			.rwq_ind_tbl = ind_tbl->ind_table,
			.pd = priv->pd,
		});
	if (!qp)
		goto error;
	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
	if (!hrxq)
		goto error;
	hrxq->ind_table = ind_tbl;
	hrxq->qp = qp;
	hrxq->rss_key_len = rss_key_len;
	hrxq->hash_fields = hash_fields;
	memcpy(hrxq->rss_key, rss_key, rss_key_len);
	rte_atomic32_inc(&hrxq->refcnt);
	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
	return hrxq;
error:
	mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
	if (qp)
		claim_zero(ibv_destroy_qp(qp));
	return NULL;
}

/**
 * Get a hash Rx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param rss_key_len
 *   RSS key length.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param queues
 *   Queues entering in hash queue. In case of empty hash_fields only the
 *   first queue index will be taken for the indirection table.
 * @param queues_n
 *   Number of queues.
 *
 * @return
 *   A hash Rx queue on success.
 */
struct mlx5_hrxq*
mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
{
	struct mlx5_hrxq *hrxq;

	queues_n = hash_fields ? queues_n : 1;
	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
		struct mlx5_ind_table_ibv *ind_tbl;

		if (hrxq->rss_key_len != rss_key_len)
			continue;
		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
			continue;
		if (hrxq->hash_fields != hash_fields)
			continue;
		ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
		if (!ind_tbl)
			continue;
		if (ind_tbl != hrxq->ind_table) {
			mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
			continue;
		}
		rte_atomic32_inc(&hrxq->refcnt);
		DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
		      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
		return hrxq;
	}
	return NULL;
}

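/*
 * Illustrative sketch (not part of the original file): a flow with an RSS
 * action typically pairs the lookup and creation paths above. The
 * hash-field mask below (IPv4 src/dst) is an assumption made for the
 * example.
 */
static inline struct mlx5_hrxq *
example_hrxq_get_or_new(struct priv *priv, uint16_t queues[],
			uint16_t queues_n)
{
	uint64_t fields = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;
	struct mlx5_hrxq *hrxq;

	/* Reuse an existing hash Rx queue when one matches... */
	hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
				  (uint8_t)rss_hash_default_key_len,
				  fields, queues, queues_n);
	if (!hrxq) /* ...otherwise create it. */
		hrxq = mlx5_priv_hrxq_new(priv, rss_hash_default_key,
					  (uint8_t)rss_hash_default_key_len,
					  fields, queues, queues_n);
	return hrxq;
}
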
/**
 * Release the hash Rx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param hrxq
 *   Pointer to Hash Rx queue to release.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
{
	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
		claim_zero(ibv_destroy_qp(hrxq->qp));
		mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
		LIST_REMOVE(hrxq, next);
		rte_free(hrxq);
		return 0;
	}
	claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table));
	return EBUSY;
}

/**
 * Verify the hash Rx queue list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_priv_hrxq_ibv_verify(struct priv *priv)
{
	struct mlx5_hrxq *hrxq;
	int ret = 0;

	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
		DEBUG("%p: Verbs Hash Rx queue %p still referenced",
		      (void *)priv, (void *)hrxq);
		++ret;
	}
	return ret;
}