/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stddef.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_defs.h"
/**
 * Allocate RX queue elements with scattered packets support.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 * @param[in] pool
 *   If not NULL, fetch buffers from this array instead of allocating them
 *   with rte_pktmbuf_alloc().
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
rxq_alloc_elts_sp(struct rxq *rxq, unsigned int elts_n,
		  struct rte_mbuf **pool)
{
	unsigned int i;
	struct rxq_elt_sp (*elts)[elts_n] =
		rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
				  rxq->socket);
	int ret = 0;

	if (elts == NULL) {
		ERROR("%p: can't allocate packets array", (void *)rxq);
		ret = ENOMEM;
		goto error;
	}
	/* For each WR (packet). */
	for (i = 0; (i != elts_n); ++i) {
		unsigned int j;
		struct rxq_elt_sp *elt = &(*elts)[i];
		struct ibv_recv_wr *wr = &elt->wr;
		struct ibv_sge (*sges)[RTE_DIM(elt->sges)] = &elt->sges;

		/* These two arrays must have the same size. */
		assert(RTE_DIM(elt->sges) == RTE_DIM(elt->bufs));
		/* Configure WR. */
		wr->wr_id = i;
		wr->next = &(*elts)[(i + 1)].wr;
		wr->sg_list = &(*sges)[0];
		wr->num_sge = RTE_DIM(*sges);
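		/*
		 * Chaining WRs through wr->next lets the caller post the
		 * whole array with a single ibv_post_recv() call; the
		 * out-of-range link created on the last iteration is
		 * overwritten with NULL once the loop completes.
		 */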
		/* For each SGE (segment). */
		for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
			struct ibv_sge *sge = &(*sges)[j];
			struct rte_mbuf *buf;

			if (pool != NULL) {
				buf = *(pool++);
				assert(buf != NULL);
				rte_pktmbuf_reset(buf);
			} else
				buf = rte_pktmbuf_alloc(rxq->mp);
			if (buf == NULL) {
				assert(pool == NULL);
				ERROR("%p: empty mbuf pool", (void *)rxq);
				ret = ENOMEM;
				goto error;
			}
			elt->bufs[j] = buf;
			/* Headroom is reserved by rte_pktmbuf_alloc(). */
			assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
			/* Buffer is supposed to be empty. */
			assert(rte_pktmbuf_data_len(buf) == 0);
			assert(rte_pktmbuf_pkt_len(buf) == 0);
			/* sge->addr must be able to store a pointer. */
			assert(sizeof(sge->addr) >= sizeof(uintptr_t));
			if (j == 0) {
				/* The first SGE keeps its headroom. */
				sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
				sge->length = (buf->buf_len -
					       RTE_PKTMBUF_HEADROOM);
			} else {
				/* Subsequent SGEs lose theirs. */
				assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
				SET_DATA_OFF(buf, 0);
				sge->addr = (uintptr_t)buf->buf_addr;
				sge->length = buf->buf_len;
			}
			sge->lkey = rxq->mr->lkey;
			/* Redundant check for tailroom. */
			assert(sge->length == rte_pktmbuf_tailroom(buf));
		}
	}
	/* The last WR pointer must be NULL. */
	(*elts)[(i - 1)].wr.next = NULL;
	DEBUG("%p: allocated and configured %u WRs (%zu segments)",
	      (void *)rxq, elts_n, (elts_n * RTE_DIM((*elts)[0].sges)));
	rxq->elts_n = elts_n;
	rxq->elts.sp = elts;
	assert(ret == 0);
	return 0;
error:
	if (elts != NULL) {
		assert(pool == NULL);
		for (i = 0; (i != RTE_DIM(*elts)); ++i) {
			unsigned int j;
			struct rxq_elt_sp *elt = &(*elts)[i];

			for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
				struct rte_mbuf *buf = elt->bufs[j];

				if (buf != NULL)
					rte_pktmbuf_free_seg(buf);
			}
		}
		rte_free(elts);
	}
	DEBUG("%p: failed, freed everything", (void *)rxq);
	assert(ret > 0);
	return ret;
}
/**
 * Free RX queue elements with scattered packets support.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts_sp(struct rxq *rxq)
{
	unsigned int i;
	unsigned int elts_n = rxq->elts_n;
	struct rxq_elt_sp (*elts)[elts_n] = rxq->elts.sp;

	DEBUG("%p: freeing WRs", (void *)rxq);
	rxq->elts_n = 0;
	rxq->elts.sp = NULL;
	if (elts == NULL)
		return;
	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
		unsigned int j;
		struct rxq_elt_sp *elt = &(*elts)[i];

		for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
			struct rte_mbuf *buf = elt->bufs[j];

			if (buf != NULL)
				rte_pktmbuf_free_seg(buf);
		}
	}
	rte_free(elts);
}
/**
 * Allocate RX queue elements.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 * @param[in] pool
 *   If not NULL, fetch buffers from this array instead of allocating them
 *   with rte_pktmbuf_alloc().
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n, struct rte_mbuf **pool)
{
	unsigned int i;
	struct rxq_elt (*elts)[elts_n] =
		rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
				  rxq->socket);
	int ret = 0;

	if (elts == NULL) {
		ERROR("%p: can't allocate packets array", (void *)rxq);
		ret = ENOMEM;
		goto error;
	}
	/* For each WR (packet). */
	for (i = 0; (i != elts_n); ++i) {
		struct rxq_elt *elt = &(*elts)[i];
		struct ibv_recv_wr *wr = &elt->wr;
		struct ibv_sge *sge = &(*elts)[i].sge;
		struct rte_mbuf *buf;

		if (pool != NULL) {
			buf = *(pool++);
			assert(buf != NULL);
			rte_pktmbuf_reset(buf);
		} else
			buf = rte_pktmbuf_alloc(rxq->mp);
		if (buf == NULL) {
			assert(pool == NULL);
			ERROR("%p: empty mbuf pool", (void *)rxq);
			ret = ENOMEM;
			goto error;
		}
		/* Configure WR. Work request ID contains its own index in
		 * the elts array and the offset between SGE buffer header and
		 * its data. */
		WR_ID(wr->wr_id).id = i;
		WR_ID(wr->wr_id).offset =
			(((uintptr_t)buf->buf_addr + RTE_PKTMBUF_HEADROOM) -
			 (uintptr_t)buf);
		wr->next = &(*elts)[(i + 1)].wr;
		wr->sg_list = sge;
		wr->num_sge = 1;
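		/*
		 * With this encoding the receive path can recover both the
		 * element index and the mbuf from the 64-bit wr_id alone:
		 *   elt = &(*elts)[WR_ID(wr_id).id];
		 *   buf = (void *)((uintptr_t)elt->sge.addr -
		 *                  WR_ID(wr_id).offset);
		 * The error path below relies on exactly this property.
		 */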
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* sge->addr must be able to store a pointer. */
		assert(sizeof(sge->addr) >= sizeof(uintptr_t));
		/* SGE keeps its headroom. */
		sge->addr = (uintptr_t)
			((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
		sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
		sge->lkey = rxq->mr->lkey;
		/* Redundant check for tailroom. */
		assert(sge->length == rte_pktmbuf_tailroom(buf));
		/* Make sure elts index and SGE mbuf pointer can be deduced
		 * from WR ID. */
		if ((WR_ID(wr->wr_id).id != i) ||
		    ((void *)((uintptr_t)sge->addr -
			      WR_ID(wr->wr_id).offset) != buf)) {
			ERROR("%p: cannot store index and offset in WR ID",
			      (void *)rxq);
			sge->addr = 0;
			rte_pktmbuf_free(buf);
			ret = EOVERFLOW;
			goto error;
		}
	}
	/* The last WR pointer must be NULL. */
	(*elts)[(i - 1)].wr.next = NULL;
	DEBUG("%p: allocated and configured %u single-segment WRs",
	      (void *)rxq, elts_n);
	rxq->elts_n = elts_n;
	rxq->elts.no_sp = elts;
	assert(ret == 0);
	return 0;
error:
	if (elts != NULL) {
		assert(pool == NULL);
		for (i = 0; (i != RTE_DIM(*elts)); ++i) {
			struct rxq_elt *elt = &(*elts)[i];
			struct rte_mbuf *buf;

			if (elt->sge.addr == 0)
				continue;
			assert(WR_ID(elt->wr.wr_id).id == i);
			buf = (void *)((uintptr_t)elt->sge.addr -
				       WR_ID(elt->wr.wr_id).offset);
			rte_pktmbuf_free_seg(buf);
		}
		rte_free(elts);
	}
	DEBUG("%p: failed, freed everything", (void *)rxq);
	assert(ret > 0);
	return ret;
}
/**
 * Free RX queue elements.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts(struct rxq *rxq)
{
	unsigned int i;
	unsigned int elts_n = rxq->elts_n;
	struct rxq_elt (*elts)[elts_n] = rxq->elts.no_sp;

	DEBUG("%p: freeing WRs", (void *)rxq);
	rxq->elts_n = 0;
	rxq->elts.no_sp = NULL;
	if (elts == NULL)
		return;
	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
		struct rxq_elt *elt = &(*elts)[i];
		struct rte_mbuf *buf;

		if (elt->sge.addr == 0)
			continue;
		assert(WR_ID(elt->wr.wr_id).id == i);
		buf = (void *)((uintptr_t)elt->sge.addr -
			       WR_ID(elt->wr.wr_id).offset);
		rte_pktmbuf_free_seg(buf);
	}
	rte_free(elts);
}
/**
 * Clean up a RX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
void
rxq_cleanup(struct rxq *rxq)
{
	struct ibv_exp_release_intf_params params;

	DEBUG("cleaning up %p", (void *)rxq);
	if (rxq->sp)
		rxq_free_elts_sp(rxq);
	else
		rxq_free_elts(rxq);
	if (rxq->if_qp != NULL) {
		assert(rxq->priv != NULL);
		assert(rxq->priv->ctx != NULL);
		assert(rxq->qp != NULL);
		params = (struct ibv_exp_release_intf_params){
			.comp_mask = 0,
		};
		claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
						rxq->if_qp,
						&params));
	}
	if (rxq->if_cq != NULL) {
		assert(rxq->priv != NULL);
		assert(rxq->priv->ctx != NULL);
		assert(rxq->cq != NULL);
		params = (struct ibv_exp_release_intf_params){
			.comp_mask = 0,
		};
		claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
						rxq->if_cq,
						&params));
	}
	if (rxq->qp != NULL) {
		rxq_mac_addrs_del(rxq);
		claim_zero(ibv_destroy_qp(rxq->qp));
	}
	if (rxq->cq != NULL)
		claim_zero(ibv_destroy_cq(rxq->cq));
	if (rxq->rd != NULL) {
		struct ibv_exp_destroy_res_domain_attr attr = {
			.comp_mask = 0,
		};

		assert(rxq->priv != NULL);
		assert(rxq->priv->ctx != NULL);
		claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx,
						      rxq->rd,
						      &attr));
	}
	if (rxq->mr != NULL)
		claim_zero(ibv_dereg_mr(rxq->mr));
	memset(rxq, 0, sizeof(*rxq));
}
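/*
 * Note: rxq_cleanup() releases objects in reverse order of creation (burst
 * interfaces first, then QP, CQ, resource domain and finally the memory
 * region), since Verbs will not destroy a CQ or resource domain that a QP
 * still references.
 */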
/**
 * Allocate a Queue Pair.
 * Optionally setup inline receive if supported.
 *
 * @param priv
 *   Pointer to private structure.
 * @param cq
 *   Completion queue to associate with QP.
 * @param desc
 *   Number of descriptors in QP (hint only).
 * @param rd
 *   Resource domain to associate with QP.
 *
 * @return
 *   QP pointer or NULL in case of error.
 */
static struct ibv_qp *
rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
	     struct ibv_exp_res_domain *rd)
{
	struct ibv_exp_qp_init_attr attr = {
		/* CQ to be associated with the send queue. */
		.send_cq = cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
					priv->device_attr.max_qp_wr :
					desc),
			/* Max number of scatter/gather elements in a WR. */
			.max_recv_sge = ((priv->device_attr.max_sge <
					  MLX5_PMD_SGE_WR_N) ?
					 priv->device_attr.max_sge :
					 MLX5_PMD_SGE_WR_N),
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
			      IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
		.pd = priv->pd,
		.res_domain = rd,
	};

	return ibv_exp_create_qp(priv->ctx, &attr);
}
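/*
 * Note: both QP capabilities above are clamped to the limits reported by
 * the device in priv->device_attr, which is why the descriptor count is
 * only a hint.
 */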
#ifdef RSS_SUPPORT

/**
 * Allocate a RSS Queue Pair.
 * Optionally setup inline receive if supported.
 *
 * @param priv
 *   Pointer to private structure.
 * @param cq
 *   Completion queue to associate with QP.
 * @param desc
 *   Number of descriptors in QP (hint only).
 * @param parent
 *   If nonzero, create a parent QP, otherwise a child.
 * @param rd
 *   Resource domain to associate with QP.
 *
 * @return
 *   QP pointer or NULL in case of error.
 */
static struct ibv_qp *
rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
		 int parent, struct ibv_exp_res_domain *rd)
{
	struct ibv_exp_qp_init_attr attr = {
		/* CQ to be associated with the send queue. */
		.send_cq = cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
					priv->device_attr.max_qp_wr :
					desc),
			/* Max number of scatter/gather elements in a WR. */
			.max_recv_sge = ((priv->device_attr.max_sge <
					  MLX5_PMD_SGE_WR_N) ?
					 priv->device_attr.max_sge :
					 MLX5_PMD_SGE_WR_N),
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
			      IBV_EXP_QP_INIT_ATTR_RES_DOMAIN |
			      IBV_EXP_QP_INIT_ATTR_QPG),
		.pd = priv->pd,
		.res_domain = rd,
	};

	if (parent) {
		attr.qpg.qpg_type = IBV_EXP_QPG_PARENT;
		/* TSS isn't necessary. */
		attr.qpg.parent_attrib.tss_child_count = 0;
		attr.qpg.parent_attrib.rss_child_count = priv->rxqs_n;
		DEBUG("initializing parent RSS queue");
	} else {
		attr.qpg.qpg_type = IBV_EXP_QPG_CHILD_RX;
		attr.qpg.qpg_parent = priv->rxq_parent.qp;
		DEBUG("initializing child RSS queue");
	}
	return ibv_exp_create_qp(priv->ctx, &attr);
}
#endif /* RSS_SUPPORT */
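/*
 * With RSS_SUPPORT, RX queues form a QP group: a single parent QP owns the
 * RSS configuration (rss_child_count children) while each child QP receives
 * its share of the hashed traffic. The parent itself never carries packets,
 * which is why rxq_setup() creates it with a single placeholder descriptor
 * and no elements.
 */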
/**
 * Reconfigure a RX queue with new parameters.
 *
 * rxq_rehash() does not allocate mbufs, which, if not done from the right
 * thread (such as a control thread), may corrupt the pool.
 * In case of failure, the queue is left untouched.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq
 *   RX queue pointer.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
{
	struct priv *priv = rxq->priv;
	struct rxq tmpl = *rxq;
	unsigned int mbuf_n;
	unsigned int desc_n;
	struct rte_mbuf **pool;
	unsigned int i, k;
	struct ibv_exp_qp_attr mod;
	struct ibv_recv_wr *bad_wr;
	int err;
	int parent = (rxq == &priv->rxq_parent);

	if (parent) {
		ERROR("%p: cannot rehash parent queue %p",
		      (void *)dev, (void *)rxq);
		return EINVAL;
	}
	DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
	/* Number of descriptors and mbufs currently allocated. */
	desc_n = (tmpl.elts_n * (tmpl.sp ? MLX5_PMD_SGE_WR_N : 1));
	mbuf_n = desc_n;
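	/*
	 * In scattered mode every element carries MLX5_PMD_SGE_WR_N mbufs,
	 * so elts_n elements account for (elts_n * MLX5_PMD_SGE_WR_N) mbufs;
	 * desc_n is scaled back down below when scattered mode is switched
	 * on so the same mbufs can be redistributed.
	 */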
	/* Enable scattered packets support for this queue if necessary. */
	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
	     (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
		tmpl.sp = 1;
		desc_n /= MLX5_PMD_SGE_WR_N;
	} else
		tmpl.sp = 0;
	DEBUG("%p: %s scattered packets support (%u WRs)",
	      (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc_n);
	/* If scatter mode is the same as before, nothing to do. */
	if (tmpl.sp == rxq->sp) {
		DEBUG("%p: nothing to do", (void *)dev);
		return 0;
	}
	/* Remove attached flows if RSS is disabled (no parent queue). */
	if (!priv->rss) {
		rxq_mac_addrs_del(&tmpl);
		/* Update original queue in case of failure. */
		memcpy(rxq->mac_flow, tmpl.mac_flow, sizeof(rxq->mac_flow));
	}
	/* From now on, any failure will render the queue unusable.
	 * Reinitialize QP. */
	mod = (struct ibv_exp_qp_attr){ .qp_state = IBV_QPS_RESET };
	err = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE);
	if (err) {
		ERROR("%p: cannot reset QP: %s", (void *)dev, strerror(err));
		assert(err > 0);
		return err;
	}
	err = ibv_resize_cq(tmpl.cq, desc_n);
	if (err) {
		ERROR("%p: cannot resize CQ: %s", (void *)dev, strerror(err));
		assert(err > 0);
		return err;
	}
	mod = (struct ibv_exp_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	err = ibv_exp_modify_qp(tmpl.qp, &mod,
				(IBV_EXP_QP_STATE |
#ifdef RSS_SUPPORT
				 (parent ? IBV_EXP_QP_GROUP_RSS : 0) |
#endif /* RSS_SUPPORT */
				 IBV_EXP_QP_PORT));
	if (err) {
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(err));
		assert(err > 0);
		return err;
	}
	/* Reconfigure flows. Do not care for errors. */
	if (!priv->rss) {
		rxq_mac_addrs_add(&tmpl);
		/* Update original queue in case of failure. */
		memcpy(rxq->mac_flow, tmpl.mac_flow, sizeof(rxq->mac_flow));
	}
	/* Allocate pool. */
	pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0);
	if (pool == NULL) {
		ERROR("%p: cannot allocate memory", (void *)dev);
		return ENOBUFS;
	}
	/* Snatch mbufs from original queue. */
	k = 0;
	if (rxq->sp) {
		struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;

		for (i = 0; (i != RTE_DIM(*elts)); ++i) {
			struct rxq_elt_sp *elt = &(*elts)[i];
			unsigned int j;

			for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
				assert(elt->bufs[j] != NULL);
				pool[k++] = elt->bufs[j];
			}
		}
	} else {
		struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;

		for (i = 0; (i != RTE_DIM(*elts)); ++i) {
			struct rxq_elt *elt = &(*elts)[i];
			struct rte_mbuf *buf = (void *)
				((uintptr_t)elt->sge.addr -
				 WR_ID(elt->wr.wr_id).offset);

			assert(WR_ID(elt->wr.wr_id).id == i);
			pool[k++] = buf;
		}
	}
	assert(k == mbuf_n);
	tmpl.elts_n = 0;
	tmpl.elts.sp = NULL;
	assert((void *)&tmpl.elts.sp == (void *)&tmpl.elts.no_sp);
	err = ((tmpl.sp) ?
	       rxq_alloc_elts_sp(&tmpl, desc_n, pool) :
	       rxq_alloc_elts(&tmpl, desc_n, pool));
	if (err) {
		ERROR("%p: cannot reallocate WRs, aborting", (void *)dev);
		rte_free(pool);
		assert(err > 0);
		return err;
	}
	assert(tmpl.elts_n == desc_n);
	assert(tmpl.elts.sp != NULL);
	rte_free(pool);
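	/*
	 * The new elements reuse the snatched mbufs as-is; no mempool
	 * allocation happens here, which is what makes rxq_rehash() safe
	 * to call from a control thread (see the function documentation).
	 */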
	/* Clean up original data. */
	rxq->elts_n = 0;
	rte_free(rxq->elts.sp);
	rxq->elts.sp = NULL;
	/* Post WRs. */
	err = ibv_post_recv(tmpl.qp,
			    (tmpl.sp ?
			     &(*tmpl.elts.sp)[0].wr :
			     &(*tmpl.elts.no_sp)[0].wr),
			    &bad_wr);
	if (err) {
		ERROR("%p: ibv_post_recv() failed for WR %p: %s",
		      (void *)dev,
		      (void *)bad_wr,
		      strerror(err));
		goto skip_rtr;
	}
	mod = (struct ibv_exp_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	err = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE);
	if (err)
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(err));
skip_rtr:
	*rxq = tmpl;
	assert(err >= 0);
	return err;
}
/**
 * Configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq
 *   Pointer to RX queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
	  unsigned int socket, const struct rte_eth_rxconf *conf,
	  struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq tmpl = {
		.priv = priv,
		.mp = mp,
		.socket = socket
	};
	struct ibv_exp_qp_attr mod;
	union {
		struct ibv_exp_query_intf_params params;
		struct ibv_exp_cq_init_attr cq;
		struct ibv_exp_res_domain_init_attr rd;
	} attr;
	enum ibv_exp_query_intf_status status;
	struct ibv_recv_wr *bad_wr;
	struct rte_mbuf *buf;
	int ret = 0;
	int parent = (rxq == &priv->rxq_parent);

	(void)conf; /* Thresholds configuration (ignored). */
	/*
	 * If this is a parent queue, hardware must support RSS and
	 * RSS must be enabled.
	 */
	assert((!parent) || ((priv->hw_rss) && (priv->rss)));
	if (parent) {
		/* Even if unused, ibv_create_cq() requires at least one
		 * descriptor. */
		desc = 1;
		goto skip_mr;
	}
	if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) {
		ERROR("%p: invalid number of RX descriptors (must be a"
		      " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N);
		return EINVAL;
	}
	/* Get mbuf length. */
	buf = rte_pktmbuf_alloc(mp);
	if (buf == NULL) {
		ERROR("%p: unable to allocate mbuf", (void *)dev);
		return ENOMEM;
	}
	tmpl.mb_len = buf->buf_len;
	assert((rte_pktmbuf_headroom(buf) +
		rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
	assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
	rte_pktmbuf_free(buf);
	/* Enable scattered packets support for this queue if necessary. */
	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
	     (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
		tmpl.sp = 1;
		desc /= MLX5_PMD_SGE_WR_N;
	}
	DEBUG("%p: %s scattered packets support (%u WRs)",
	      (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc);
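	/*
	 * Registering the entire mempool as one memory region gives every
	 * mbuf in it the same lkey, so the data path never needs per-packet
	 * memory registration.
	 */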
	/* Use the entire RX mempool as the memory region. */
	tmpl.mr = ibv_reg_mr(priv->pd,
			     (void *)mp->elt_va_start,
			     (mp->elt_va_end - mp->elt_va_start),
			     (IBV_ACCESS_LOCAL_WRITE |
			      IBV_ACCESS_REMOTE_WRITE));
	if (tmpl.mr == NULL) {
		ret = EINVAL;
		ERROR("%p: MR creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
skip_mr:
	attr.rd = (struct ibv_exp_res_domain_init_attr){
		.comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
			      IBV_EXP_RES_DOMAIN_MSG_MODEL),
		.thread_model = IBV_EXP_THREAD_SINGLE,
		.msg_model = IBV_EXP_MSG_HIGH_BW,
	};
	tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
	if (tmpl.rd == NULL) {
		ret = ENOMEM;
		ERROR("%p: RD creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	attr.cq = (struct ibv_exp_cq_init_attr){
		.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
		.res_domain = tmpl.rd,
	};
	tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
	if (tmpl.cq == NULL) {
		ret = ENOMEM;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.max_sge);
#ifdef RSS_SUPPORT
	if (priv->rss)
		tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent,
					   tmpl.rd);
	else
#endif /* RSS_SUPPORT */
		tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc, tmpl.rd);
	if (tmpl.qp == NULL) {
		ret = (errno ? errno : EINVAL);
		ERROR("%p: QP creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	mod = (struct ibv_exp_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = ibv_exp_modify_qp(tmpl.qp, &mod,
				(IBV_EXP_QP_STATE |
#ifdef RSS_SUPPORT
				 (parent ? IBV_EXP_QP_GROUP_RSS : 0) |
#endif /* RSS_SUPPORT */
				 IBV_EXP_QP_PORT));
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	if ((parent) || (!priv->rss)) {
		/* Configure MAC and broadcast addresses. */
		ret = rxq_mac_addrs_add(&tmpl);
		if (ret) {
			ERROR("%p: QP flow attachment failed: %s",
			      (void *)dev, strerror(ret));
			goto error;
		}
	}
	/* Allocate descriptors for RX queues, except for the RSS parent. */
	if (parent)
		goto skip_alloc;
	if (tmpl.sp)
		ret = rxq_alloc_elts_sp(&tmpl, desc, NULL);
	else
		ret = rxq_alloc_elts(&tmpl, desc, NULL);
	if (ret) {
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	ret = ibv_post_recv(tmpl.qp,
			    (tmpl.sp ?
			     &(*tmpl.elts.sp)[0].wr :
			     &(*tmpl.elts.no_sp)[0].wr),
			    &bad_wr);
	if (ret) {
		ERROR("%p: ibv_post_recv() failed for WR %p: %s",
		      (void *)dev,
		      (void *)bad_wr,
		      strerror(ret));
		goto error;
	}
skip_alloc:
	mod = (struct ibv_exp_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
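	/*
	 * The QP is now ready to receive (RESET -> INIT -> RTR). A
	 * receive-only raw packet QP stops at RTR; no RTS transition is
	 * required since it never sends.
	 */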
	/* Save port ID. */
	tmpl.port_id = dev->data->port_id;
	DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
	attr.params = (struct ibv_exp_query_intf_params){
		.intf_scope = IBV_EXP_INTF_GLOBAL,
		.intf = IBV_EXP_INTF_CQ,
		.obj = tmpl.cq,
	};
	tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
	if (tmpl.if_cq == NULL) {
		ERROR("%p: CQ interface family query failed with status %d",
		      (void *)dev, status);
		goto error;
	}
	attr.params = (struct ibv_exp_query_intf_params){
		.intf_scope = IBV_EXP_INTF_GLOBAL,
		.intf = IBV_EXP_INTF_QP_BURST,
		.obj = tmpl.qp,
	};
	tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
	if (tmpl.if_qp == NULL) {
		ERROR("%p: QP interface family query failed with status %d",
		      (void *)dev, status);
		goto error;
	}
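	/*
	 * if_cq and if_qp expose the "interface family" burst functions
	 * used by the data path in place of the generic
	 * ibv_poll_cq()/ibv_post_recv() entry points.
	 */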
	/* Clean up rxq in case we're reinitializing it. */
	DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
	rxq_cleanup(rxq);
	*rxq = tmpl;
	DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
	assert(ret == 0);
	return 0;
error:
	rxq_cleanup(&tmpl);
	assert(ret > 0);
	return ret;
}
/**
 * DPDK callback to configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq *rxq;
	int ret;

	priv_lock(priv);
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->rxqs_n) {
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->rxqs_n);
		priv_unlock(priv);
		return -EOVERFLOW;
	}
	/* Only dereference the queue array once idx is known to be valid. */
	rxq = (*priv->rxqs)[idx];
	if (rxq != NULL) {
		DEBUG("%p: reusing already allocated queue index %u (%p)",
		      (void *)dev, idx, (void *)rxq);
		if (priv->started) {
			priv_unlock(priv);
			return -EEXIST;
		}
		(*priv->rxqs)[idx] = NULL;
		rxq_cleanup(rxq);
	} else {
		rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
		if (rxq == NULL) {
			ERROR("%p: unable to allocate queue index %u",
			      (void *)dev, idx);
			priv_unlock(priv);
			return -ENOMEM;
		}
	}
	ret = rxq_setup(dev, rxq, desc, socket, conf, mp);
	if (ret)
		rte_free(rxq);
	else {
		rxq->stats.idx = idx;
		DEBUG("%p: adding RX queue %p to list",
		      (void *)dev, (void *)rxq);
		(*priv->rxqs)[idx] = rxq;
		/* Update receive callback. */
		if (rxq->sp)
			dev->rx_pkt_burst = mlx5_rx_burst_sp;
		else
			dev->rx_pkt_burst = mlx5_rx_burst;
	}
	priv_unlock(priv);
	return -ret;
}
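/*
 * Note: dev->rx_pkt_burst is shared by all queues of a port, so the
 * callback selected above reflects the scattered setting of the most
 * recently configured queue.
 */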
/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 */
void
mlx5_rx_queue_release(void *dpdk_rxq)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct priv *priv;
	unsigned int i;

	if (rxq == NULL)
		return;
	priv = rxq->priv;
	priv_lock(priv);
	assert(rxq != &priv->rxq_parent);
	for (i = 0; (i != priv->rxqs_n); ++i)
		if ((*priv->rxqs)[i] == rxq) {
			DEBUG("%p: removing RX queue %p from list",
			      (void *)priv->dev, (void *)rxq);
			(*priv->rxqs)[i] = NULL;
			break;
		}
	rxq_cleanup(rxq);
	rte_free(rxq);
	priv_unlock(priv);
}