1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
12 #include <sys/queue.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42 0x2c, 0xc6, 0x81, 0xd1,
43 0x5b, 0xdb, 0xf4, 0xf7,
44 0xfc, 0xa2, 0x83, 0x19,
45 0xdb, 0x1a, 0x3e, 0x94,
46 0x6b, 0x9e, 0x38, 0xd9,
47 0x2c, 0x9c, 0x03, 0xd1,
48 0xad, 0x99, 0x44, 0xa7,
49 0xd9, 0x56, 0x3d, 0x59,
50 0x06, 0x3c, 0x25, 0xf3,
51 0xfc, 0x1f, 0xdc, 0x2a,
54 /* Length of the default RSS hash key. */
55 static_assert(MLX5_RSS_HASH_KEY_LEN ==
56 (unsigned int)sizeof(rss_hash_default_key),
57 "wrong RSS default key size.");
60 * Check whether Multi-Packet RQ can be enabled for the device.
63 * Pointer to Ethernet device.
66 * 1 if supported, negative errno value if not.
69 mlx5_check_mprq_support(struct rte_eth_dev *dev)
71 struct priv *priv = dev->data->dev_private;
73 if (priv->config.mprq.enabled &&
74 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
80 * Check whether Multi-Packet RQ is enabled for the Rx queue.
83 * Pointer to receive queue structure.
86 * 0 if disabled, otherwise enabled.
89 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
91 return rxq->strd_num_n > 0;
95 * Check whether Multi-Packet RQ is enabled for the device.
98 * Pointer to Ethernet device.
101 * 0 if disabled, otherwise enabled.
104 mlx5_mprq_enabled(struct rte_eth_dev *dev)
106 struct priv *priv = dev->data->dev_private;
110 if (mlx5_check_mprq_support(dev) < 0)
112 /* All the configured queues should be enabled. */
113 for (i = 0; i < priv->rxqs_n; ++i) {
114 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
118 if (mlx5_rxq_mprq_enabled(rxq))
121 /* Multi-Packet RQ can't be partially configured. */
122 assert(n == 0 || n == priv->rxqs_n);
123 return n == priv->rxqs_n;
127 * Allocate RX queue elements for Multi-Packet RQ.
130 * Pointer to RX queue structure.
133 * 0 on success, a negative errno value otherwise and rte_errno is set.
136 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
138 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
139 unsigned int wqe_n = 1 << rxq->elts_n;
143 /* Iterate on segments. */
144 for (i = 0; i <= wqe_n; ++i) {
145 struct mlx5_mprq_buf *buf;
147 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
148 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
153 (*rxq->mprq_bufs)[i] = buf;
155 rxq->mprq_repl = buf;
158 "port %u Rx queue %u allocated and configured %u segments",
159 rxq->port_id, rxq_ctrl->idx, wqe_n);
162 err = rte_errno; /* Save rte_errno before cleanup. */
164 for (i = 0; (i != wqe_n); ++i) {
165 if ((*rxq->mprq_bufs)[i] != NULL)
166 rte_mempool_put(rxq->mprq_mp,
167 (*rxq->mprq_bufs)[i]);
168 (*rxq->mprq_bufs)[i] = NULL;
170 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
171 rxq->port_id, rxq_ctrl->idx);
172 rte_errno = err; /* Restore rte_errno. */
177 * Allocate RX queue elements for Single-Packet RQ.
180 * Pointer to RX queue structure.
183 * 0 on success, errno value on failure.
186 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
188 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
189 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
193 /* Iterate on segments. */
194 for (i = 0; (i != elts_n); ++i) {
195 struct rte_mbuf *buf;
197 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
199 DRV_LOG(ERR, "port %u empty mbuf pool",
200 PORT_ID(rxq_ctrl->priv));
204 /* Headroom is reserved by rte_pktmbuf_alloc(). */
205 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
206 /* Buffer is supposed to be empty. */
207 assert(rte_pktmbuf_data_len(buf) == 0);
208 assert(rte_pktmbuf_pkt_len(buf) == 0);
210 /* Only the first segment keeps headroom. */
212 SET_DATA_OFF(buf, 0);
213 PORT(buf) = rxq_ctrl->rxq.port_id;
214 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
215 PKT_LEN(buf) = DATA_LEN(buf);
217 (*rxq_ctrl->rxq.elts)[i] = buf;
219 /* If Rx vector is activated. */
220 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
221 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
222 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
225 /* Initialize default rearm_data for vPMD. */
226 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
227 rte_mbuf_refcnt_set(mbuf_init, 1);
228 mbuf_init->nb_segs = 1;
229 mbuf_init->port = rxq->port_id;
231 * prevent compiler reordering:
232 * rearm_data covers previous fields.
234 rte_compiler_barrier();
235 rxq->mbuf_initializer =
236 *(uint64_t *)&mbuf_init->rearm_data;
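/*
 * The vectorized Rx routines copy this 64-bit template into the
 * rearm_data of every received mbuf, resetting data_off, refcnt,
 * nb_segs and port in a single store.
 */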
237 /* Padding with a fake mbuf for vectorized Rx. */
238 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
239 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
242 "port %u Rx queue %u allocated and configured %u segments"
244 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
245 elts_n / (1 << rxq_ctrl->rxq.sges_n));
248 err = rte_errno; /* Save rte_errno before cleanup. */
250 for (i = 0; (i != elts_n); ++i) {
251 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
252 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
253 (*rxq_ctrl->rxq.elts)[i] = NULL;
255 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
256 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
257 rte_errno = err; /* Restore rte_errno. */
262 * Allocate RX queue elements.
265 * Pointer to RX queue structure.
268 * 0 on success, errno value on failure.
271 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
273 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
274 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
278 * Free RX queue elements for Multi-Packet RQ.
281 * Pointer to RX queue structure.
284 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
286 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
289 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
290 rxq->port_id, rxq_ctrl->idx);
291 if (rxq->mprq_bufs == NULL)
293 assert(mlx5_rxq_check_vec_support(rxq) < 0);
294 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
295 if ((*rxq->mprq_bufs)[i] != NULL)
296 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
297 (*rxq->mprq_bufs)[i] = NULL;
299 if (rxq->mprq_repl != NULL) {
300 mlx5_mprq_buf_free(rxq->mprq_repl);
301 rxq->mprq_repl = NULL;
306 * Free RX queue elements for Single-Packet RQ.
309 * Pointer to RX queue structure.
312 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
314 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
315 const uint16_t q_n = (1 << rxq->elts_n);
316 const uint16_t q_mask = q_n - 1;
317 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
320 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
321 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
322 if (rxq->elts == NULL)
325 * Some mbufs in the ring belong to the application; they cannot be freed.
328 if (mlx5_rxq_check_vec_support(rxq) > 0) {
329 for (i = 0; i < used; ++i)
330 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
331 rxq->rq_pi = rxq->rq_ci;
333 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
334 if ((*rxq->elts)[i] != NULL)
335 rte_pktmbuf_free_seg((*rxq->elts)[i]);
336 (*rxq->elts)[i] = NULL;
341 * Free RX queue elements.
344 * Pointer to RX queue structure.
347 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
349 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
350 rxq_free_elts_mprq(rxq_ctrl);
352 rxq_free_elts_sprq(rxq_ctrl);
356 * Clean up a RX queue.
358 * Destroy objects, free allocated memory and reset the structure for reuse.
361 * Pointer to RX queue structure.
364 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
366 DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
367 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
369 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
370 memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
374 * Returns the per-queue supported offloads.
377 * Pointer to Ethernet device.
380 * Supported Rx offloads.
383 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
385 struct priv *priv = dev->data->dev_private;
386 struct mlx5_dev_config *config = &priv->config;
387 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
388 DEV_RX_OFFLOAD_TIMESTAMP |
389 DEV_RX_OFFLOAD_JUMBO_FRAME);
391 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
392 if (config->hw_fcs_strip)
393 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
396 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
397 DEV_RX_OFFLOAD_UDP_CKSUM |
398 DEV_RX_OFFLOAD_TCP_CKSUM);
399 if (config->hw_vlan_strip)
400 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
406 * Returns the per-port supported offloads.
409 * Supported Rx offloads.
412 mlx5_get_rx_port_offloads(void)
414 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
422 * Pointer to Ethernet device structure.
426 * Number of descriptors to configure in queue.
428 * NUMA socket on which memory must be allocated.
430 * Thresholds parameters.
432 * Memory pool for buffer allocations.
435 * 0 on success, a negative errno value otherwise and rte_errno is set.
438 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
439 unsigned int socket, const struct rte_eth_rxconf *conf,
440 struct rte_mempool *mp)
442 struct priv *priv = dev->data->dev_private;
443 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
444 struct mlx5_rxq_ctrl *rxq_ctrl =
445 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
447 if (!rte_is_power_of_2(desc)) {
448 desc = 1 << log2above(desc);
450 "port %u increased number of descriptors in Rx queue %u"
451 " to the next power of two (%d)",
452 dev->data->port_id, idx, desc);
454 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
455 dev->data->port_id, idx, desc);
456 if (idx >= priv->rxqs_n) {
457 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
458 dev->data->port_id, idx, priv->rxqs_n);
459 rte_errno = EOVERFLOW;
462 if (!mlx5_rxq_releasable(dev, idx)) {
463 DRV_LOG(ERR, "port %u unable to release queue index %u",
464 dev->data->port_id, idx);
468 mlx5_rxq_release(dev, idx);
469 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
471 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
472 dev->data->port_id, idx);
476 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
477 dev->data->port_id, idx);
478 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
483 * DPDK callback to release a RX queue.
486 * Generic RX queue pointer.
489 mlx5_rx_queue_release(void *dpdk_rxq)
491 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
492 struct mlx5_rxq_ctrl *rxq_ctrl;
497 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
498 priv = rxq_ctrl->priv;
499 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
500 rte_panic("port %u Rx queue %u is still used by a flow and"
501 " cannot be removed\n",
502 PORT_ID(priv), rxq_ctrl->idx);
503 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
507 * Allocate queue vector and fill epoll fd list for Rx interrupts.
510 * Pointer to Ethernet device.
513 * 0 on success, a negative errno value otherwise and rte_errno is set.
516 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
518 struct priv *priv = dev->data->dev_private;
520 unsigned int rxqs_n = priv->rxqs_n;
521 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
522 unsigned int count = 0;
523 struct rte_intr_handle *intr_handle = dev->intr_handle;
525 if (!dev->data->dev_conf.intr_conf.rxq)
527 mlx5_rx_intr_vec_disable(dev);
528 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
529 if (intr_handle->intr_vec == NULL) {
531 "port %u failed to allocate memory for interrupt"
532 " vector, Rx interrupts will not be supported",
537 intr_handle->type = RTE_INTR_HANDLE_EXT;
538 for (i = 0; i != n; ++i) {
539 /* This rxq ibv must not be released in this function. */
540 struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
545 /* Skip queues that cannot request interrupts. */
546 if (!rxq_ibv || !rxq_ibv->channel) {
547 /* Use invalid intr_vec[] index to disable entry. */
548 intr_handle->intr_vec[i] =
549 RTE_INTR_VEC_RXTX_OFFSET +
550 RTE_MAX_RXTX_INTR_VEC_ID;
553 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
555 "port %u too many Rx queues for interrupt"
556 " vector size (%d), Rx interrupts cannot be"
558 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
559 mlx5_rx_intr_vec_disable(dev);
563 fd = rxq_ibv->channel->fd;
564 flags = fcntl(fd, F_GETFL);
565 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
569 "port %u failed to make Rx interrupt file"
570 " descriptor %d non-blocking for queue index"
572 dev->data->port_id, fd, i);
573 mlx5_rx_intr_vec_disable(dev);
576 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
577 intr_handle->efds[count] = fd;
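/*
 * The two assignments above map Rx queue i to efds[count];
 * rte_intr_rx_ctl() relies on this mapping to add or remove the fd
 * from the epoll set.
 */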
581 mlx5_rx_intr_vec_disable(dev);
583 intr_handle->nb_efd = count;
588 * Clean up the Rx interrupt handler.
591 * Pointer to Ethernet device.
594 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
596 struct priv *priv = dev->data->dev_private;
597 struct rte_intr_handle *intr_handle = dev->intr_handle;
599 unsigned int rxqs_n = priv->rxqs_n;
600 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
602 if (!dev->data->dev_conf.intr_conf.rxq)
604 if (!intr_handle->intr_vec)
606 for (i = 0; i != n; ++i) {
607 struct mlx5_rxq_ctrl *rxq_ctrl;
608 struct mlx5_rxq_data *rxq_data;
610 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
611 RTE_MAX_RXTX_INTR_VEC_ID)
614 * The queue must be accessed directly to release the reference
615 * kept in mlx5_rx_intr_vec_enable().
617 rxq_data = (*priv->rxqs)[i];
618 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
619 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
622 rte_intr_free_epoll_fd(intr_handle);
623 if (intr_handle->intr_vec)
624 free(intr_handle->intr_vec);
625 intr_handle->nb_efd = 0;
626 intr_handle->intr_vec = NULL;
630 * MLX5 CQ notification.
633 * Pointer to receive queue structure.
635 * Sequence number per receive queue.
638 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
641 uint32_t doorbell_hi;
643 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
645 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
646 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
647 doorbell = (uint64_t)doorbell_hi << 32;
648 doorbell |= rxq->cqn;
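/*
 * Arming is done in two steps: the arm sequence and consumer index are
 * stored in the CQ doorbell record first, then the full 64-bit value
 * (doorbell_hi in the upper word, CQN in the lower word) is written to
 * the UAR register so the HCA raises an event on the next completion.
 */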
649 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
650 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
651 cq_db_reg, rxq->uar_lock_cq);
655 * DPDK callback for Rx queue interrupt enable.
658 * Pointer to Ethernet device structure.
663 * 0 on success, a negative errno value otherwise and rte_errno is set.
666 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
668 struct priv *priv = dev->data->dev_private;
669 struct mlx5_rxq_data *rxq_data;
670 struct mlx5_rxq_ctrl *rxq_ctrl;
672 rxq_data = (*priv->rxqs)[rx_queue_id];
677 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
679 struct mlx5_rxq_ibv *rxq_ibv;
681 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
686 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
687 mlx5_rxq_ibv_release(rxq_ibv);
693 * DPDK callback for Rx queue interrupt disable.
696 * Pointer to Ethernet device structure.
701 * 0 on success, a negative errno value otherwise and rte_errno is set.
704 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
706 struct priv *priv = dev->data->dev_private;
707 struct mlx5_rxq_data *rxq_data;
708 struct mlx5_rxq_ctrl *rxq_ctrl;
709 struct mlx5_rxq_ibv *rxq_ibv = NULL;
710 struct ibv_cq *ev_cq;
714 rxq_data = (*priv->rxqs)[rx_queue_id];
719 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
722 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
727 ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
728 if (ret || ev_cq != rxq_ibv->cq) {
732 rxq_data->cq_arm_sn++;
733 mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
736 ret = rte_errno; /* Save rte_errno before cleanup. */
738 mlx5_rxq_ibv_release(rxq_ibv);
739 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
740 dev->data->port_id, rx_queue_id);
741 rte_errno = ret; /* Restore rte_errno. */
746 * Create the Rx queue Verbs object.
749 * Pointer to Ethernet device.
751 * Queue index in DPDK Rx queue array
754 * The Verbs object initialised, NULL otherwise and rte_errno is set.
756 struct mlx5_rxq_ibv *
757 mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
759 struct priv *priv = dev->data->dev_private;
760 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
761 struct mlx5_rxq_ctrl *rxq_ctrl =
762 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
763 struct ibv_wq_attr mod;
766 struct ibv_cq_init_attr_ex ibv;
767 struct mlx5dv_cq_init_attr mlx5;
770 struct ibv_wq_init_attr ibv;
771 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
772 struct mlx5dv_wq_init_attr mlx5;
775 struct ibv_cq_ex cq_attr;
778 unsigned int wqe_n = 1 << rxq_data->elts_n;
779 struct mlx5_rxq_ibv *tmpl;
780 struct mlx5dv_cq cq_info;
781 struct mlx5dv_rwq rwq;
784 struct mlx5dv_obj obj;
785 struct mlx5_dev_config *config = &priv->config;
786 const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
789 assert(!rxq_ctrl->ibv);
790 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
791 priv->verbs_alloc_ctx.obj = rxq_ctrl;
792 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
796 "port %u Rx queue %u cannot allocate verbs resources",
797 dev->data->port_id, rxq_ctrl->idx);
801 tmpl->rxq_ctrl = rxq_ctrl;
803 tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
804 if (!tmpl->channel) {
805 DRV_LOG(ERR, "port %u: comp channel creation failure",
812 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
815 attr.cq.ibv = (struct ibv_cq_init_attr_ex){
817 .channel = tmpl->channel,
820 attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
823 if (config->cqe_comp && !rxq_data->hw_timestamp) {
824 attr.cq.mlx5.comp_mask |=
825 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
826 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
827 attr.cq.mlx5.cqe_comp_res_format =
828 mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
829 MLX5DV_CQE_RES_FORMAT_HASH;
831 attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
834 * For vectorized Rx, it must not be doubled in order to
835 * make cq_ci and rq_ci aligned.
837 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
838 attr.cq.ibv.cqe *= 2;
839 } else if (config->cqe_comp && rxq_data->hw_timestamp) {
841 "port %u Rx CQE compression is disabled for HW"
845 tmpl->cq = mlx5_glue->cq_ex_to_cq
846 (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
848 if (tmpl->cq == NULL) {
849 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
850 dev->data->port_id, idx);
854 DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
855 dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
856 DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
857 dev->data->port_id, priv->device_attr.orig_attr.max_sge);
858 attr.wq.ibv = (struct ibv_wq_init_attr){
859 .wq_context = NULL, /* Could be useful in the future. */
860 .wq_type = IBV_WQT_RQ,
861 /* Max number of outstanding WRs. */
862 .max_wr = wqe_n >> rxq_data->sges_n,
863 /* Max number of scatter/gather elements in a WR. */
864 .max_sge = 1 << rxq_data->sges_n,
868 IBV_WQ_FLAGS_CVLAN_STRIPPING |
870 .create_flags = (rxq_data->vlan_strip ?
871 IBV_WQ_FLAGS_CVLAN_STRIPPING :
874 /* By default, FCS (CRC) is stripped by hardware. */
875 if (rxq_data->crc_present) {
876 attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
877 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
879 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
880 if (config->hw_padding) {
881 attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
882 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
885 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
886 attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
890 struct mlx5dv_striding_rq_init_attr *mprq_attr =
891 &attr.wq.mlx5.striding_rq_attrs;
893 attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
894 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
895 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
896 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
897 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
900 tmpl->wq = mlx5_glue->dv_create_wq(priv->ctx, &attr.wq.ibv,
903 tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq.ibv);
905 if (tmpl->wq == NULL) {
906 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
907 dev->data->port_id, idx);
912 * Make sure the number of WRs * SGEs matches expectations since a queue
913 * cannot allocate more than "desc" buffers.
915 if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
916 attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
918 "port %u Rx queue %u requested %u*%u but got %u*%u"
920 dev->data->port_id, idx,
921 wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
922 attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
926 /* Change queue state to ready. */
927 mod = (struct ibv_wq_attr){
928 .attr_mask = IBV_WQ_ATTR_STATE,
929 .wq_state = IBV_WQS_RDY,
931 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
934 "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
935 dev->data->port_id, idx);
939 obj.cq.in = tmpl->cq;
940 obj.cq.out = &cq_info;
941 obj.rwq.in = tmpl->wq;
943 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
948 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
950 "port %u wrong MLX5_CQE_SIZE environment variable"
951 " value: it should be set to %u",
952 dev->data->port_id, RTE_CACHE_LINE_SIZE);
956 /* Fill the rings. */
957 rxq_data->wqes = rwq.buf;
958 for (i = 0; (i != wqe_n); ++i) {
959 volatile struct mlx5_wqe_data_seg *scat;
964 struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i];
966 scat = &((volatile struct mlx5_wqe_mprq *)
967 rxq_data->wqes)[i].dseg;
968 addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
969 byte_count = (1 << rxq_data->strd_sz_n) *
970 (1 << rxq_data->strd_num_n);
972 struct rte_mbuf *buf = (*rxq_data->elts)[i];
974 scat = &((volatile struct mlx5_wqe_data_seg *)
976 addr = rte_pktmbuf_mtod(buf, uintptr_t);
977 byte_count = DATA_LEN(buf);
979 /* scat->addr must be able to store a pointer. */
980 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
981 *scat = (struct mlx5_wqe_data_seg){
982 .addr = rte_cpu_to_be_64(addr),
983 .byte_count = rte_cpu_to_be_32(byte_count),
984 .lkey = mlx5_rx_addr2mr(rxq_data, addr),
987 rxq_data->rq_db = rwq.dbrec;
988 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
990 rxq_data->consumed_strd = 0;
992 rxq_data->zip = (struct rxq_zip){
995 rxq_data->cq_db = cq_info.dbrec;
996 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
997 rxq_data->cq_uar = cq_info.cq_uar;
998 rxq_data->cqn = cq_info.cqn;
999 rxq_data->cq_arm_sn = 0;
1000 /* Update doorbell counter. */
1001 rxq_data->rq_ci = wqe_n >> rxq_data->sges_n;
1003 *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
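/* Writing rq_ci to the doorbell record makes all initial receive WQEs visible to the HCA. */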
1004 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1005 idx, (void *)&tmpl);
1006 rte_atomic32_inc(&tmpl->refcnt);
1007 LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
1008 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1011 ret = rte_errno; /* Save rte_errno before cleanup. */
1013 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1015 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1017 claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
1018 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1019 rte_errno = ret; /* Restore rte_errno. */
1024 * Get an Rx queue Verbs object.
1027 * Pointer to Ethernet device.
1029 * Queue index in DPDK Rx queue array
1032 * The Verbs object if it exists.
1034 struct mlx5_rxq_ibv *
1035 mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
1037 struct priv *priv = dev->data->dev_private;
1038 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1039 struct mlx5_rxq_ctrl *rxq_ctrl;
1041 if (idx >= priv->rxqs_n)
1045 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1046 if (rxq_ctrl->ibv) {
1047 rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
1049 return rxq_ctrl->ibv;
1053 * Release an Rx verbs queue object.
1056 * Verbs Rx queue object.
1059 * 1 while a reference on it exists, 0 when freed.
1062 mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
1065 assert(rxq_ibv->wq);
1066 assert(rxq_ibv->cq);
1067 if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
1068 rxq_free_elts(rxq_ibv->rxq_ctrl);
1069 claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
1070 claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
1071 if (rxq_ibv->channel)
1072 claim_zero(mlx5_glue->destroy_comp_channel
1073 (rxq_ibv->channel));
1074 LIST_REMOVE(rxq_ibv, next);
1082 * Verify the Verbs Rx queue list is empty.
1085 * Pointer to Ethernet device.
1088 * The number of objects not released.
1091 mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
1093 struct priv *priv = dev->data->dev_private;
1095 struct mlx5_rxq_ibv *rxq_ibv;
1097 LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
1098 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
1099 dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
1106 * Return true if a single reference exists on the object.
1109 * Verbs Rx queue object.
1112 mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
1115 return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
1119 * Callback function to initialize mbufs for Multi-Packet RQ.
1122 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
1123 void *_m, unsigned int i __rte_unused)
1125 struct mlx5_mprq_buf *buf = _m;
1127 memset(_m, 0, sizeof(*buf));
1129 rte_atomic16_set(&buf->refcnt, 1);
1133 * Free mempool of Multi-Packet RQ.
1136 * Pointer to Ethernet device.
1139 * 0 on success, negative errno value on failure.
1142 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1144 struct priv *priv = dev->data->dev_private;
1145 struct rte_mempool *mp = priv->mprq_mp;
1150 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1151 dev->data->port_id, mp->name);
1153 * If a buffer in the pool has been externally attached to an mbuf and is
1154 * still in use by the application, destroying the Rx queue can corrupt
1155 * the packet. This is unlikely, but it can happen if the application
1156 * dynamically creates and destroys queues while holding Rx packets.
1158 * TODO: This is unavoidable for now because the mempool for Multi-Packet
1159 * RQ isn't provided by the application but is managed by the PMD.
1161 if (!rte_mempool_full(mp)) {
1163 "port %u mempool for Multi-Packet RQ is still in use",
1164 dev->data->port_id);
1168 rte_mempool_free(mp);
1169 /* Unset mempool for each Rx queue. */
1170 for (i = 0; i != priv->rxqs_n; ++i) {
1171 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1175 rxq->mprq_mp = NULL;
1181 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1182 * mempool. If already allocated, reuse it if there are enough elements.
1183 * Otherwise, resize it.
1186 * Pointer to Ethernet device.
1189 * 0 on success, negative errno value on failure.
1192 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1194 struct priv *priv = dev->data->dev_private;
1195 struct rte_mempool *mp = priv->mprq_mp;
1196 char name[RTE_MEMPOOL_NAMESIZE];
1197 unsigned int desc = 0;
1198 unsigned int buf_len;
1199 unsigned int obj_num;
1200 unsigned int obj_size;
1201 unsigned int strd_num_n = 0;
1202 unsigned int strd_sz_n = 0;
1205 if (!mlx5_mprq_enabled(dev))
1207 /* Count the total number of descriptors configured. */
1208 for (i = 0; i != priv->rxqs_n; ++i) {
1209 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1213 desc += 1 << rxq->elts_n;
1214 /* Get the max number of strides. */
1215 if (strd_num_n < rxq->strd_num_n)
1216 strd_num_n = rxq->strd_num_n;
1217 /* Get the max size of a stride. */
1218 if (strd_sz_n < rxq->strd_sz_n)
1219 strd_sz_n = rxq->strd_sz_n;
1221 assert(strd_num_n && strd_sz_n);
1222 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1223 obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
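/*
 * Each mempool object holds a struct mlx5_mprq_buf header immediately
 * followed by the stride memory of one multi-packet WQE; this is the
 * address mlx5_mprq_buf_addr() returns when the WQE is filled.
 */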
1225 * Received packets can either be memcpy'd or attached as external buffers.
1226 * When a packet is attached to an mbuf as an external buffer, it isn't
1227 * possible to predict how the buffers will be queued by the application,
1228 * so the exact number of needed buffers cannot be pre-allocated; enough
1229 * buffers have to be prepared speculatively instead.
1231 * In the data path, if this mempool is depleted, the PMD will memcpy
1232 * received packets into buffers provided by the application (rxq->mp)
1233 * until this mempool becomes available again.
1236 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
1238 * rte_mempool_create_empty() has a sanity check that refuses a cache size
1239 * which is large compared to the number of elements.
1240 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the constant 2 is
1241 * used here instead.
1243 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
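/*
 * Illustrative example only: with 4 MPRQ Rx queues of 512 elements each,
 * desc = 4 * 512 = 2048 and obj_num starts as
 * 2048 + 4 * MLX5_MPRQ_MP_CACHE_SZ buffers.
 */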
1244 /* Check whether a mempool is already allocated and whether it can be reused. */
1245 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1246 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1247 dev->data->port_id, mp->name);
1250 } else if (mp != NULL) {
1251 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1252 dev->data->port_id, mp->name);
1254 * If freeing fails, the mempool may still be in use and there is no
1255 * choice but to keep using the existing one. On buffer underrun,
1256 * packets will be memcpy'd instead of attached as external buffers.
1259 if (mlx5_mprq_free_mp(dev)) {
1260 if (mp->elt_size >= obj_size)
1266 snprintf(name, sizeof(name), "%s-mprq", dev->device->name);
1267 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1268 0, NULL, NULL, mlx5_mprq_buf_init, NULL,
1269 dev->device->numa_node, 0);
1272 "port %u failed to allocate a mempool for"
1273 " Multi-Packet RQ, count=%u, size=%u",
1274 dev->data->port_id, obj_num, obj_size);
1280 /* Set mempool for each Rx queue. */
1281 for (i = 0; i != priv->rxqs_n; ++i) {
1282 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1288 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1289 dev->data->port_id);
1294 * Create a DPDK Rx queue.
1297 * Pointer to Ethernet device.
1301 * Number of descriptors to configure in queue.
1303 * NUMA socket on which memory must be allocated.
1306 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1308 struct mlx5_rxq_ctrl *
1309 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1310 unsigned int socket, const struct rte_eth_rxconf *conf,
1311 struct rte_mempool *mp)
1313 struct priv *priv = dev->data->dev_private;
1314 struct mlx5_rxq_ctrl *tmpl;
1315 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1316 unsigned int mprq_stride_size;
1317 struct mlx5_dev_config *config = &priv->config;
1319 * Always allocate extra slots, even if eventually
1320 * the vector Rx will not be used.
1323 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1324 uint64_t offloads = conf->offloads |
1325 dev->data->dev_conf.rxmode.offloads;
1326 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1328 tmpl = rte_calloc_socket("RXQ", 1,
1330 desc_n * sizeof(struct rte_mbuf *),
1336 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1337 MLX5_MR_BTREE_CACHE_N, socket)) {
1338 /* rte_errno is already set. */
1341 tmpl->socket = socket;
1342 if (dev->data->dev_conf.intr_conf.rxq)
1345 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1346 * following conditions are met:
1347 * - MPRQ is enabled.
1348 * - The number of descs is more than the number of strides.
1349 * - max_rx_pkt_len plus overhead is less than the max size of a stride.
1351 * Otherwise, enable Rx scatter if necessary.
1353 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
1355 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1356 sizeof(struct rte_mbuf_ext_shared_info) +
1357 RTE_PKTMBUF_HEADROOM;
1359 desc >= (1U << config->mprq.stride_num_n) &&
1360 mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
1361 /* TODO: Rx scatter isn't supported yet. */
1362 tmpl->rxq.sges_n = 0;
1363 /* Trim the number of descs needed. */
1364 desc >>= config->mprq.stride_num_n;
1365 tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
1366 tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1367 config->mprq.min_stride_size_n);
1368 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1369 tmpl->rxq.mprq_max_memcpy_len =
1370 RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
1371 config->mprq.max_memcpy_len);
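/*
 * Packets no longer than mprq_max_memcpy_len are copied into an mbuf
 * from the application pool; longer packets are, when possible, attached
 * to the MPRQ buffer as external buffers in the Rx burst routine.
 */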
1373 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1374 " strd_num_n = %u, strd_sz_n = %u",
1375 dev->data->port_id, idx,
1376 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1377 } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
1378 (mb_len - RTE_PKTMBUF_HEADROOM)) {
1379 tmpl->rxq.sges_n = 0;
1380 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1382 RTE_PKTMBUF_HEADROOM +
1383 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1384 unsigned int sges_n;
1387 * Determine the number of SGEs needed for a full packet
1388 * and round it to the next power of two.
1390 sges_n = log2above((size / mb_len) + !!(size % mb_len));
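/*
 * Worked example (assuming the default 128-byte headroom and a 2176-byte
 * mbuf data room): max_rx_pkt_len = 9000 gives size = 9128, i.e. 5
 * segments, rounded up to sges_n = 3 (8 SGEs per packet).
 */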
1391 tmpl->rxq.sges_n = sges_n;
1392 /* Make sure rxq.sges_n did not overflow. */
1393 size = mb_len * (1 << tmpl->rxq.sges_n);
1394 size -= RTE_PKTMBUF_HEADROOM;
1395 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
1397 "port %u too many SGEs (%u) needed to handle"
1398 " requested maximum packet size %u",
1401 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1402 rte_errno = EOVERFLOW;
1407 "port %u the requested maximum Rx packet size (%u) is"
1408 " larger than a single mbuf (%u) and scattered mode has"
1409 " not been requested",
1411 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1412 mb_len - RTE_PKTMBUF_HEADROOM);
1414 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1415 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1416 if (desc % (1 << tmpl->rxq.sges_n)) {
1418 "port %u number of Rx queue descriptors (%u) is not a"
1419 " multiple of SGEs per packet (%u)",
1422 1 << tmpl->rxq.sges_n);
1426 /* Toggle RX checksum offload if hardware supports it. */
1427 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1428 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1429 /* Configure VLAN stripping. */
1430 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1431 /* By default, FCS (CRC) is stripped by hardware. */
1432 tmpl->rxq.crc_present = 0;
1433 if (rte_eth_dev_must_keep_crc(offloads)) {
1434 if (config->hw_fcs_strip) {
1435 tmpl->rxq.crc_present = 1;
1438 "port %u CRC stripping has been disabled but will"
1439 " still be performed by hardware, make sure MLNX_OFED"
1440 " and firmware are up to date",
1441 dev->data->port_id);
1445 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1446 " incoming frames to hide it",
1448 tmpl->rxq.crc_present ? "disabled" : "enabled",
1449 tmpl->rxq.crc_present << 2);
1451 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1452 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1453 tmpl->rxq.port_id = dev->data->port_id;
1456 tmpl->rxq.stats.idx = idx;
1457 tmpl->rxq.elts_n = log2above(desc);
1459 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1461 tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
1464 rte_atomic32_inc(&tmpl->refcnt);
1465 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1476 * Pointer to Ethernet device.
1481 * A pointer to the queue if it exists, NULL otherwise.
1483 struct mlx5_rxq_ctrl *
1484 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1486 struct priv *priv = dev->data->dev_private;
1487 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1489 if ((*priv->rxqs)[idx]) {
1490 rxq_ctrl = container_of((*priv->rxqs)[idx],
1491 struct mlx5_rxq_ctrl,
1493 mlx5_rxq_ibv_get(dev, idx);
1494 rte_atomic32_inc(&rxq_ctrl->refcnt);
1500 * Release a Rx queue.
1503 * Pointer to Ethernet device.
1508 * 1 while a reference on it exists, 0 when freed.
1511 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1513 struct priv *priv = dev->data->dev_private;
1514 struct mlx5_rxq_ctrl *rxq_ctrl;
1516 if (!(*priv->rxqs)[idx])
1518 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1519 assert(rxq_ctrl->priv);
1520 if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
1521 rxq_ctrl->ibv = NULL;
1522 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1523 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1524 LIST_REMOVE(rxq_ctrl, next);
1526 (*priv->rxqs)[idx] = NULL;
1533 * Verify if the queue can be released.
1536 * Pointer to Ethernet device.
1541 * 1 if the queue can be released, negative errno otherwise and rte_errno is set.
1545 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1547 struct priv *priv = dev->data->dev_private;
1548 struct mlx5_rxq_ctrl *rxq_ctrl;
1550 if (!(*priv->rxqs)[idx]) {
1554 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1555 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
1559 * Verify the Rx queue list is empty.
1562 * Pointer to Ethernet device.
1565 * The number of objects not released.
1568 mlx5_rxq_verify(struct rte_eth_dev *dev)
1570 struct priv *priv = dev->data->dev_private;
1571 struct mlx5_rxq_ctrl *rxq_ctrl;
1574 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1575 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1576 dev->data->port_id, rxq_ctrl->idx);
1583 * Create an indirection table.
1586 * Pointer to Ethernet device.
1588 * Queues entering the indirection table.
1590 * Number of queues in the array.
1593 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1595 struct mlx5_ind_table_ibv *
1596 mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
1599 struct priv *priv = dev->data->dev_private;
1600 struct mlx5_ind_table_ibv *ind_tbl;
1601 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1602 log2above(queues_n) :
1603 log2above(priv->config.ind_table_max_size);
1604 struct ibv_wq *wq[1 << wq_n];
1608 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1609 queues_n * sizeof(uint16_t), 0);
1614 for (i = 0; i != queues_n; ++i) {
1615 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1619 wq[i] = rxq->ibv->wq;
1620 ind_tbl->queues[i] = queues[i];
1622 ind_tbl->queues_n = queues_n;
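/*
 * When queues_n is not a power of two, the table is sized to the
 * configured ind_table_max_size and the remaining entries are filled
 * below by repeating the WQs already recorded.
 */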
1623 /* Finalise indirection table. */
1624 for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
1626 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1628 &(struct ibv_rwq_ind_table_init_attr){
1629 .log_ind_tbl_size = wq_n,
1633 if (!ind_tbl->ind_table) {
1637 rte_atomic32_inc(&ind_tbl->refcnt);
1638 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1642 DEBUG("port %u cannot create indirection table", dev->data->port_id);
1647 * Get an indirection table.
1650 * Pointer to Ethernet device.
1652 * Queues entering the indirection table.
1654 * Number of queues in the array.
1657 * An indirection table if found.
1659 struct mlx5_ind_table_ibv *
1660 mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
1663 struct priv *priv = dev->data->dev_private;
1664 struct mlx5_ind_table_ibv *ind_tbl;
1666 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1667 if ((ind_tbl->queues_n == queues_n) &&
1668 (memcmp(ind_tbl->queues, queues,
1669 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1676 rte_atomic32_inc(&ind_tbl->refcnt);
1677 for (i = 0; i != ind_tbl->queues_n; ++i)
1678 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1684 * Release an indirection table.
1687 * Pointer to Ethernet device.
1689 * Indirection table to release.
1692 * 1 while a reference on it exists, 0 when freed.
1695 mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
1696 struct mlx5_ind_table_ibv *ind_tbl)
1700 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1701 claim_zero(mlx5_glue->destroy_rwq_ind_table
1702 (ind_tbl->ind_table));
1703 for (i = 0; i != ind_tbl->queues_n; ++i)
1704 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1705 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1706 LIST_REMOVE(ind_tbl, next);
1714 * Verify the Verbs indirection table list is empty.
1717 * Pointer to Ethernet device.
1720 * The number of objects not released.
1723 mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
1725 struct priv *priv = dev->data->dev_private;
1726 struct mlx5_ind_table_ibv *ind_tbl;
1729 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1731 "port %u Verbs indirection table %p still referenced",
1732 dev->data->port_id, (void *)ind_tbl);
1739 * Create an Rx Hash queue.
1742 * Pointer to Ethernet device.
1744 * RSS key for the Rx hash queue.
1745 * @param rss_key_len
1746 * RSS key length.
1747 * @param hash_fields
1748 * Verbs protocol hash fields on which to apply RSS.
1749 * @param queues
1750 * Queues entering the hash queue. If hash_fields is empty, only the
1751 * first queue index is used for the indirection table.
1756 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1759 mlx5_hrxq_new(struct rte_eth_dev *dev,
1760 const uint8_t *rss_key, uint32_t rss_key_len,
1761 uint64_t hash_fields,
1762 const uint16_t *queues, uint32_t queues_n,
1763 int tunnel __rte_unused)
1765 struct priv *priv = dev->data->dev_private;
1766 struct mlx5_hrxq *hrxq;
1767 struct mlx5_ind_table_ibv *ind_tbl;
1771 queues_n = hash_fields ? queues_n : 1;
1772 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1774 ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
1780 rss_key_len = MLX5_RSS_HASH_KEY_LEN;
1781 rss_key = rss_hash_default_key;
1783 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1784 qp = mlx5_glue->dv_create_qp
1786 &(struct ibv_qp_init_attr_ex){
1787 .qp_type = IBV_QPT_RAW_PACKET,
1789 IBV_QP_INIT_ATTR_PD |
1790 IBV_QP_INIT_ATTR_IND_TABLE |
1791 IBV_QP_INIT_ATTR_RX_HASH,
1792 .rx_hash_conf = (struct ibv_rx_hash_conf){
1793 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1794 .rx_hash_key_len = rss_key_len ? rss_key_len :
1795 MLX5_RSS_HASH_KEY_LEN,
1796 .rx_hash_key = rss_key ?
1797 (void *)(uintptr_t)rss_key :
1798 rss_hash_default_key,
1799 .rx_hash_fields_mask = hash_fields,
1801 .rwq_ind_tbl = ind_tbl->ind_table,
1804 &(struct mlx5dv_qp_init_attr){
1805 .comp_mask = tunnel ?
1806 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS : 0,
1807 .create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS,
1810 qp = mlx5_glue->create_qp_ex
1812 &(struct ibv_qp_init_attr_ex){
1813 .qp_type = IBV_QPT_RAW_PACKET,
1815 IBV_QP_INIT_ATTR_PD |
1816 IBV_QP_INIT_ATTR_IND_TABLE |
1817 IBV_QP_INIT_ATTR_RX_HASH,
1818 .rx_hash_conf = (struct ibv_rx_hash_conf){
1819 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1820 .rx_hash_key_len = rss_key_len ? rss_key_len :
1821 MLX5_RSS_HASH_KEY_LEN,
1822 .rx_hash_key = rss_key ?
1823 (void *)(uintptr_t)rss_key :
1824 rss_hash_default_key,
1825 .rx_hash_fields_mask = hash_fields,
1827 .rwq_ind_tbl = ind_tbl->ind_table,
1835 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
1838 hrxq->ind_table = ind_tbl;
1840 hrxq->rss_key_len = rss_key_len;
1841 hrxq->hash_fields = hash_fields;
1842 memcpy(hrxq->rss_key, rss_key, rss_key_len);
1843 rte_atomic32_inc(&hrxq->refcnt);
1844 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1847 err = rte_errno; /* Save rte_errno before cleanup. */
1848 mlx5_ind_table_ibv_release(dev, ind_tbl);
1850 claim_zero(mlx5_glue->destroy_qp(qp));
1851 rte_errno = err; /* Restore rte_errno. */
1856 * Get an Rx Hash queue.
1859 * Pointer to Ethernet device.
1861 * RSS configuration for the Rx hash queue.
1863 * Queues entering the hash queue. If hash_fields is empty, only the
1864 * first queue index is used for the indirection table.
1869 * A hash Rx queue on success.
1872 mlx5_hrxq_get(struct rte_eth_dev *dev,
1873 const uint8_t *rss_key, uint32_t rss_key_len,
1874 uint64_t hash_fields,
1875 const uint16_t *queues, uint32_t queues_n)
1877 struct priv *priv = dev->data->dev_private;
1878 struct mlx5_hrxq *hrxq;
1880 queues_n = hash_fields ? queues_n : 1;
1881 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1882 struct mlx5_ind_table_ibv *ind_tbl;
1884 if (hrxq->rss_key_len != rss_key_len)
1886 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1888 if (hrxq->hash_fields != hash_fields)
1890 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1893 if (ind_tbl != hrxq->ind_table) {
1894 mlx5_ind_table_ibv_release(dev, ind_tbl);
1897 rte_atomic32_inc(&hrxq->refcnt);
1904 * Release the hash Rx queue.
1907 * Pointer to Ethernet device.
1909 * Pointer to Hash Rx queue to release.
1912 * 1 while a reference on it exists, 0 when freed.
1915 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
1917 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1918 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1919 mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
1920 LIST_REMOVE(hrxq, next);
1924 claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
1929 * Verify the Verbs hash Rx queue list is empty.
1932 * Pointer to Ethernet device.
1935 * The number of objects not released.
1938 mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
1940 struct priv *priv = dev->data->dev_private;
1941 struct mlx5_hrxq *hrxq;
1944 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1946 "port %u Verbs hash Rx queue %p still referenced",
1947 dev->data->port_id, (void *)hrxq);
1954 * Create a drop Rx queue Verbs object.
1957 * Pointer to Ethernet device.
1960 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1962 struct mlx5_rxq_ibv *
1963 mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
1965 struct priv *priv = dev->data->dev_private;
1967 struct ibv_wq *wq = NULL;
1968 struct mlx5_rxq_ibv *rxq;
1970 if (priv->drop_queue.rxq)
1971 return priv->drop_queue.rxq;
1972 cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
1974 DEBUG("port %u cannot allocate CQ for drop queue",
1975 dev->data->port_id);
1979 wq = mlx5_glue->create_wq(priv->ctx,
1980 &(struct ibv_wq_init_attr){
1981 .wq_type = IBV_WQT_RQ,
1988 DEBUG("port %u cannot allocate WQ for drop queue",
1989 dev->data->port_id);
1993 rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
1995 DEBUG("port %u cannot allocate drop Rx queue memory",
1996 dev->data->port_id);
2002 priv->drop_queue.rxq = rxq;
2006 claim_zero(mlx5_glue->destroy_wq(wq));
2008 claim_zero(mlx5_glue->destroy_cq(cq));
2013 * Release a drop Rx queue Verbs object.
2016 * Pointer to Ethernet device.
2019 * The Verbs object initialised, NULL otherwise and rte_errno is set.
2022 mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
2024 struct priv *priv = dev->data->dev_private;
2025 struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
2028 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2030 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2032 priv->drop_queue.rxq = NULL;
2036 * Create a drop indirection table.
2039 * Pointer to Ethernet device.
2042 * The Verbs object initialised, NULL otherwise and rte_errno is set.
2044 struct mlx5_ind_table_ibv *
2045 mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
2047 struct priv *priv = dev->data->dev_private;
2048 struct mlx5_ind_table_ibv *ind_tbl;
2049 struct mlx5_rxq_ibv *rxq;
2050 struct mlx5_ind_table_ibv tmpl;
2052 rxq = mlx5_rxq_ibv_drop_new(dev);
2055 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2057 &(struct ibv_rwq_ind_table_init_attr){
2058 .log_ind_tbl_size = 0,
2059 .ind_tbl = &rxq->wq,
2062 if (!tmpl.ind_table) {
2063 DEBUG("port %u cannot allocate indirection table for drop"
2065 dev->data->port_id);
2069 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2074 ind_tbl->ind_table = tmpl.ind_table;
2077 mlx5_rxq_ibv_drop_release(dev);
2082 * Release a drop indirection table.
2085 * Pointer to Ethernet device.
2088 mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
2090 struct priv *priv = dev->data->dev_private;
2091 struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
2093 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2094 mlx5_rxq_ibv_drop_release(dev);
2096 priv->drop_queue.hrxq->ind_table = NULL;
2100 * Create a drop Rx Hash queue.
2103 * Pointer to Ethernet device.
2106 * The Verbs object initialised, NULL otherwise and rte_errno is set.
2109 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2111 struct priv *priv = dev->data->dev_private;
2112 struct mlx5_ind_table_ibv *ind_tbl;
2114 struct mlx5_hrxq *hrxq;
2116 if (priv->drop_queue.hrxq) {
2117 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2118 return priv->drop_queue.hrxq;
2120 ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
2123 qp = mlx5_glue->create_qp_ex(priv->ctx,
2124 &(struct ibv_qp_init_attr_ex){
2125 .qp_type = IBV_QPT_RAW_PACKET,
2127 IBV_QP_INIT_ATTR_PD |
2128 IBV_QP_INIT_ATTR_IND_TABLE |
2129 IBV_QP_INIT_ATTR_RX_HASH,
2130 .rx_hash_conf = (struct ibv_rx_hash_conf){
2132 IBV_RX_HASH_FUNC_TOEPLITZ,
2133 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2134 .rx_hash_key = rss_hash_default_key,
2135 .rx_hash_fields_mask = 0,
2137 .rwq_ind_tbl = ind_tbl->ind_table,
2141 DEBUG("port %u cannot allocate QP for drop queue",
2142 dev->data->port_id);
2146 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2149 "port %u cannot allocate memory for drop queue",
2150 dev->data->port_id);
2154 hrxq->ind_table = ind_tbl;
2156 priv->drop_queue.hrxq = hrxq;
2157 rte_atomic32_set(&hrxq->refcnt, 1);
2161 mlx5_ind_table_ibv_drop_release(dev);
2166 * Release a drop hash Rx queue.
2169 * Pointer to Ethernet device.
2172 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2174 struct priv *priv = dev->data->dev_private;
2175 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2177 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2178 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2179 mlx5_ind_table_ibv_drop_release(dev);
2181 priv->drop_queue.hrxq = NULL;