1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
12 #include <sys/queue.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42 0x2c, 0xc6, 0x81, 0xd1,
43 0x5b, 0xdb, 0xf4, 0xf7,
44 0xfc, 0xa2, 0x83, 0x19,
45 0xdb, 0x1a, 0x3e, 0x94,
46 0x6b, 0x9e, 0x38, 0xd9,
47 0x2c, 0x9c, 0x03, 0xd1,
48 0xad, 0x99, 0x44, 0xa7,
49 0xd9, 0x56, 0x3d, 0x59,
50 0x06, 0x3c, 0x25, 0xf3,
51 0xfc, 0x1f, 0xdc, 0x2a,
54 /* Length of the default RSS hash key. */
55 const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
58 * Check whether Multi-Packet RQ can be enabled for the device.
61 * Pointer to Ethernet device.
64 * 1 if supported, negative errno value if not.
67 mlx5_check_mprq_support(struct rte_eth_dev *dev)
69 struct priv *priv = dev->data->dev_private;
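/*
 * MPRQ is considered supported only when it has been explicitly enabled
 * and the number of configured Rx queues reaches the configured minimum
 * (min_rxqs_num).
 */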
71 if (priv->config.mprq.enabled &&
72 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
78 * Check whether Multi-Packet RQ is enabled for the Rx queue.
81 * Pointer to receive queue structure.
84 * 0 if disabled, otherwise enabled.
87 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
89 return rxq->strd_num_n > 0;
93 * Check whether Multi-Packet RQ is enabled for the device.
96 * Pointer to Ethernet device.
99 * 0 if disabled, otherwise enabled.
102 mlx5_mprq_enabled(struct rte_eth_dev *dev)
104 struct priv *priv = dev->data->dev_private;
108 if (mlx5_check_mprq_support(dev) < 0)
110 /* All the configured queues should be enabled. */
111 for (i = 0; i < priv->rxqs_n; ++i) {
112 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
116 if (mlx5_rxq_mprq_enabled(rxq))
119 /* Multi-Packet RQ can't be partially configured. */
120 assert(n == 0 || n == priv->rxqs_n);
121 return n == priv->rxqs_n;
125 * Allocate RX queue elements for Multi-Packet RQ.
128 * Pointer to RX queue structure.
131 * 0 on success, a negative errno value otherwise and rte_errno is set.
134 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
136 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
137 unsigned int wqe_n = 1 << rxq->elts_n;
141 /* Iterate on segments. */
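/*
 * One extra iteration (i == wqe_n) allocates a spare buffer which is kept
 * aside as the replenishment buffer (mprq_repl) instead of being placed
 * in the ring.
 */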
142 for (i = 0; i <= wqe_n; ++i) {
143 struct mlx5_mprq_buf *buf;
145 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
146 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
151 (*rxq->mprq_bufs)[i] = buf;
153 rxq->mprq_repl = buf;
156 "port %u Rx queue %u allocated and configured %u segments",
157 rxq->port_id, rxq_ctrl->idx, wqe_n);
160 err = rte_errno; /* Save rte_errno before cleanup. */
162 for (i = 0; (i != wqe_n); ++i) {
163 if ((*rxq->mprq_bufs)[i] != NULL)
164 rte_mempool_put(rxq->mprq_mp,
165 (*rxq->mprq_bufs)[i]);
166 (*rxq->mprq_bufs)[i] = NULL;
168 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
169 rxq->port_id, rxq_ctrl->idx);
170 rte_errno = err; /* Restore rte_errno. */
175 * Allocate RX queue elements for Single-Packet RQ.
178 * Pointer to RX queue structure.
181 * 0 on success, a negative errno value otherwise and rte_errno is set.
184 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
186 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
187 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
191 /* Iterate on segments. */
192 for (i = 0; (i != elts_n); ++i) {
193 struct rte_mbuf *buf;
195 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
197 DRV_LOG(ERR, "port %u empty mbuf pool",
198 PORT_ID(rxq_ctrl->priv));
202 /* Headroom is reserved by rte_pktmbuf_alloc(). */
203 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
204 /* Buffer is supposed to be empty. */
205 assert(rte_pktmbuf_data_len(buf) == 0);
206 assert(rte_pktmbuf_pkt_len(buf) == 0);
208 /* Only the first segment keeps headroom. */
210 SET_DATA_OFF(buf, 0);
211 PORT(buf) = rxq_ctrl->rxq.port_id;
212 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
213 PKT_LEN(buf) = DATA_LEN(buf);
215 (*rxq_ctrl->rxq.elts)[i] = buf;
217 /* If Rx vector is activated. */
218 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
219 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
220 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
223 /* Initialize default rearm_data for vPMD. */
224 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
225 rte_mbuf_refcnt_set(mbuf_init, 1);
226 mbuf_init->nb_segs = 1;
227 mbuf_init->port = rxq->port_id;
229 * Prevent compiler reordering:
230 * rearm_data covers the previous fields.
232 rte_compiler_barrier();
233 rxq->mbuf_initializer =
234 *(uint64_t *)&mbuf_init->rearm_data;
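/*
 * This 64-bit snapshot of rearm_data lets the vectorized Rx path reset
 * data_off, refcnt, nb_segs and port of every received mbuf with a single
 * store.
 */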
235 /* Padding with a fake mbuf for vectorized Rx. */
236 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
237 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
240 "port %u Rx queue %u allocated and configured %u segments"
242 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
243 elts_n / (1 << rxq_ctrl->rxq.sges_n));
246 err = rte_errno; /* Save rte_errno before cleanup. */
248 for (i = 0; (i != elts_n); ++i) {
249 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
250 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
251 (*rxq_ctrl->rxq.elts)[i] = NULL;
253 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
254 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
255 rte_errno = err; /* Restore rte_errno. */
260 * Allocate RX queue elements.
263 * Pointer to RX queue structure.
266 * 0 on success, a negative errno value otherwise and rte_errno is set.
269 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
271 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
272 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
276 * Free RX queue elements for Multi-Packet RQ.
279 * Pointer to RX queue structure.
282 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
284 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
287 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
288 rxq->port_id, rxq_ctrl->idx);
289 if (rxq->mprq_bufs == NULL)
291 assert(mlx5_rxq_check_vec_support(rxq) < 0);
292 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
293 if ((*rxq->mprq_bufs)[i] != NULL)
294 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
295 (*rxq->mprq_bufs)[i] = NULL;
297 if (rxq->mprq_repl != NULL) {
298 mlx5_mprq_buf_free(rxq->mprq_repl);
299 rxq->mprq_repl = NULL;
304 * Free RX queue elements for Single-Packet RQ.
307 * Pointer to RX queue structure.
310 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
312 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
313 const uint16_t q_n = (1 << rxq->elts_n);
314 const uint16_t q_mask = q_n - 1;
315 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
318 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
319 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
320 if (rxq->elts == NULL)
323 * Some mbufs in the ring belong to the application; they cannot be
326 if (mlx5_rxq_check_vec_support(rxq) > 0) {
327 for (i = 0; i < used; ++i)
328 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
329 rxq->rq_pi = rxq->rq_ci;
331 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
332 if ((*rxq->elts)[i] != NULL)
333 rte_pktmbuf_free_seg((*rxq->elts)[i]);
334 (*rxq->elts)[i] = NULL;
339 * Free RX queue elements.
342 * Pointer to RX queue structure.
345 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
347 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
348 rxq_free_elts_mprq(rxq_ctrl);
350 rxq_free_elts_sprq(rxq_ctrl);
354 * Clean up a RX queue.
356 * Destroy objects, free allocated memory and reset the structure for reuse.
359 * Pointer to RX queue structure.
362 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
364 DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
365 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
367 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
368 memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
372 * Returns the per-queue supported offloads.
375 * Pointer to Ethernet device.
378 * Supported Rx offloads.
381 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
383 struct priv *priv = dev->data->dev_private;
384 struct mlx5_dev_config *config = &priv->config;
385 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
386 DEV_RX_OFFLOAD_TIMESTAMP |
387 DEV_RX_OFFLOAD_JUMBO_FRAME);
389 if (config->hw_fcs_strip)
390 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
392 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
393 DEV_RX_OFFLOAD_UDP_CKSUM |
394 DEV_RX_OFFLOAD_TCP_CKSUM);
395 if (config->hw_vlan_strip)
396 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
402 * Returns the per-port supported offloads.
405 * Supported Rx offloads.
408 mlx5_get_rx_port_offloads(void)
410 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
418 * Pointer to Ethernet device structure.
422 * Number of descriptors to configure in queue.
424 * NUMA socket on which memory must be allocated.
426 * Thresholds parameters.
428 * Memory pool for buffer allocations.
431 * 0 on success, a negative errno value otherwise and rte_errno is set.
434 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
435 unsigned int socket, const struct rte_eth_rxconf *conf,
436 struct rte_mempool *mp)
438 struct priv *priv = dev->data->dev_private;
439 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
440 struct mlx5_rxq_ctrl *rxq_ctrl =
441 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
443 if (!rte_is_power_of_2(desc)) {
444 desc = 1 << log2above(desc);
446 "port %u increased number of descriptors in Rx queue %u"
447 " to the next power of two (%d)",
448 dev->data->port_id, idx, desc);
450 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
451 dev->data->port_id, idx, desc);
452 if (idx >= priv->rxqs_n) {
453 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
454 dev->data->port_id, idx, priv->rxqs_n);
455 rte_errno = EOVERFLOW;
458 if (!mlx5_rxq_releasable(dev, idx)) {
459 DRV_LOG(ERR, "port %u unable to release queue index %u",
460 dev->data->port_id, idx);
464 mlx5_rxq_release(dev, idx);
465 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
467 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
468 dev->data->port_id, idx);
472 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
473 dev->data->port_id, idx);
474 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
479 * DPDK callback to release a RX queue.
482 * Generic RX queue pointer.
485 mlx5_rx_queue_release(void *dpdk_rxq)
487 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
488 struct mlx5_rxq_ctrl *rxq_ctrl;
493 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
494 priv = rxq_ctrl->priv;
495 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
496 rte_panic("port %u Rx queue %u is still used by a flow and"
497 " cannot be removed\n",
498 PORT_ID(priv), rxq_ctrl->idx);
499 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
503 * Allocate queue vector and fill epoll fd list for Rx interrupts.
506 * Pointer to Ethernet device.
509 * 0 on success, a negative errno value otherwise and rte_errno is set.
512 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
514 struct priv *priv = dev->data->dev_private;
516 unsigned int rxqs_n = priv->rxqs_n;
517 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
518 unsigned int count = 0;
519 struct rte_intr_handle *intr_handle = dev->intr_handle;
521 if (!dev->data->dev_conf.intr_conf.rxq)
523 mlx5_rx_intr_vec_disable(dev);
524 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
525 if (intr_handle->intr_vec == NULL) {
527 "port %u failed to allocate memory for interrupt"
528 " vector, Rx interrupts will not be supported",
533 intr_handle->type = RTE_INTR_HANDLE_EXT;
534 for (i = 0; i != n; ++i) {
535 /* This rxq ibv must not be released in this function. */
536 struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
541 /* Skip queues that cannot request interrupts. */
542 if (!rxq_ibv || !rxq_ibv->channel) {
543 /* Use invalid intr_vec[] index to disable entry. */
544 intr_handle->intr_vec[i] =
545 RTE_INTR_VEC_RXTX_OFFSET +
546 RTE_MAX_RXTX_INTR_VEC_ID;
549 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
551 "port %u too many Rx queues for interrupt"
552 " vector size (%d), Rx interrupts cannot be"
554 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
555 mlx5_rx_intr_vec_disable(dev);
559 fd = rxq_ibv->channel->fd;
560 flags = fcntl(fd, F_GETFL);
561 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
565 "port %u failed to make Rx interrupt file"
566 " descriptor %d non-blocking for queue index"
568 dev->data->port_id, fd, i);
569 mlx5_rx_intr_vec_disable(dev);
572 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
573 intr_handle->efds[count] = fd;
577 mlx5_rx_intr_vec_disable(dev);
579 intr_handle->nb_efd = count;
584 * Clean up Rx interrupts handler.
587 * Pointer to Ethernet device.
590 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
592 struct priv *priv = dev->data->dev_private;
593 struct rte_intr_handle *intr_handle = dev->intr_handle;
595 unsigned int rxqs_n = priv->rxqs_n;
596 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
598 if (!dev->data->dev_conf.intr_conf.rxq)
600 if (!intr_handle->intr_vec)
602 for (i = 0; i != n; ++i) {
603 struct mlx5_rxq_ctrl *rxq_ctrl;
604 struct mlx5_rxq_data *rxq_data;
606 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
607 RTE_MAX_RXTX_INTR_VEC_ID)
610 * Need to access the queue directly to release the reference
611 * kept in mlx5_rx_intr_vec_enable().
613 rxq_data = (*priv->rxqs)[i];
614 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
615 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
618 rte_intr_free_epoll_fd(intr_handle);
619 if (intr_handle->intr_vec)
620 free(intr_handle->intr_vec);
621 intr_handle->nb_efd = 0;
622 intr_handle->intr_vec = NULL;
626 * MLX5 CQ notification.
629 * Pointer to receive queue structure.
631 * Sequence number per receive queue.
634 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
637 uint32_t doorbell_hi;
639 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
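/*
 * Arming sequence: store the arm sequence number and the current CQ
 * consumer index in the CQ doorbell record, then ring the UAR doorbell
 * register with the same value plus the CQ number so that the HCA
 * generates a completion event.
 */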
641 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
642 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
643 doorbell = (uint64_t)doorbell_hi << 32;
644 doorbell |= rxq->cqn;
645 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
646 rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
650 * DPDK callback for Rx queue interrupt enable.
653 * Pointer to Ethernet device structure.
658 * 0 on success, a negative errno value otherwise and rte_errno is set.
661 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
663 struct priv *priv = dev->data->dev_private;
664 struct mlx5_rxq_data *rxq_data;
665 struct mlx5_rxq_ctrl *rxq_ctrl;
667 rxq_data = (*priv->rxqs)[rx_queue_id];
672 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
674 struct mlx5_rxq_ibv *rxq_ibv;
676 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
681 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
682 mlx5_rxq_ibv_release(rxq_ibv);
688 * DPDK callback for Rx queue interrupt disable.
691 * Pointer to Ethernet device structure.
696 * 0 on success, a negative errno value otherwise and rte_errno is set.
699 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
701 struct priv *priv = dev->data->dev_private;
702 struct mlx5_rxq_data *rxq_data;
703 struct mlx5_rxq_ctrl *rxq_ctrl;
704 struct mlx5_rxq_ibv *rxq_ibv = NULL;
705 struct ibv_cq *ev_cq;
709 rxq_data = (*priv->rxqs)[rx_queue_id];
714 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
717 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
722 ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
723 if (ret || ev_cq != rxq_ibv->cq) {
727 rxq_data->cq_arm_sn++;
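/*
 * The bumped arm sequence number is used by the next mlx5_arm_cq() call;
 * the received event is acknowledged right away, as Verbs requires,
 * before the CQ can eventually be destroyed.
 */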
728 mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
731 ret = rte_errno; /* Save rte_errno before cleanup. */
733 mlx5_rxq_ibv_release(rxq_ibv);
734 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
735 dev->data->port_id, rx_queue_id);
736 rte_errno = ret; /* Restore rte_errno. */
741 * Create the Rx queue Verbs object.
744 * Pointer to Ethernet device.
746 * Queue index in DPDK Rx queue array.
749 * The Verbs object initialised, NULL otherwise and rte_errno is set.
751 struct mlx5_rxq_ibv *
752 mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
754 struct priv *priv = dev->data->dev_private;
755 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
756 struct mlx5_rxq_ctrl *rxq_ctrl =
757 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
758 struct ibv_wq_attr mod;
761 struct ibv_cq_init_attr_ex ibv;
762 struct mlx5dv_cq_init_attr mlx5;
765 struct ibv_wq_init_attr ibv;
766 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
767 struct mlx5dv_wq_init_attr mlx5;
770 struct ibv_cq_ex cq_attr;
773 unsigned int wqe_n = 1 << rxq_data->elts_n;
774 struct mlx5_rxq_ibv *tmpl;
775 struct mlx5dv_cq cq_info;
776 struct mlx5dv_rwq rwq;
779 struct mlx5dv_obj obj;
780 struct mlx5_dev_config *config = &priv->config;
781 const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
784 assert(!rxq_ctrl->ibv);
785 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
786 priv->verbs_alloc_ctx.obj = rxq_ctrl;
787 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
791 "port %u Rx queue %u cannot allocate verbs resources",
792 dev->data->port_id, rxq_ctrl->idx);
796 tmpl->rxq_ctrl = rxq_ctrl;
798 tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
799 if (!tmpl->channel) {
800 DRV_LOG(ERR, "port %u: comp channel creation failure",
807 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
810 attr.cq.ibv = (struct ibv_cq_init_attr_ex){
812 .channel = tmpl->channel,
815 attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
818 if (config->cqe_comp && !rxq_data->hw_timestamp) {
819 attr.cq.mlx5.comp_mask |=
820 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
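/*
 * With compression enabled, MPRQ uses the mini-CQE format carrying the
 * checksum and stride index, while a regular RQ keeps the RSS-hash
 * format.
 */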
821 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
822 attr.cq.mlx5.cqe_comp_res_format =
823 mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
824 MLX5DV_CQE_RES_FORMAT_HASH;
826 attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
829 * For vectorized Rx, it must not be doubled in order to
830 * make cq_ci and rq_ci aligned.
832 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
833 attr.cq.ibv.cqe *= 2;
834 } else if (config->cqe_comp && rxq_data->hw_timestamp) {
836 "port %u Rx CQE compression is disabled for HW"
840 tmpl->cq = mlx5_glue->cq_ex_to_cq
841 (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
843 if (tmpl->cq == NULL) {
844 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
845 dev->data->port_id, idx);
849 DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
850 dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
851 DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
852 dev->data->port_id, priv->device_attr.orig_attr.max_sge);
853 attr.wq.ibv = (struct ibv_wq_init_attr){
854 .wq_context = NULL, /* Could be useful in the future. */
855 .wq_type = IBV_WQT_RQ,
856 /* Max number of outstanding WRs. */
857 .max_wr = wqe_n >> rxq_data->sges_n,
858 /* Max number of scatter/gather elements in a WR. */
859 .max_sge = 1 << rxq_data->sges_n,
863 IBV_WQ_FLAGS_CVLAN_STRIPPING |
865 .create_flags = (rxq_data->vlan_strip ?
866 IBV_WQ_FLAGS_CVLAN_STRIPPING :
869 /* By default, FCS (CRC) is stripped by hardware. */
870 if (rxq_data->crc_present) {
871 attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
872 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
874 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
875 if (config->hw_padding) {
876 attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
877 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
880 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
881 attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
885 struct mlx5dv_striding_rq_init_attr *mprq_attr =
886 &attr.wq.mlx5.striding_rq_attrs;
888 attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
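/*
 * Each WQE of the striding RQ covers 2^strd_num_n strides of
 * 2^strd_sz_n bytes. The two-byte shift presumably keeps the IP header
 * of each received packet naturally aligned within its stride.
 */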
889 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
890 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
891 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
892 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
895 tmpl->wq = mlx5_glue->dv_create_wq(priv->ctx, &attr.wq.ibv,
898 tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq.ibv);
900 if (tmpl->wq == NULL) {
901 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
902 dev->data->port_id, idx);
907 * Make sure number of WRs*SGEs match expectations since a queue
908 * cannot allocate more than "desc" buffers.
910 if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
911 attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
913 "port %u Rx queue %u requested %u*%u but got %u*%u"
915 dev->data->port_id, idx,
916 wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
917 attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
921 /* Change queue state to ready. */
922 mod = (struct ibv_wq_attr){
923 .attr_mask = IBV_WQ_ATTR_STATE,
924 .wq_state = IBV_WQS_RDY,
926 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
929 "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
930 dev->data->port_id, idx);
934 obj.cq.in = tmpl->cq;
935 obj.cq.out = &cq_info;
936 obj.rwq.in = tmpl->wq;
938 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
943 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
945 "port %u wrong MLX5_CQE_SIZE environment variable"
946 " value: it should be set to %u",
947 dev->data->port_id, RTE_CACHE_LINE_SIZE);
951 /* Fill the rings. */
952 rxq_data->wqes = rwq.buf;
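/*
 * In MPRQ mode each WQE is a single data segment pointing at one
 * multi-stride buffer; otherwise each data segment points at the data
 * room of one pre-allocated mbuf.
 */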
953 for (i = 0; (i != wqe_n); ++i) {
954 volatile struct mlx5_wqe_data_seg *scat;
959 struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i];
961 scat = &((volatile struct mlx5_wqe_mprq *)
962 rxq_data->wqes)[i].dseg;
963 addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
964 byte_count = (1 << rxq_data->strd_sz_n) *
965 (1 << rxq_data->strd_num_n);
967 struct rte_mbuf *buf = (*rxq_data->elts)[i];
969 scat = &((volatile struct mlx5_wqe_data_seg *)
971 addr = rte_pktmbuf_mtod(buf, uintptr_t);
972 byte_count = DATA_LEN(buf);
974 /* scat->addr must be able to store a pointer. */
975 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
976 *scat = (struct mlx5_wqe_data_seg){
977 .addr = rte_cpu_to_be_64(addr),
978 .byte_count = rte_cpu_to_be_32(byte_count),
979 .lkey = mlx5_rx_addr2mr(rxq_data, addr),
982 rxq_data->rq_db = rwq.dbrec;
983 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
985 rxq_data->consumed_strd = 0;
987 rxq_data->zip = (struct rxq_zip){
990 rxq_data->cq_db = cq_info.dbrec;
991 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
992 rxq_data->cq_uar = cq_info.cq_uar;
993 rxq_data->cqn = cq_info.cqn;
994 rxq_data->cq_arm_sn = 0;
995 /* Update doorbell counter. */
996 rxq_data->rq_ci = wqe_n >> rxq_data->sges_n;
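/*
 * rq_ci counts WQEs, not elements: with scattered Rx, 2^sges_n elements
 * form a single WQE, hence the shift.
 */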
998 *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
999 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1000 idx, (void *)&tmpl);
1001 rte_atomic32_inc(&tmpl->refcnt);
1002 LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
1003 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1006 ret = rte_errno; /* Save rte_errno before cleanup. */
1008 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1010 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1012 claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
1013 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1014 rte_errno = ret; /* Restore rte_errno. */
1019 * Get an Rx queue Verbs object.
1022 * Pointer to Ethernet device.
1024 * Queue index in DPDK Rx queue array.
1027 * The Verbs object if it exists.
1029 struct mlx5_rxq_ibv *
1030 mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
1032 struct priv *priv = dev->data->dev_private;
1033 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1034 struct mlx5_rxq_ctrl *rxq_ctrl;
1036 if (idx >= priv->rxqs_n)
1040 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1041 if (rxq_ctrl->ibv) {
1042 rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
1044 return rxq_ctrl->ibv;
1048 * Release an Rx Verbs queue object.
1051 * Verbs Rx queue object.
1054 * 1 while a reference on it exists, 0 when freed.
1057 mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
1060 assert(rxq_ibv->wq);
1061 assert(rxq_ibv->cq);
1062 if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
1063 rxq_free_elts(rxq_ibv->rxq_ctrl);
1064 claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
1065 claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
1066 if (rxq_ibv->channel)
1067 claim_zero(mlx5_glue->destroy_comp_channel
1068 (rxq_ibv->channel));
1069 LIST_REMOVE(rxq_ibv, next);
1077 * Verify the Verbs Rx queue list is empty.
1080 * Pointer to Ethernet device.
1083 * The number of objects not released.
1086 mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
1088 struct priv *priv = dev->data->dev_private;
1090 struct mlx5_rxq_ibv *rxq_ibv;
1092 LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
1093 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
1094 dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
1101 * Return true if a single reference exists on the object.
1104 * Verbs Rx queue object.
1107 mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
1110 return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
1114 * Callback function to initialize Multi-Packet RQ buffers (struct mlx5_mprq_buf).
1117 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
1118 void *_m, unsigned int i __rte_unused)
1120 struct mlx5_mprq_buf *buf = _m;
1122 memset(_m, 0, sizeof(*buf));
1124 rte_atomic16_set(&buf->refcnt, 1);
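/*
 * The reference count presumably tracks the mbufs attached to strides of
 * this buffer; the buffer is returned to the mempool only once no
 * references remain.
 */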
1128 * Free mempool of Multi-Packet RQ.
1131 * Pointer to Ethernet device.
1134 * 0 on success, negative errno value on failure.
1137 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1139 struct priv *priv = dev->data->dev_private;
1140 struct rte_mempool *mp = priv->mprq_mp;
1145 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1146 dev->data->port_id, mp->name);
1148 * If a buffer in the pool has been externally attached to an mbuf and is
1149 * still in use by the application, destroying the Rx queue can spoil the
1150 * packet. This is unlikely, but it can happen if the application dynamically
1151 * creates and destroys queues while holding Rx packets.
1153 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1154 * RQ isn't provided by application but managed by PMD.
1156 if (!rte_mempool_full(mp)) {
1158 "port %u mempool for Multi-Packet RQ is still in use",
1159 dev->data->port_id);
1163 rte_mempool_free(mp);
1164 /* Unset mempool for each Rx queue. */
1165 for (i = 0; i != priv->rxqs_n; ++i) {
1166 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1170 rxq->mprq_mp = NULL;
1176 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1177 * mempool. If already allocated, reuse it if there are enough elements.
1178 * Otherwise, resize it.
1181 * Pointer to Ethernet device.
1184 * 0 on success, negative errno value on failure.
1187 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1189 struct priv *priv = dev->data->dev_private;
1190 struct rte_mempool *mp = priv->mprq_mp;
1191 char name[RTE_MEMPOOL_NAMESIZE];
1192 unsigned int desc = 0;
1193 unsigned int buf_len;
1194 unsigned int obj_num;
1195 unsigned int obj_size;
1196 unsigned int strd_num_n = 0;
1197 unsigned int strd_sz_n = 0;
1200 if (!mlx5_mprq_enabled(dev))
1202 /* Count the total number of descriptors configured. */
1203 for (i = 0; i != priv->rxqs_n; ++i) {
1204 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1208 desc += 1 << rxq->elts_n;
1209 /* Get the max number of strides. */
1210 if (strd_num_n < rxq->strd_num_n)
1211 strd_num_n = rxq->strd_num_n;
1212 /* Get the max size of a stride. */
1213 if (strd_sz_n < rxq->strd_sz_n)
1214 strd_sz_n = rxq->strd_sz_n;
1216 assert(strd_num_n && strd_sz_n);
1217 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1218 obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
1220 * Received packets can be either memcpy'd or externally referenced. When
1221 * a packet is attached to an mbuf as an external buffer, it isn't possible
1222 * to predict how the buffers will be queued by the application, so the
1223 * exact number of buffers needed cannot be pre-allocated in advance; enough
1224 * buffers have to be prepared speculatively.
1226 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1227 * received packets to buffers provided by application (rxq->mp) until
1228 * this Mempool gets available again.
1231 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
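/*
 * On top of the configured descriptors, one cache worth of objects is
 * added per Rx queue so that objects sitting in the mempool cache are
 * less likely to starve the rings.
 */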
1232 /* Check whether a mempool is already allocated and whether it can be reused. */
1233 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1234 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1235 dev->data->port_id, mp->name);
1238 } else if (mp != NULL) {
1239 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1240 dev->data->port_id, mp->name);
1242 * If freeing fails, the mempool may still be in use; there is no choice
1243 * but to keep using the existing one. On buffer underrun, packets will
1244 * be memcpy'd instead of external buffer
1247 if (mlx5_mprq_free_mp(dev)) {
1248 if (mp->elt_size >= obj_size)
1254 snprintf(name, sizeof(name), "%s-mprq", dev->device->name);
1255 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1256 0, NULL, NULL, mlx5_mprq_buf_init, NULL,
1257 dev->device->numa_node, 0);
1260 "port %u failed to allocate a mempool for"
1261 " Multi-Packet RQ, count=%u, size=%u",
1262 dev->data->port_id, obj_num, obj_size);
1268 /* Set mempool for each Rx queue. */
1269 for (i = 0; i != priv->rxqs_n; ++i) {
1270 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1276 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1277 dev->data->port_id);
1282 * Create a DPDK Rx queue.
1285 * Pointer to Ethernet device.
1289 * Number of descriptors to configure in queue.
1291 * NUMA socket on which memory must be allocated.
1294 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1296 struct mlx5_rxq_ctrl *
1297 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1298 unsigned int socket, const struct rte_eth_rxconf *conf,
1299 struct rte_mempool *mp)
1301 struct priv *priv = dev->data->dev_private;
1302 struct mlx5_rxq_ctrl *tmpl;
1303 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1304 unsigned int mprq_stride_size;
1305 struct mlx5_dev_config *config = &priv->config;
1307 * Always allocate extra slots, even if eventually
1308 * the vector Rx will not be used.
1311 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1312 uint64_t offloads = conf->offloads |
1313 dev->data->dev_conf.rxmode.offloads;
1314 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1316 tmpl = rte_calloc_socket("RXQ", 1,
1318 desc_n * sizeof(struct rte_mbuf *),
1324 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1325 MLX5_MR_BTREE_CACHE_N, socket)) {
1326 /* rte_errno is already set. */
1329 tmpl->socket = socket;
1330 if (dev->data->dev_conf.intr_conf.rxq)
1333 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1334 * following conditions are met:
1335 * - MPRQ is enabled.
1336 * - The number of descs is more than the number of strides.
1337 * - max_rx_pkt_len plus overhead is less than the max size of a
1339 * Otherwise, enable Rx scatter if necessary.
1341 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
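/*
 * A single stride must be able to hold the largest Rx packet plus the
 * mbuf headroom and the shared-info trailer used when a stride is
 * attached to an mbuf as an external buffer.
 */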
1343 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1344 sizeof(struct rte_mbuf_ext_shared_info) +
1345 RTE_PKTMBUF_HEADROOM;
1347 desc >= (1U << config->mprq.stride_num_n) &&
1348 mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
1349 /* TODO: Rx scatter isn't supported yet. */
1350 tmpl->rxq.sges_n = 0;
1351 /* Trim the number of descs needed. */
1352 desc >>= config->mprq.stride_num_n;
1353 tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
1354 tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1355 config->mprq.min_stride_size_n);
1356 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1357 tmpl->rxq.mprq_max_memcpy_len =
1358 RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
1359 config->mprq.max_memcpy_len);
1361 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1362 " strd_num_n = %u, strd_sz_n = %u",
1363 dev->data->port_id, idx,
1364 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1365 } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
1366 (mb_len - RTE_PKTMBUF_HEADROOM)) {
1367 tmpl->rxq.sges_n = 0;
1368 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1370 RTE_PKTMBUF_HEADROOM +
1371 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1372 unsigned int sges_n;
1375 * Determine the number of SGEs needed for a full packet
1376 * and round it to the next power of two.
1378 sges_n = log2above((size / mb_len) + !!(size % mb_len));
1379 tmpl->rxq.sges_n = sges_n;
1380 /* Make sure rxq.sges_n did not overflow. */
1381 size = mb_len * (1 << tmpl->rxq.sges_n);
1382 size -= RTE_PKTMBUF_HEADROOM;
1383 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
1385 "port %u too many SGEs (%u) needed to handle"
1386 " requested maximum packet size %u",
1389 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1390 rte_errno = EOVERFLOW;
1395 "port %u the requested maximum Rx packet size (%u) is"
1396 " larger than a single mbuf (%u) and scattered mode has"
1397 " not been requested",
1399 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1400 mb_len - RTE_PKTMBUF_HEADROOM);
1402 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1403 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1404 if (desc % (1 << tmpl->rxq.sges_n)) {
1406 "port %u number of Rx queue descriptors (%u) is not a"
1407 " multiple of SGEs per packet (%u)",
1410 1 << tmpl->rxq.sges_n);
1414 /* Toggle RX checksum offload if hardware supports it. */
1415 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1416 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1417 /* Configure VLAN stripping. */
1418 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1419 /* By default, FCS (CRC) is stripped by hardware. */
1420 if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
1421 tmpl->rxq.crc_present = 0;
1422 } else if (config->hw_fcs_strip) {
1423 tmpl->rxq.crc_present = 1;
1426 "port %u CRC stripping has been disabled but will"
1427 " still be performed by hardware, make sure MLNX_OFED"
1428 " and firmware are up to date",
1429 dev->data->port_id);
1430 tmpl->rxq.crc_present = 0;
1433 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1434 " incoming frames to hide it",
1436 tmpl->rxq.crc_present ? "disabled" : "enabled",
1437 tmpl->rxq.crc_present << 2);
1439 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1440 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1441 tmpl->rxq.port_id = dev->data->port_id;
1444 tmpl->rxq.stats.idx = idx;
1445 tmpl->rxq.elts_n = log2above(desc);
1447 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1449 rte_atomic32_inc(&tmpl->refcnt);
1450 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1461 * Pointer to Ethernet device.
1466 * A pointer to the queue if it exists, NULL otherwise.
1468 struct mlx5_rxq_ctrl *
1469 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1471 struct priv *priv = dev->data->dev_private;
1472 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1474 if ((*priv->rxqs)[idx]) {
1475 rxq_ctrl = container_of((*priv->rxqs)[idx],
1476 struct mlx5_rxq_ctrl,
1478 mlx5_rxq_ibv_get(dev, idx);
1479 rte_atomic32_inc(&rxq_ctrl->refcnt);
1485 * Release an Rx queue.
1488 * Pointer to Ethernet device.
1493 * 1 while a reference on it exists, 0 when freed.
1496 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1498 struct priv *priv = dev->data->dev_private;
1499 struct mlx5_rxq_ctrl *rxq_ctrl;
1501 if (!(*priv->rxqs)[idx])
1503 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1504 assert(rxq_ctrl->priv);
1505 if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
1506 rxq_ctrl->ibv = NULL;
1507 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1508 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1509 LIST_REMOVE(rxq_ctrl, next);
1511 (*priv->rxqs)[idx] = NULL;
1518 * Verify if the queue can be released.
1521 * Pointer to Ethernet device.
1526 * 1 if the queue can be released, negative errno otherwise and rte_errno is
1530 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1532 struct priv *priv = dev->data->dev_private;
1533 struct mlx5_rxq_ctrl *rxq_ctrl;
1535 if (!(*priv->rxqs)[idx]) {
1539 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1540 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
1544 * Verify the Rx queue list is empty.
1547 * Pointer to Ethernet device.
1550 * The number of objects not released.
1553 mlx5_rxq_verify(struct rte_eth_dev *dev)
1555 struct priv *priv = dev->data->dev_private;
1556 struct mlx5_rxq_ctrl *rxq_ctrl;
1559 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1560 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1561 dev->data->port_id, rxq_ctrl->idx);
1568 * Create an indirection table.
1571 * Pointer to Ethernet device.
1573 * Queues entering the indirection table.
1575 * Number of queues in the array.
1578 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1580 struct mlx5_ind_table_ibv *
1581 mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
1584 struct priv *priv = dev->data->dev_private;
1585 struct mlx5_ind_table_ibv *ind_tbl;
1586 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1587 log2above(queues_n) :
1588 log2above(priv->config.ind_table_max_size);
1589 struct ibv_wq *wq[1 << wq_n];
1593 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1594 queues_n * sizeof(uint16_t), 0);
1599 for (i = 0; i != queues_n; ++i) {
1600 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1604 wq[i] = rxq->ibv->wq;
1605 ind_tbl->queues[i] = queues[i];
1607 ind_tbl->queues_n = queues_n;
1608 /* Finalise indirection table. */
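/*
 * When the number of queues is not a power of two the table is expanded
 * (up to ind_table_max_size) and the remaining entries are filled by
 * cycling over the configured WQs again.
 */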
1609 for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
1611 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1613 &(struct ibv_rwq_ind_table_init_attr){
1614 .log_ind_tbl_size = wq_n,
1618 if (!ind_tbl->ind_table) {
1622 rte_atomic32_inc(&ind_tbl->refcnt);
1623 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1627 DEBUG("port %u cannot create indirection table", dev->data->port_id);
1632 * Get an indirection table.
1635 * Pointer to Ethernet device.
1637 * Queues entering the indirection table.
1639 * Number of queues in the array.
1642 * An indirection table if found.
1644 struct mlx5_ind_table_ibv *
1645 mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
1648 struct priv *priv = dev->data->dev_private;
1649 struct mlx5_ind_table_ibv *ind_tbl;
1651 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1652 if ((ind_tbl->queues_n == queues_n) &&
1653 (memcmp(ind_tbl->queues, queues,
1654 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1661 rte_atomic32_inc(&ind_tbl->refcnt);
1662 for (i = 0; i != ind_tbl->queues_n; ++i)
1663 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1669 * Release an indirection table.
1672 * Pointer to Ethernet device.
1674 * Indirection table to release.
1677 * 1 while a reference on it exists, 0 when freed.
1680 mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
1681 struct mlx5_ind_table_ibv *ind_tbl)
1685 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1686 claim_zero(mlx5_glue->destroy_rwq_ind_table
1687 (ind_tbl->ind_table));
1688 for (i = 0; i != ind_tbl->queues_n; ++i)
1689 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1690 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1691 LIST_REMOVE(ind_tbl, next);
1699 * Verify the Verbs indirection table list is empty.
1702 * Pointer to Ethernet device.
1705 * The number of objects not released.
1708 mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
1710 struct priv *priv = dev->data->dev_private;
1711 struct mlx5_ind_table_ibv *ind_tbl;
1714 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1716 "port %u Verbs indirection table %p still referenced",
1717 dev->data->port_id, (void *)ind_tbl);
1724 * Create an Rx Hash queue.
1727 * Pointer to Ethernet device.
1729 * RSS key for the Rx hash queue.
1730 * @param rss_key_len
1732 * @param hash_fields
1733 * Verbs protocol hash field to make the RSS on.
1735 * Queues entering the hash queue. If hash_fields is empty, only the
1736 * first queue index is used for the indirection table.
1740 * Tunnel type, implies tunnel offloading like inner checksum if available.
1742 * RSS hash on tunnel level.
1745 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1748 mlx5_hrxq_new(struct rte_eth_dev *dev,
1749 const uint8_t *rss_key, uint32_t rss_key_len,
1750 uint64_t hash_fields,
1751 const uint16_t *queues, uint32_t queues_n,
1752 uint32_t tunnel, uint32_t rss_level)
1754 struct priv *priv = dev->data->dev_private;
1755 struct mlx5_hrxq *hrxq;
1756 struct mlx5_ind_table_ibv *ind_tbl;
1759 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1760 struct mlx5dv_qp_init_attr qp_init_attr = {0};
1763 queues_n = hash_fields ? queues_n : 1;
1764 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1766 ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
1772 rss_key_len = rss_hash_default_key_len;
1773 rss_key = rss_hash_default_key;
1775 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
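/*
 * The tunnel-offload QP flag requests parsing of inner headers of
 * encapsulated traffic so that inner RSS and checksum offloads can be
 * applied.
 */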
1777 qp_init_attr.comp_mask =
1778 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
1779 qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
1781 qp = mlx5_glue->dv_create_qp
1783 &(struct ibv_qp_init_attr_ex){
1784 .qp_type = IBV_QPT_RAW_PACKET,
1786 IBV_QP_INIT_ATTR_PD |
1787 IBV_QP_INIT_ATTR_IND_TABLE |
1788 IBV_QP_INIT_ATTR_RX_HASH,
1789 .rx_hash_conf = (struct ibv_rx_hash_conf){
1790 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1791 .rx_hash_key_len = rss_key_len ? rss_key_len :
1792 rss_hash_default_key_len,
1793 .rx_hash_key = rss_key ?
1794 (void *)(uintptr_t)rss_key :
1795 rss_hash_default_key,
1796 .rx_hash_fields_mask = hash_fields |
1797 (tunnel && rss_level > 1 ?
1798 (uint32_t)IBV_RX_HASH_INNER : 0),
1800 .rwq_ind_tbl = ind_tbl->ind_table,
1805 qp = mlx5_glue->create_qp_ex
1807 &(struct ibv_qp_init_attr_ex){
1808 .qp_type = IBV_QPT_RAW_PACKET,
1810 IBV_QP_INIT_ATTR_PD |
1811 IBV_QP_INIT_ATTR_IND_TABLE |
1812 IBV_QP_INIT_ATTR_RX_HASH,
1813 .rx_hash_conf = (struct ibv_rx_hash_conf){
1814 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1815 .rx_hash_key_len = rss_key_len ? rss_key_len :
1816 rss_hash_default_key_len,
1817 .rx_hash_key = rss_key ?
1818 (void *)(uintptr_t)rss_key :
1819 rss_hash_default_key,
1820 .rx_hash_fields_mask = hash_fields,
1822 .rwq_ind_tbl = ind_tbl->ind_table,
1830 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
1833 hrxq->ind_table = ind_tbl;
1835 hrxq->rss_key_len = rss_key_len;
1836 hrxq->hash_fields = hash_fields;
1837 hrxq->tunnel = tunnel;
1838 hrxq->rss_level = rss_level;
1839 memcpy(hrxq->rss_key, rss_key, rss_key_len);
1840 rte_atomic32_inc(&hrxq->refcnt);
1841 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1844 err = rte_errno; /* Save rte_errno before cleanup. */
1845 mlx5_ind_table_ibv_release(dev, ind_tbl);
1847 claim_zero(mlx5_glue->destroy_qp(qp));
1848 rte_errno = err; /* Restore rte_errno. */
1853 * Get an Rx Hash queue.
1856 * Pointer to Ethernet device.
1858 * RSS configuration for the Rx hash queue.
1860 * Queues entering the hash queue. If hash_fields is empty, only the
1861 * first queue index is used for the indirection table.
1865 * Tunnel type, implies tunnel offloading like inner checksum if available.
1867 * RSS hash on tunnel level.
1870 * A hash Rx queue on success.
1873 mlx5_hrxq_get(struct rte_eth_dev *dev,
1874 const uint8_t *rss_key, uint32_t rss_key_len,
1875 uint64_t hash_fields,
1876 const uint16_t *queues, uint32_t queues_n,
1877 uint32_t tunnel, uint32_t rss_level)
1879 struct priv *priv = dev->data->dev_private;
1880 struct mlx5_hrxq *hrxq;
1882 queues_n = hash_fields ? queues_n : 1;
1883 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1884 struct mlx5_ind_table_ibv *ind_tbl;
1886 if (hrxq->rss_key_len != rss_key_len)
1888 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1890 if (hrxq->hash_fields != hash_fields)
1892 if (hrxq->tunnel != tunnel)
1894 if (hrxq->rss_level != rss_level)
1896 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1899 if (ind_tbl != hrxq->ind_table) {
1900 mlx5_ind_table_ibv_release(dev, ind_tbl);
1903 rte_atomic32_inc(&hrxq->refcnt);
1910 * Release the hash Rx queue.
1913 * Pointer to Ethernet device.
1915 * Pointer to Hash Rx queue to release.
1918 * 1 while a reference on it exists, 0 when freed.
1921 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
1923 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1924 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1925 mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
1926 LIST_REMOVE(hrxq, next);
1930 claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
1935 * Verify the Verbs hash Rx queue list is empty.
1938 * Pointer to Ethernet device.
1941 * The number of objects not released.
1944 mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
1946 struct priv *priv = dev->data->dev_private;
1947 struct mlx5_hrxq *hrxq;
1950 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1952 "port %u Verbs hash Rx queue %p still referenced",
1953 dev->data->port_id, (void *)hrxq);