1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
13 #include "mlx5_autoconf.h"
16 #include <rte_malloc.h>
17 #include <ethdev_driver.h>
18 #include <rte_common.h>
20 #include <mlx5_glue.h>
21 #include <mlx5_common.h>
22 #include <mlx5_common_mr.h>
23 #include <mlx5_verbs.h>
26 #include <mlx5_utils.h>
27 #include <mlx5_malloc.h>
30 * Modify Rx WQ vlan stripping offload
35 * @return 0 on success, non-0 otherwise
38 mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
40 uint16_t vlan_offloads =
41 (on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
43 struct ibv_wq_attr mod;
44 mod = (struct ibv_wq_attr){
45 .attr_mask = IBV_WQ_ATTR_FLAGS,
46 .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
47 .flags = vlan_offloads,
50 return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
54 * Modifies the attributes for the specified WQ.
59  * Type of queue state change.
62 * 0 on success, a negative errno value otherwise and rte_errno is set.
65 mlx5_ibv_modify_wq(struct mlx5_rxq_priv *rxq, uint8_t type)
67 struct ibv_wq_attr mod = {
68 .attr_mask = IBV_WQ_ATTR_STATE,
69 .wq_state = (enum ibv_wq_state)type,
72 return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
76 * Modify QP using Verbs API.
79 * Verbs Tx queue object.
81  * Type of queue state change.
83 * IB device port number.
86 * 0 on success, a negative errno value otherwise and rte_errno is set.
89 mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
92 struct ibv_qp_attr mod = {
93 .qp_state = IBV_QPS_RESET,
96 int attr_mask = (IBV_QP_STATE | IBV_QP_PORT);
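	/*
	 * Every transition except RST2RDY first forces the QP back to RESET;
	 * RDY2RST stops there, the other types continue through
	 * INIT -> RTR -> RTS.
	 */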
99 if (type != MLX5_TXQ_MOD_RST2RDY) {
100 ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
102 DRV_LOG(ERR, "Cannot change Tx QP state to RESET %s",
107 if (type == MLX5_TXQ_MOD_RDY2RST)
110 if (type == MLX5_TXQ_MOD_ERR2RDY)
111 attr_mask = IBV_QP_STATE;
112 mod.qp_state = IBV_QPS_INIT;
113 ret = mlx5_glue->modify_qp(obj->qp, &mod, attr_mask);
115 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
120 mod.qp_state = IBV_QPS_RTR;
121 ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
123 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
128 mod.qp_state = IBV_QPS_RTS;
129 ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
131 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
140 * Create a CQ Verbs object.
143 * Pointer to Rx queue.
146 * The Verbs CQ object initialized, NULL otherwise and rte_errno is set.
148 static struct ibv_cq *
149 mlx5_rxq_ibv_cq_create(struct mlx5_rxq_priv *rxq)
151 struct mlx5_priv *priv = rxq->priv;
152 struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
153 struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
154 struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
155 unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
157 struct ibv_cq_init_attr_ex ibv;
158 struct mlx5dv_cq_init_attr mlx5;
161 cq_attr.ibv = (struct ibv_cq_init_attr_ex){
163 .channel = rxq_obj->ibv_channel,
166 cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
169 if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
170 cq_attr.mlx5.comp_mask |=
171 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
172 rxq_data->byte_mask = UINT32_MAX;
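		/*
		 * Select the mini-CQE format carried in compressed CQEs:
		 * checksum plus stride index when Multi-Packet RQ is enabled,
		 * RSS hash result otherwise.
		 */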
173 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
174 if (mlx5_rxq_mprq_enabled(rxq_data)) {
175 cq_attr.mlx5.cqe_comp_res_format =
176 MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX;
177 rxq_data->mcqe_format =
178 MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
180 cq_attr.mlx5.cqe_comp_res_format =
181 MLX5DV_CQE_RES_FORMAT_HASH;
182 rxq_data->mcqe_format =
183 MLX5_CQE_RESP_FORMAT_HASH;
186 cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
187 rxq_data->mcqe_format = MLX5_CQE_RESP_FORMAT_HASH;
190 * For vectorized Rx, it must not be doubled in order to
191 * make cq_ci and rq_ci aligned.
193 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
194 cq_attr.ibv.cqe *= 2;
195 } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
197 "Port %u Rx CQE compression is disabled for HW"
199 priv->dev_data->port_id);
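	/*
	 * With 128-byte cache lines, request 128B CQE padding so each CQE
	 * fills a whole cache line.
	 */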
201 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
202 if (RTE_CACHE_LINE_SIZE == 128) {
203 cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
204 cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
207 return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq
208 (priv->sh->cdev->ctx,
214 * Create a WQ Verbs object.
217 * Pointer to Rx queue.
220 * The Verbs WQ object initialized, NULL otherwise and rte_errno is set.
222 static struct ibv_wq *
223 mlx5_rxq_ibv_wq_create(struct mlx5_rxq_priv *rxq)
225 struct mlx5_priv *priv = rxq->priv;
226 struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
227 struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
228 struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
229 unsigned int wqe_n = 1 << rxq_data->elts_n;
231 struct ibv_wq_init_attr ibv;
232 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
233 struct mlx5dv_wq_init_attr mlx5;
237 wq_attr.ibv = (struct ibv_wq_init_attr){
238 .wq_context = NULL, /* Could be useful in the future. */
239 .wq_type = IBV_WQT_RQ,
240 /* Max number of outstanding WRs. */
241 .max_wr = wqe_n >> rxq_data->sges_n,
242 /* Max number of scatter/gather elements in a WR. */
243 .max_sge = 1 << rxq_data->sges_n,
244 .pd = priv->sh->cdev->pd,
245 .cq = rxq_obj->ibv_cq,
246 .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
247 .create_flags = (rxq_data->vlan_strip ?
248 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
250 /* By default, FCS (CRC) is stripped by hardware. */
251 if (rxq_data->crc_present) {
252 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
253 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
255 if (priv->config.hw_padding) {
256 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
257 wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
258 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
259 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
260 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
261 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
264 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
265 wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
268 if (mlx5_rxq_mprq_enabled(rxq_data)) {
269 struct mlx5dv_striding_rq_init_attr *mprq_attr =
270 &wq_attr.mlx5.striding_rq_attrs;
272 wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
273 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
274 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
275 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
276 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
279 rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->cdev->ctx, &wq_attr.ibv,
282 rxq_obj->wq = mlx5_glue->create_wq(priv->sh->cdev->ctx, &wq_attr.ibv);
286	 * Make sure the number of WRs*SGEs matches expectations since a queue
287 * cannot allocate more than "desc" buffers.
289 if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
290 wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
292 "Port %u Rx queue %u requested %u*%u but got"
294 priv->dev_data->port_id, rxq->idx,
295 wqe_n >> rxq_data->sges_n,
296 (1 << rxq_data->sges_n),
297 wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
298 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
307 * Create the Rx queue Verbs object.
310 * Pointer to Rx queue.
313 * 0 on success, a negative errno value otherwise and rte_errno is set.
316 mlx5_rxq_ibv_obj_new(struct mlx5_rxq_priv *rxq)
318 uint16_t idx = rxq->idx;
319 struct mlx5_priv *priv = rxq->priv;
320 uint16_t port_id = priv->dev_data->port_id;
321 struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
322 struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
323 struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
324 struct mlx5dv_cq cq_info;
325 struct mlx5dv_rwq rwq;
327 struct mlx5dv_obj obj;
329 MLX5_ASSERT(rxq_data);
331 tmpl->rxq_ctrl = rxq_ctrl;
334 mlx5_glue->create_comp_channel(priv->sh->cdev->ctx);
335 if (!tmpl->ibv_channel) {
336 DRV_LOG(ERR, "Port %u: comp channel creation failure.",
341 tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
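	/* The saved channel fd is polled later to service Rx interrupt events for this queue. */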
343 /* Create CQ using Verbs API. */
344 tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(rxq);
346 DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
351 obj.cq.in = tmpl->ibv_cq;
352 obj.cq.out = &cq_info;
353 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
358 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
360 "Port %u wrong MLX5_CQE_SIZE environment "
361 "variable value: it should be set to %u.",
362 port_id, RTE_CACHE_LINE_SIZE);
366 /* Fill the rings. */
367 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
368 rxq_data->cq_db = cq_info.dbrec;
369 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
370 rxq_data->cq_uar = cq_info.cq_uar;
371 rxq_data->cqn = cq_info.cqn;
372 /* Create WQ (RQ) using Verbs API. */
373 tmpl->wq = mlx5_rxq_ibv_wq_create(rxq);
375 DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
380 /* Change queue state to ready. */
381 ret = mlx5_ibv_modify_wq(rxq, IBV_WQS_RDY);
384 "Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
389 obj.rwq.in = tmpl->wq;
391 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
396 rxq_data->wqes = rwq.buf;
397 rxq_data->rq_db = rwq.dbrec;
398 rxq_data->cq_arm_sn = 0;
399 mlx5_rxq_initialize(rxq_data);
401 priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
402 rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
405 ret = rte_errno; /* Save rte_errno before cleanup. */
407 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
409 claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
410 if (tmpl->ibv_channel)
411 claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
412 rte_errno = ret; /* Restore rte_errno. */
417 * Release an Rx verbs queue object.
420 * Pointer to Rx queue.
423 mlx5_rxq_ibv_obj_release(struct mlx5_rxq_priv *rxq)
425 struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;
427 if (rxq_obj == NULL || rxq_obj->wq == NULL)
429 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
431 MLX5_ASSERT(rxq_obj->ibv_cq);
432 claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
433 if (rxq_obj->ibv_channel)
434 claim_zero(mlx5_glue->destroy_comp_channel
435 (rxq_obj->ibv_channel));
436 rxq->ctrl->started = false;
440 * Get event for an Rx verbs queue object.
443 * Verbs Rx queue object.
446 * 0 on success, a negative errno value otherwise and rte_errno is set.
449 mlx5_rx_ibv_get_event(struct mlx5_rxq_obj *rxq_obj)
451 struct ibv_cq *ev_cq;
453 int ret = mlx5_glue->get_cq_event(rxq_obj->ibv_channel,
456 if (ret < 0 || ev_cq != rxq_obj->ibv_cq)
458 mlx5_glue->ack_cq_events(rxq_obj->ibv_cq, 1);
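	/* Received CQ events must be acknowledged, otherwise destroying the CQ later would block. */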
469  * Creates a receive work queue as a field of an indirection table.
472 * Pointer to Ethernet device.
474 * Log of number of queues in the array.
476 * Verbs indirection table object.
479 * 0 on success, a negative errno value otherwise and rte_errno is set.
482 mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
483 struct mlx5_ind_table_obj *ind_tbl)
485 struct mlx5_priv *priv = dev->data->dev_private;
486 struct ibv_wq *wq[1 << log_n];
489 MLX5_ASSERT(ind_tbl);
490 for (i = 0; i != ind_tbl->queues_n; ++i) {
491 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev,
494 wq[i] = rxq->ctrl->obj->wq;
497 /* Finalise indirection table. */
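	/* Pad the array to 2^log_n entries by repeating the WQs collected above. */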
498 for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
500 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
501 (priv->sh->cdev->ctx,
502 &(struct ibv_rwq_ind_table_init_attr){
503 .log_ind_tbl_size = log_n,
507 if (!ind_tbl->ind_table) {
515 * Destroys the specified Indirection Table.
518 * Indirection table to release.
521 mlx5_ibv_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
523 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
527 * Create an Rx Hash queue.
530 * Pointer to Ethernet device.
532 * Pointer to Rx Hash queue.
537 * 0 on success, a negative errno value otherwise and rte_errno is set.
540 mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
541 int tunnel __rte_unused)
543 struct mlx5_priv *priv = dev->data->dev_private;
544 struct ibv_qp *qp = NULL;
545 struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
546 const uint8_t *rss_key = hrxq->rss_key;
547 uint64_t hash_fields = hrxq->hash_fields;
549 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
550 struct mlx5dv_qp_init_attr qp_init_attr;
552 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
554 qp_init_attr.comp_mask =
555 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
556 qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
558 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
559 if (dev->data->dev_conf.lpbk_mode) {
560		/* Allow packets sent from the NIC to loop back w/o source MAC check. */
561 qp_init_attr.comp_mask |=
562 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
563 qp_init_attr.create_flags |=
564 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
567 qp = mlx5_glue->dv_create_qp
568 (priv->sh->cdev->ctx,
569 &(struct ibv_qp_init_attr_ex){
570 .qp_type = IBV_QPT_RAW_PACKET,
572 IBV_QP_INIT_ATTR_PD |
573 IBV_QP_INIT_ATTR_IND_TABLE |
574 IBV_QP_INIT_ATTR_RX_HASH,
575 .rx_hash_conf = (struct ibv_rx_hash_conf){
577 IBV_RX_HASH_FUNC_TOEPLITZ,
578 .rx_hash_key_len = hrxq->rss_key_len,
580 (void *)(uintptr_t)rss_key,
581 .rx_hash_fields_mask = hash_fields,
583 .rwq_ind_tbl = ind_tbl->ind_table,
584 .pd = priv->sh->cdev->pd,
588 qp = mlx5_glue->create_qp_ex
589 (priv->sh->cdev->ctx,
590 &(struct ibv_qp_init_attr_ex){
591 .qp_type = IBV_QPT_RAW_PACKET,
593 IBV_QP_INIT_ATTR_PD |
594 IBV_QP_INIT_ATTR_IND_TABLE |
595 IBV_QP_INIT_ATTR_RX_HASH,
596 .rx_hash_conf = (struct ibv_rx_hash_conf){
598 IBV_RX_HASH_FUNC_TOEPLITZ,
599 .rx_hash_key_len = hrxq->rss_key_len,
601 (void *)(uintptr_t)rss_key,
602 .rx_hash_fields_mask = hash_fields,
604 .rwq_ind_tbl = ind_tbl->ind_table,
605 .pd = priv->sh->cdev->pd,
613 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
614 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
622 err = rte_errno; /* Save rte_errno before cleanup. */
624 claim_zero(mlx5_glue->destroy_qp(qp));
625 rte_errno = err; /* Restore rte_errno. */
630 * Destroy a Verbs queue pair.
633 * Hash Rx queue to release its qp.
636 mlx5_ibv_qp_destroy(struct mlx5_hrxq *hrxq)
638 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
642 * Release a drop Rx queue Verbs object.
645 * Pointer to Ethernet device.
648 mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)
650 struct mlx5_priv *priv = dev->data->dev_private;
651 struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
652 struct mlx5_rxq_obj *rxq_obj;
656 if (rxq->ctrl == NULL)
658 rxq_obj = rxq->ctrl->obj;
662 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
664 claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
667 mlx5_free(rxq->ctrl);
670 priv->drop_queue.rxq = NULL;
674 * Create a drop Rx queue Verbs object.
677 * Pointer to Ethernet device.
680 * 0 on success, a negative errno value otherwise and rte_errno is set.
683 mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
685 struct mlx5_priv *priv = dev->data->dev_private;
686 struct ibv_context *ctx = priv->sh->cdev->ctx;
687 struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
688 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
689 struct mlx5_rxq_obj *rxq_obj = NULL;
693 rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
695 DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
700 priv->drop_queue.rxq = rxq;
701 rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl), 0,
703 if (rxq_ctrl == NULL) {
704 DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue control memory.",
709 rxq->ctrl = rxq_ctrl;
710 rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0,
712 if (rxq_obj == NULL) {
713 DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
718 rxq_ctrl->obj = rxq_obj;
719 rxq_obj->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
720 if (!rxq_obj->ibv_cq) {
721 DRV_LOG(DEBUG, "Port %u cannot allocate CQ for drop queue.",
726 rxq_obj->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
727 .wq_type = IBV_WQT_RQ,
730 .pd = priv->sh->cdev->pd,
731 .cq = rxq_obj->ibv_cq,
734 DRV_LOG(DEBUG, "Port %u cannot allocate WQ for drop queue.",
741 mlx5_rxq_ibv_obj_drop_release(dev);
746 * Create a Verbs drop action for Rx Hash queue.
749 * Pointer to Ethernet device.
752 * 0 on success, a negative errno value otherwise and rte_errno is set.
755 mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
757 struct mlx5_priv *priv = dev->data->dev_private;
758 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
759 struct ibv_rwq_ind_table *ind_tbl = NULL;
760 struct mlx5_rxq_obj *rxq;
763 MLX5_ASSERT(hrxq && hrxq->ind_table);
764 ret = mlx5_rxq_ibv_obj_drop_create(dev);
767 rxq = priv->drop_queue.rxq->ctrl->obj;
768 ind_tbl = mlx5_glue->create_rwq_ind_table
769 (priv->sh->cdev->ctx,
770 &(struct ibv_rwq_ind_table_init_attr){
771 .log_ind_tbl_size = 0,
772 .ind_tbl = (struct ibv_wq **)&rxq->wq,
776 DRV_LOG(DEBUG, "Port %u"
777 " cannot allocate indirection table for drop queue.",
782 hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx,
783 &(struct ibv_qp_init_attr_ex){
784 .qp_type = IBV_QPT_RAW_PACKET,
785 .comp_mask = IBV_QP_INIT_ATTR_PD |
786 IBV_QP_INIT_ATTR_IND_TABLE |
787 IBV_QP_INIT_ATTR_RX_HASH,
788 .rx_hash_conf = (struct ibv_rx_hash_conf){
789 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
790 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
791 .rx_hash_key = rss_hash_default_key,
792 .rx_hash_fields_mask = 0,
794 .rwq_ind_tbl = ind_tbl,
795 .pd = priv->sh->cdev->pd
798 DRV_LOG(DEBUG, "Port %u cannot allocate QP for drop queue.",
803 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
804 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
810 hrxq->ind_table->ind_table = ind_tbl;
814 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
816 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
817 if (priv->drop_queue.rxq)
818 mlx5_rxq_ibv_obj_drop_release(dev);
823 * Release a drop hash Rx queue.
826 * Pointer to Ethernet device.
829 mlx5_ibv_drop_action_destroy(struct rte_eth_dev *dev)
831 struct mlx5_priv *priv = dev->data->dev_private;
832 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
833 struct ibv_rwq_ind_table *ind_tbl = hrxq->ind_table->ind_table;
835 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
836 claim_zero(mlx5_glue->destroy_flow_action(hrxq->action));
838 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
839 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
840 mlx5_rxq_ibv_obj_drop_release(dev);
844 * Create a QP Verbs object.
847 * Pointer to Ethernet device.
849 * Queue index in DPDK Tx queue array.
852 * The QP Verbs object, NULL otherwise and rte_errno is set.
854 static struct ibv_qp *
855 mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
857 struct mlx5_priv *priv = dev->data->dev_private;
858 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
859 struct mlx5_txq_ctrl *txq_ctrl =
860 container_of(txq_data, struct mlx5_txq_ctrl, txq);
861 struct ibv_qp *qp_obj = NULL;
862 struct ibv_qp_init_attr_ex qp_attr = { 0 };
863 const int desc = 1 << txq_data->elts_n;
865 MLX5_ASSERT(txq_ctrl->obj->cq);
866 /* CQ to be associated with the send queue. */
867 qp_attr.send_cq = txq_ctrl->obj->cq;
868 /* CQ to be associated with the receive queue. */
869 qp_attr.recv_cq = txq_ctrl->obj->cq;
870 /* Max number of outstanding WRs. */
871 qp_attr.cap.max_send_wr = ((priv->sh->device_attr.max_qp_wr < desc) ?
872 priv->sh->device_attr.max_qp_wr : desc);
874	 * Max number of scatter/gather elements in a WR, must be 1 to prevent
875	 * libmlx5 from trying to affect too much memory. TX gather is not
876	 * impacted by the device_attr.max_sge limit and will still work
877	 * properly.
879 qp_attr.cap.max_send_sge = 1;
880	qp_attr.qp_type = IBV_QPT_RAW_PACKET;
881	/* Do *NOT* enable this, completion events are managed per Tx burst. */
882 qp_attr.sq_sig_all = 0;
883 qp_attr.pd = priv->sh->cdev->pd;
884 qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;
885 if (txq_data->inlen_send)
886 qp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;
887 if (txq_data->tso_en) {
888 qp_attr.max_tso_header = txq_ctrl->max_tso_header;
889 qp_attr.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
891 qp_obj = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx, &qp_attr);
892 if (qp_obj == NULL) {
893 DRV_LOG(ERR, "Port %u Tx queue %u QP creation failure.",
894 dev->data->port_id, idx);
901 * Create the Tx queue Verbs object.
904 * Pointer to Ethernet device.
906 * Queue index in DPDK Tx queue array.
909 * 0 on success, a negative errno value otherwise and rte_errno is set.
912 mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
914 struct mlx5_priv *priv = dev->data->dev_private;
915 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
916 struct mlx5_txq_ctrl *txq_ctrl =
917 container_of(txq_data, struct mlx5_txq_ctrl, txq);
918 struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
921 struct mlx5dv_cq cq_info;
922 struct mlx5dv_obj obj;
923 const int desc = 1 << txq_data->elts_n;
926 MLX5_ASSERT(txq_data);
927 MLX5_ASSERT(txq_obj);
928 txq_obj->txq_ctrl = txq_ctrl;
929 if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
930 DRV_LOG(ERR, "Port %u MLX5_ENABLE_CQE_COMPRESSION "
931 "must never be set.", dev->data->port_id);
935 cqe_n = desc / MLX5_TX_COMP_THRESH +
936 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
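	/* cqe_n budgets roughly one completion per MLX5_TX_COMP_THRESH descriptors, with extra headroom. */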
937 txq_obj->cq = mlx5_glue->create_cq(priv->sh->cdev->ctx, cqe_n,
939 if (txq_obj->cq == NULL) {
940 DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
941 dev->data->port_id, idx);
945 txq_obj->qp = mlx5_txq_ibv_qp_create(dev, idx);
946 if (txq_obj->qp == NULL) {
950 ret = mlx5_ibv_modify_qp(txq_obj, MLX5_TXQ_MOD_RST2RDY,
951 (uint8_t)priv->dev_port);
953 DRV_LOG(ERR, "Port %u Tx queue %u QP state modifying failed.",
954 dev->data->port_id, idx);
958 qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
959 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
960 /* If using DevX, need additional mask to read tisn value. */
961 if (priv->sh->devx && !priv->sh->tdn)
962 qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
964 obj.cq.in = txq_obj->cq;
965 obj.cq.out = &cq_info;
966 obj.qp.in = txq_obj->qp;
968 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
973 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
975 "Port %u wrong MLX5_CQE_SIZE environment variable"
976 " value: it should be set to %u.",
977 dev->data->port_id, RTE_CACHE_LINE_SIZE);
981 txq_data->cqe_n = log2above(cq_info.cqe_cnt);
982 txq_data->cqe_s = 1 << txq_data->cqe_n;
983 txq_data->cqe_m = txq_data->cqe_s - 1;
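	/* qp_num_8s keeps the QP number pre-shifted by 8 so it can be OR'ed with the DS count when building WQE control segments. */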
984 txq_data->qp_num_8s = ((struct ibv_qp *)txq_obj->qp)->qp_num << 8;
985 txq_data->wqes = qp.sq.buf;
986 txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
987 txq_data->wqe_s = 1 << txq_data->wqe_n;
988 txq_data->wqe_m = txq_data->wqe_s - 1;
989 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
990 txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
991 txq_data->cq_db = cq_info.dbrec;
992 txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
995 txq_data->wqe_ci = 0;
996 txq_data->wqe_pi = 0;
997 txq_data->wqe_comp = 0;
998 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
999 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1001	 * If using DevX, need to query and store the TIS transport domain value.
1002 * This is done once per port.
1003 * Will use this value on Rx, when creating matching TIR.
1005 if (priv->sh->devx && !priv->sh->tdn) {
1006 ret = mlx5_devx_cmd_qp_query_tis_td(txq_obj->qp, qp.tisn,
1009			DRV_LOG(ERR, "Failed to query port %u Tx queue %u QP TIS "
1010 "transport domain.", dev->data->port_id, idx);
1014 DRV_LOG(DEBUG, "Port %u Tx queue %u TIS number %d "
1015 "transport domain %d.", dev->data->port_id,
1016 idx, qp.tisn, priv->sh->tdn);
1020 if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
1021 txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
1022 DRV_LOG(DEBUG, "Port %u: uar_mmap_offset 0x%" PRIx64 ".",
1023 dev->data->port_id, txq_ctrl->uar_mmap_offset);
1026 "Port %u failed to retrieve UAR info, invalid libmlx5.so",
1027 dev->data->port_id);
1031 txq_uar_init(txq_ctrl, qp.bf.reg);
1032 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
1035 ret = rte_errno; /* Save rte_errno before cleanup. */
1037 claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
1039 claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
1040 rte_errno = ret; /* Restore rte_errno. */
1045 * Create the dummy QP with minimal resources for loopback.
1048 * Pointer to Ethernet device.
1051 * 0 on success, a negative errno value otherwise and rte_errno is set.
1054 mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
1056 #if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
1057 struct mlx5_priv *priv = dev->data->dev_private;
1058 struct mlx5_dev_ctx_shared *sh = priv->sh;
1059 struct ibv_context *ctx = sh->cdev->ctx;
1060 struct mlx5dv_qp_init_attr qp_init_attr = {0};
1062 struct ibv_cq_init_attr_ex ibv;
1063 struct mlx5dv_cq_init_attr mlx5;
1066 if (dev->data->dev_conf.lpbk_mode) {
1067		/* Allow packets sent from the NIC to loop back w/o source MAC check. */
1068 qp_init_attr.comp_mask |=
1069 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
1070 qp_init_attr.create_flags |=
1071 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
1075	/* Only need to check refcnt; it is 0 right after "sh" is allocated. */
1076 if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
1077 MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
1081 cq_attr.ibv = (struct ibv_cq_init_attr_ex){
1086 cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
1089 /* Only CQ is needed, no WQ(RQ) is required in this case. */
1090 sh->self_lb.ibv_cq = mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(ctx,
1093 if (!sh->self_lb.ibv_cq) {
1094 DRV_LOG(ERR, "Port %u cannot allocate CQ for loopback.",
1095 dev->data->port_id);
1099 sh->self_lb.qp = mlx5_glue->dv_create_qp(ctx,
1100 &(struct ibv_qp_init_attr_ex){
1101 .qp_type = IBV_QPT_RAW_PACKET,
1102 .comp_mask = IBV_QP_INIT_ATTR_PD,
1104 .send_cq = sh->self_lb.ibv_cq,
1105 .recv_cq = sh->self_lb.ibv_cq,
1106 .cap.max_recv_wr = 1,
1109 if (!sh->self_lb.qp) {
1110 DRV_LOG(DEBUG, "Port %u cannot allocate QP for loopback.",
1111 dev->data->port_id);
1118 if (sh->self_lb.ibv_cq) {
1119 claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
1120 sh->self_lb.ibv_cq = NULL;
1122 (void)__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
1131 * Release the dummy queue resources for loopback.
1134 * Pointer to Ethernet device.
1137 mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev)
1139 #if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
1140 struct mlx5_priv *priv = dev->data->dev_private;
1141 struct mlx5_dev_ctx_shared *sh = priv->sh;
1145 MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
1146 if (!(__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
1147 if (sh->self_lb.qp) {
1148 claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
1149 sh->self_lb.qp = NULL;
1151 if (sh->self_lb.ibv_cq) {
1152 claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
1153 sh->self_lb.ibv_cq = NULL;
1164  * Release a Tx verbs queue object.
1167  * Verbs Tx queue object.
1170 mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj)
1172 MLX5_ASSERT(txq_obj);
1173 claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
1174 claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
1177 struct mlx5_obj_ops ibv_obj_ops = {
1178 .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_wq_vlan_strip,
1179 .rxq_obj_new = mlx5_rxq_ibv_obj_new,
1180 .rxq_event_get = mlx5_rx_ibv_get_event,
1181 .rxq_obj_modify = mlx5_ibv_modify_wq,
1182 .rxq_obj_release = mlx5_rxq_ibv_obj_release,
1183 .ind_table_new = mlx5_ibv_ind_table_new,
1184 .ind_table_destroy = mlx5_ibv_ind_table_destroy,
1185 .hrxq_new = mlx5_ibv_hrxq_new,
1186 .hrxq_destroy = mlx5_ibv_qp_destroy,
1187 .drop_action_create = mlx5_ibv_drop_action_create,
1188 .drop_action_destroy = mlx5_ibv_drop_action_destroy,
1189 .txq_obj_new = mlx5_txq_ibv_obj_new,
1190 .txq_obj_modify = mlx5_ibv_modify_qp,
1191 .txq_obj_release = mlx5_txq_ibv_obj_release,
1192 .lb_dummy_queue_create = NULL,
1193 .lb_dummy_queue_release = NULL,