1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
10 #include <sys/queue.h>
12 #include <rte_malloc.h>
13 #include <rte_common.h>
14 #include <rte_eal_paging.h>
16 #include <mlx5_glue.h>
17 #include <mlx5_devx_cmds.h>
18 #include <mlx5_common_devx.h>
19 #include <mlx5_malloc.h>
22 #include "mlx5_common_os.h"
25 #include "mlx5_utils.h"
26 #include "mlx5_devx.h"
27 #include "mlx5_flow.h"
28 #include "mlx5_flow_os.h"
31 * Modify RQ VLAN stripping offload.
36 * Enable/disable VLAN stripping.
39 * 0 on success, non-zero otherwise.
42 mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
44 struct mlx5_devx_modify_rq_attr rq_attr;
46 memset(&rq_attr, 0, sizeof(rq_attr));
47 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
48 rq_attr.state = MLX5_RQC_STATE_RDY;
49 rq_attr.vsd = (on ? 0 : 1);
50 rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
51 return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
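/*
 * Illustrative sketch (editor's example, not part of the original file):
 * this callback is reached through the devx_obj_ops table below. A
 * hypothetical caller toggling stripping on queue index i could look like:
 *
 *	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
 *
 *	if (rxq != NULL &&
 *	    priv->obj_ops.rxq_obj_modify_vlan_strip(rxq, 1) != 0)
 *		DRV_LOG(WARNING, "Failed to enable VLAN stripping.");
 */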
55 * Modify RQ using DevX API.
60 * Type of queue state change.
63 * 0 on success, a negative errno value otherwise and rte_errno is set.
66 mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)
68 struct mlx5_devx_modify_rq_attr rq_attr;
70 memset(&rq_attr, 0, sizeof(rq_attr));
72 case MLX5_RXQ_MOD_ERR2RST:
73 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
74 rq_attr.state = MLX5_RQC_STATE_RST;
76 case MLX5_RXQ_MOD_RST2RDY:
77 rq_attr.rq_state = MLX5_RQC_STATE_RST;
78 rq_attr.state = MLX5_RQC_STATE_RDY;
80 case MLX5_RXQ_MOD_RDY2ERR:
81 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
82 rq_attr.state = MLX5_RQC_STATE_ERR;
84 case MLX5_RXQ_MOD_RDY2RST:
85 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
86 rq_attr.state = MLX5_RQC_STATE_RST;
91 if (rxq->ctrl->is_hairpin)
92 return mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr);
93 return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
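/*
 * Illustrative sketch (editor's example): a drain/restart sequence maps
 * to two of the transitions above, assuming the caller handles errors:
 *
 *	if (mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST) != 0)
 *		return -rte_errno;
 *	... reset queue internals here ...
 *	if (mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY) != 0)
 *		return -rte_errno;
 */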
97 * Modify SQ using DevX API.
100 * DevX Tx queue object.
102 * Type of queue state change.
107 * 0 on success, a negative errno value otherwise and rte_errno is set.
110 mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
113 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
116 if (type != MLX5_TXQ_MOD_RST2RDY) {
117 /* Change queue state to reset. */
118 if (type == MLX5_TXQ_MOD_ERR2RDY)
119 msq_attr.sq_state = MLX5_SQC_STATE_ERR;
121 msq_attr.sq_state = MLX5_SQC_STATE_RDY;
122 msq_attr.state = MLX5_SQC_STATE_RST;
123 ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
125 DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET:"
126 " %s", strerror(errno));
131 if (type != MLX5_TXQ_MOD_RDY2RST) {
132 /* Change queue state to ready. */
133 msq_attr.sq_state = MLX5_SQC_STATE_RST;
134 msq_attr.state = MLX5_SQC_STATE_RDY;
135 ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
137 DRV_LOG(ERR, "Cannot change the Tx SQ state to READY:"
138 " %s", strerror(errno));
144 * The dev_port variable is relevant only in the Verbs API. A function
145 * pointer may refer either to this function or to its Verbs counterpart,
146 * so the two must keep the same parameter list.
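/*
 * Illustrative sketch (editor's example): recovering a Tx queue from an
 * error completion relies on the two-step ERR -> RST -> RDY transition
 * implemented above:
 *
 *	if (mlx5_txq_devx_modify(txq_ctrl->obj, MLX5_TXQ_MOD_ERR2RDY, 0) != 0)
 *		DRV_LOG(ERR, "Tx queue recovery failed: %s",
 *			strerror(errno));
 */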
153 * Release an Rx DevX queue object.
159 mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
161 struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;
165 if (rxq_obj->rxq_ctrl->is_hairpin) {
166 if (rxq_obj->rq == NULL)
168 mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);
169 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
171 if (rxq->devx_rq.rq == NULL)
173 mlx5_devx_rq_destroy(&rxq->devx_rq);
174 if (rxq->devx_rq.rmp != NULL && rxq->devx_rq.rmp->ref_cnt > 0)
176 mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
177 memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
178 if (rxq_obj->devx_channel) {
179 mlx5_os_devx_destroy_event_channel
180 (rxq_obj->devx_channel);
181 rxq_obj->devx_channel = NULL;
184 rxq->ctrl->started = false;
188 * Get event for an Rx DevX queue object.
191 * DevX Rx queue object.
194 * 0 on success, a negative errno value otherwise and rte_errno is set.
197 mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
199 #ifdef HAVE_IBV_DEVX_EVENT
201 struct mlx5dv_devx_async_event_hdr event_resp;
202 uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
204 int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
212 if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) {
221 #endif /* HAVE_IBV_DEVX_EVENT */
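/*
 * Illustrative sketch (editor's example, surrounding interrupt plumbing
 * assumed): after the event channel FD signals, the handler validates
 * the completion event before re-arming the CQ:
 *
 *	if (priv->obj_ops.rxq_event_get(rxq_ctrl->obj) < 0 &&
 *	    rte_errno != EAGAIN)
 *		DRV_LOG(DEBUG, "Unexpected Rx CQ event on port %u.",
 *			priv->dev_data->port_id);
 */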
225 * Create a RQ object using DevX.
228 * Pointer to Rx queue.
231 * 0 on success, a negative errno value otherwise and rte_errno is set.
234 mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
236 struct mlx5_priv *priv = rxq->priv;
237 struct mlx5_common_device *cdev = priv->sh->cdev;
238 struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
239 struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
240 struct mlx5_devx_create_rq_attr rq_attr = { 0 };
241 uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
242 uint32_t wqe_size, log_wqe_size;
244 /* Fill RQ attributes. */
245 rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
246 rq_attr.flush_in_error_en = 1;
247 rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
248 rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
249 rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
251 mlx5_ts_format_conv(cdev->config.hca_attr.rq_ts_format);
252 /* Fill WQ attributes for this RQ. */
253 if (mlx5_rxq_mprq_enabled(rxq_data)) {
254 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
256 * Number of strides in each WQE:
257 * 512*2^single_wqe_log_num_of_strides.
259 rq_attr.wq_attr.single_wqe_log_num_of_strides =
260 rxq_data->log_strd_num -
261 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
262 /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
263 rq_attr.wq_attr.single_stride_log_num_of_bytes =
264 rxq_data->log_strd_sz -
265 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
266 wqe_size = sizeof(struct mlx5_wqe_mprq);
268 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
269 wqe_size = sizeof(struct mlx5_wqe_data_seg);
271 log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
272 wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
273 rq_attr.wq_attr.log_wq_stride = log_wqe_size;
274 rq_attr.wq_attr.log_wq_sz = log_desc_n;
275 rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
276 MLX5_WQ_END_PAD_MODE_ALIGN :
277 MLX5_WQ_END_PAD_MODE_NONE;
278 rq_attr.wq_attr.pd = cdev->pdn;
279 rq_attr.counter_set_id = priv->counter_set_id;
280 rq_attr.delay_drop_en = rxq_data->delay_drop;
281 rq_attr.user_index = rte_cpu_to_be_16(priv->dev_data->port_id);
282 if (rxq_data->shared) /* Create RMP based RQ. */
283 rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;
284 /* Create RQ using DevX API. */
285 return mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size,
286 log_desc_n, &rq_attr, rxq_ctrl->socket);
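/*
 * Worked example (editor's note, values assumed): with elts_n = 14 and
 * sges_n = 0, log_desc_n = 14 (16384 entries). On the non-MPRQ branch
 * wqe_size = sizeof(struct mlx5_wqe_data_seg) = 16B, so log_wqe_size = 4
 * and the RQ is created with a 16B WQE stride and 2^14 WQEs.
 */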
290 * Create a DevX CQ object for an Rx queue.
293 * Pointer to Rx queue.
296 * 0 on success, a negative errno value otherwise and rte_errno is set.
299 mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
301 struct mlx5_devx_cq *cq_obj = 0;
302 struct mlx5_devx_cq_attr cq_attr = { 0 };
303 struct mlx5_priv *priv = rxq->priv;
304 struct mlx5_dev_ctx_shared *sh = priv->sh;
305 uint16_t port_id = priv->dev_data->port_id;
306 struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
307 struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
308 unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
310 uint16_t event_nums[1] = { 0 };
313 if (rxq_ctrl->started)
315 if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
317 cq_attr.cqe_comp_en = 1u;
318 rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
319 rxq_data->byte_mask = UINT32_MAX;
320 switch (priv->config.cqe_comp_fmt) {
321 case MLX5_CQE_RESP_FORMAT_HASH:
323 case MLX5_CQE_RESP_FORMAT_CSUM:
325 * Select CSUM miniCQE format only for non-vectorized
326 * MPRQ Rx burst, use HASH miniCQE format for others.
328 if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
329 mlx5_rxq_mprq_enabled(rxq_data))
330 cq_attr.mini_cqe_res_format =
331 MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
333 cq_attr.mini_cqe_res_format =
334 MLX5_CQE_RESP_FORMAT_HASH;
335 rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
337 case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
338 rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
340 case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
341 cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
343 case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
344 cq_attr.mini_cqe_res_format = 0;
345 cq_attr.mini_cqe_res_format_ext = 1;
349 "Port %u Rx CQE compression is enabled, format %d.",
350 port_id, priv->config.cqe_comp_fmt);
352 * For vectorized Rx, the CQE count must not be doubled, so that
353 * cq_ci and rq_ci stay aligned.
355 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
357 } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
359 "Port %u Rx CQE compression is disabled for HW timestamp.",
361 } else if (priv->config.cqe_comp && rxq_data->lro) {
363 "Port %u Rx CQE compression is disabled for LRO.",
366 cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->rx_uar.obj);
367 log_cqe_n = log2above(cqe_n);
368 /* Create CQ using DevX API. */
369 ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
370 log_cqe_n, &cq_attr, sh->numa_node);
373 cq_obj = &rxq_ctrl->obj->cq_obj;
374 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
375 (uintptr_t)cq_obj->cqes;
376 rxq_data->cq_db = cq_obj->db_rec;
377 rxq_data->uar_data = sh->rx_uar.cq_db;
378 rxq_data->cqe_n = log_cqe_n;
379 rxq_data->cqn = cq_obj->cq->id;
381 if (rxq_ctrl->obj->devx_channel) {
382 ret = mlx5_os_devx_subscribe_devx_event
383 (rxq_ctrl->obj->devx_channel,
387 (uint64_t)(uintptr_t)cq_obj->cq);
389 DRV_LOG(ERR, "Fail to subscribe CQ to event channel.");
391 mlx5_devx_cq_destroy(cq_obj);
392 memset(cq_obj, 0, sizeof(*cq_obj));
401 * Create the Rx hairpin queue object.
404 * Pointer to Rx queue.
407 * 0 on success, a negative errno value otherwise and rte_errno is set.
410 mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
412 uint16_t idx = rxq->idx;
413 struct mlx5_priv *priv = rxq->priv;
414 struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
415 struct mlx5_devx_create_rq_attr attr = { 0 };
416 struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
417 uint32_t max_wq_data;
419 MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL);
420 tmpl->rxq_ctrl = rxq_ctrl;
423 priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
424 /* Jumbo frames > 9 KB and a larger number of packets should be supported. */
425 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
426 if (priv->config.log_hp_size > max_wq_data) {
427 DRV_LOG(ERR, "Total data size %u (power of 2) is "
428 "too large for hairpin.",
429 priv->config.log_hp_size);
433 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
435 attr.wq_attr.log_hairpin_data_sz =
436 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
437 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
439 /* Set the number of packets to the maximum value for performance. */
440 attr.wq_attr.log_hairpin_num_packets =
441 attr.wq_attr.log_hairpin_data_sz -
442 MLX5_HAIRPIN_QUEUE_STRIDE;
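/*
 * Worked example (editor's note, values assumed): with
 * log_hairpin_data_sz = 15 (32 KB) and a queue stride constant of 6
 * (64B strides), log_hairpin_num_packets = 9, i.e. up to 512 packets.
 */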
443 attr.counter_set_id = priv->counter_set_id;
444 rxq_ctrl->rxq.delay_drop = priv->config.hp_delay_drop;
445 attr.delay_drop_en = priv->config.hp_delay_drop;
446 tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
450 "Port %u Rx hairpin queue %u can't create rq object.",
451 priv->dev_data->port_id, idx);
455 priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
460 * Create the Rx queue DevX object.
463 * Pointer to Rx queue.
466 * 0 on success, a negative errno value otherwise and rte_errno is set.
469 mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
471 struct mlx5_priv *priv = rxq->priv;
472 struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
473 struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
474 struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
477 MLX5_ASSERT(rxq_data);
479 if (rxq_ctrl->is_hairpin)
480 return mlx5_rxq_obj_hairpin_new(rxq);
481 tmpl->rxq_ctrl = rxq_ctrl;
482 if (rxq_ctrl->irq && !rxq_ctrl->started) {
484 MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
486 tmpl->devx_channel = mlx5_os_devx_create_event_channel
487 (priv->sh->cdev->ctx,
489 if (!tmpl->devx_channel) {
491 DRV_LOG(ERR, "Failed to create event channel %d.",
495 tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
497 /* Create CQ using DevX API. */
498 ret = mlx5_rxq_create_devx_cq_resources(rxq);
500 DRV_LOG(ERR, "Failed to create CQ.");
503 rxq_data->delay_drop = priv->config.std_delay_drop;
504 /* Create RQ using DevX API. */
505 ret = mlx5_rxq_create_devx_rq_resources(rxq);
507 DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
508 priv->dev_data->port_id, rxq->idx);
512 /* Change queue state to ready. */
513 ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
516 if (!rxq_data->shared) {
517 rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
518 rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;
519 } else if (!rxq_ctrl->started) {
520 rxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf;
522 (uint32_t *)(uintptr_t)tmpl->devx_rmp.wq.db_rec;
524 if (!rxq_ctrl->started) {
525 mlx5_rxq_initialize(rxq_data);
526 rxq_ctrl->wqn = rxq->devx_rq.rq->id;
528 priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;
531 ret = rte_errno; /* Save rte_errno before cleanup. */
532 mlx5_rxq_devx_obj_release(rxq);
533 rte_errno = ret; /* Restore rte_errno. */
538 * Prepare RQT attribute structure for DevX RQT API.
541 * Pointer to Ethernet device.
543 * Log of number of queues in the array.
545 * List of RX queue indices or NULL, in which case
546 * the attribute is filled with the drop queue ID.
548 * Size of @p queues array or 0 if it is NULL.
550 * DevX indirection table object.
553 * The RQT attr object initialized, NULL otherwise and rte_errno is set.
555 static struct mlx5_devx_rqt_attr *
556 mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
557 const unsigned int log_n,
558 const uint16_t *queues,
559 const uint32_t queues_n)
561 struct mlx5_priv *priv = dev->data->dev_private;
562 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
563 const unsigned int rqt_n = 1 << log_n;
566 rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
567 rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
569 DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
574 rqt_attr->rqt_max_size = priv->sh->dev_cap.ind_table_max_size;
575 rqt_attr->rqt_actual_size = rqt_n;
576 if (queues == NULL) {
577 for (i = 0; i < rqt_n; i++)
578 rqt_attr->rq_list[i] =
579 priv->drop_queue.rxq->devx_rq.rq->id;
582 for (i = 0; i != queues_n; ++i) {
583 if (mlx5_is_external_rxq(dev, queues[i])) {
584 struct mlx5_external_rxq *ext_rxq =
585 mlx5_ext_rxq_get(dev, queues[i]);
587 rqt_attr->rq_list[i] = ext_rxq->hw_id;
589 struct mlx5_rxq_priv *rxq =
590 mlx5_rxq_get(dev, queues[i]);
592 MLX5_ASSERT(rxq != NULL);
593 if (rxq->ctrl->is_hairpin)
594 rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
596 rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
600 for (j = 0; i != rqt_n; ++j, ++i)
601 rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
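/*
 * Example (editor's note): with queues_n = 6 and log_n = 3 (rqt_n = 8),
 * this wrap-around loop fills entries 6 and 7 from queues 0 and 1, so
 * the final RQ list is 0,1,2,3,4,5,0,1.
 */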
606 * Create RQT using DevX API as a field of the indirection table.
609 * Pointer to Ethernet device.
611 * Log of number of queues in the array.
613 * DevX indirection table object.
616 * 0 on success, a negative errno value otherwise and rte_errno is set.
619 mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
620 struct mlx5_ind_table_obj *ind_tbl)
622 struct mlx5_priv *priv = dev->data->dev_private;
623 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
624 const uint16_t *queues = dev->data->dev_started ? ind_tbl->queues :
627 MLX5_ASSERT(ind_tbl);
628 rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n, queues,
632 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr);
635 DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
644 * Modify RQT using DevX API as a field of the indirection table.
647 * Pointer to Ethernet device.
649 * Log of number of queues in the array.
651 * DevX indirection table object.
654 * 0 on success, a negative errno value otherwise and rte_errno is set.
657 mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
658 const uint16_t *queues, const uint32_t queues_n,
659 struct mlx5_ind_table_obj *ind_tbl)
662 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
664 MLX5_ASSERT(ind_tbl);
665 rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
670 ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
673 DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
679 * Destroy the DevX RQT object.
682 * Indirection table to release.
685 mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
687 claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
691 * Set TIR attribute struct with relevant input values.
694 * Pointer to Ethernet device.
696 * RSS key for the Rx hash queue.
697 * @param[in] hash_fields
698 * Verbs protocol hash field to make the RSS on.
700 * Indirection table for TIR. If table queues array is NULL,
701 * a TIR for drop queue is assumed.
704 * @param[out] tir_attr
705 * Parameters structure for TIR creation/modification.
708 * Nothing; the tir_attr structure is filled in place.
711 mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
712 uint64_t hash_fields,
713 const struct mlx5_ind_table_obj *ind_tbl,
714 int tunnel, struct mlx5_devx_tir_attr *tir_attr)
716 struct mlx5_priv *priv = dev->data->dev_private;
721 /* NULL queues designate drop queue. */
722 if (ind_tbl->queues == NULL) {
723 is_hairpin = priv->drop_queue.rxq->ctrl->is_hairpin;
724 } else if (mlx5_is_external_rxq(dev, ind_tbl->queues[0])) {
725 /* External RxQ supports neither Hairpin nor LRO. */
729 is_hairpin = mlx5_rxq_is_hairpin(dev, ind_tbl->queues[0]);
730 /* Enable TIR LRO only if all the queues were configured for it. */
731 for (i = 0; i < ind_tbl->queues_n; ++i) {
732 struct mlx5_rxq_data *rxq_i =
733 mlx5_rxq_data_get(dev, ind_tbl->queues[i]);
735 if (rxq_i != NULL && !rxq_i->lro) {
741 memset(tir_attr, 0, sizeof(*tir_attr));
742 tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
743 tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
744 tir_attr->tunneled_offload_en = !!tunnel;
745 /* If needed, translate hash_fields bitmap to PRM format. */
747 struct mlx5_rx_hash_field_select *rx_hash_field_select =
748 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
749 hash_fields & IBV_RX_HASH_INNER ?
750 &tir_attr->rx_hash_field_selector_inner :
752 &tir_attr->rx_hash_field_selector_outer;
753 /* 1 bit: 0: IPv4, 1: IPv6. */
754 rx_hash_field_select->l3_prot_type =
755 !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
756 /* 1 bit: 0: TCP, 1: UDP. */
757 rx_hash_field_select->l4_prot_type =
758 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
759 /* Bitmask selecting which fields to use in the RX hash. */
760 rx_hash_field_select->selected_fields =
761 ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
762 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
763 (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
764 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
765 (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
766 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
767 (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
768 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
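/*
 * Example (editor's note): for a TCPv4 RSS rule the bits above yield
 * l3_prot_type = 0 (IPv4), l4_prot_type = 0 (TCP) and selected_fields
 * covering SRC_IP, DST_IP, L4_SPORT and L4_DPORT.
 */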
771 tir_attr->transport_domain = priv->sh->td->id;
773 tir_attr->transport_domain = priv->sh->tdn;
774 memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
775 tir_attr->indirect_table = ind_tbl->rqt->id;
776 if (dev->data->dev_conf.lpbk_mode)
777 tir_attr->self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
779 tir_attr->lro_timeout_period_usecs = priv->config.lro_timeout;
780 tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
781 tir_attr->lro_enable_mask =
782 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
783 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
788 * Create an Rx Hash queue.
791 * Pointer to Ethernet device.
793 * Pointer to Rx Hash queue.
798 * 0 on success, a negative errno value otherwise and rte_errno is set.
801 mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
802 int tunnel __rte_unused)
804 struct mlx5_priv *priv = dev->data->dev_private;
805 struct mlx5_devx_tir_attr tir_attr = {0};
808 mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
809 hrxq->ind_table, tunnel, &tir_attr);
810 hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->cdev->ctx, &tir_attr);
812 DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
817 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
818 if (hrxq->hws_flags) {
819 hrxq->action = mlx5dr_action_create_dest_tir
821 (struct mlx5dr_devx_obj *)hrxq->tir, hrxq->hws_flags);
826 if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
834 err = rte_errno; /* Save rte_errno before cleanup. */
836 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
837 rte_errno = err; /* Restore rte_errno. */
842 * Destroy a DevX TIR object.
845 * Hash Rx queue to release its tir.
848 mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
850 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
854 * Modify an Rx Hash queue configuration.
857 * Pointer to Ethernet device.
859 * Hash Rx queue to modify.
861 * RSS key for the Rx hash queue.
863 * Verbs protocol hash field to make the RSS on.
865 * Indirection table for TIR.
868 * 0 on success, a negative errno value otherwise and rte_errno is set.
871 mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
872 const uint8_t *rss_key,
873 uint64_t hash_fields,
874 const struct mlx5_ind_table_obj *ind_tbl)
876 struct mlx5_devx_modify_tir_attr modify_tir = {0};
879 * Fields untested for modification:
880 * - rx_hash_symmetric, not set in hrxq_new(),
881 * - rx_hash_fn, hard-coded in hrxq_new(),
882 * - lro_xxx, not set after Rx queue setup.
884 if (ind_tbl != hrxq->ind_table)
885 modify_tir.modify_bitmask |=
886 MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
887 if (hash_fields != hrxq->hash_fields ||
888 memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
889 modify_tir.modify_bitmask |=
890 MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
891 mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
892 0, /* N/A - tunnel modification unsupported */
894 modify_tir.tirn = hrxq->tir->id;
895 if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
896 DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
905 * Create a DevX drop Rx queue.
908 * Pointer to Ethernet device.
911 * 0 on success, a negative errno value otherwise and rte_errno is set.
914 mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
916 struct mlx5_priv *priv = dev->data->dev_private;
917 int socket_id = dev->device->numa_node;
918 struct mlx5_rxq_priv *rxq;
919 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
920 struct mlx5_rxq_obj *rxq_obj = NULL;
924 * Initialize dummy control structures.
925 * They are required to hold pointers for cleanup
926 * and are only accessible via drop queue DevX objects.
928 rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
930 DRV_LOG(ERR, "Port %u could not allocate drop queue private",
935 rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
937 if (rxq_ctrl == NULL) {
938 DRV_LOG(ERR, "Port %u could not allocate drop queue control",
943 rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0, socket_id);
944 if (rxq_obj == NULL) {
945 DRV_LOG(ERR, "Port %u could not allocate drop queue object",
950 rxq_obj->rxq_ctrl = rxq_ctrl;
951 rxq_ctrl->is_hairpin = false;
952 rxq_ctrl->sh = priv->sh;
953 rxq_ctrl->obj = rxq_obj;
954 rxq->ctrl = rxq_ctrl;
956 LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
957 /* Create CQ using DevX API. */
958 ret = mlx5_rxq_create_devx_cq_resources(rxq);
960 DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
964 rxq_ctrl->rxq.delay_drop = 0;
965 /* Create RQ using DevX API. */
966 ret = mlx5_rxq_create_devx_rq_resources(rxq);
968 DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
973 /* Change queue state to ready. */
974 ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
977 /* Initialize drop queue. */
978 priv->drop_queue.rxq = rxq;
981 ret = rte_errno; /* Save rte_errno before cleanup. */
982 if (rxq != NULL && rxq->devx_rq.rq != NULL)
983 mlx5_devx_rq_destroy(&rxq->devx_rq);
984 if (rxq_obj != NULL) {
985 if (rxq_obj->cq_obj.cq != NULL)
986 mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
987 if (rxq_obj->devx_channel)
988 mlx5_os_devx_destroy_event_channel
989 (rxq_obj->devx_channel);
992 if (rxq_ctrl != NULL)
996 rte_errno = ret; /* Restore rte_errno. */
1001 * Release drop Rx queue resources.
1004 * Pointer to Ethernet device.
1007 mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
1009 struct mlx5_priv *priv = dev->data->dev_private;
1010 struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
1011 struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
1013 mlx5_rxq_devx_obj_release(rxq);
1014 mlx5_free(rxq_ctrl->obj);
1015 mlx5_free(rxq_ctrl);
1017 priv->drop_queue.rxq = NULL;
1021 * Release a drop hash Rx queue.
1024 * Pointer to Ethernet device.
1027 mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
1029 struct mlx5_priv *priv = dev->data->dev_private;
1030 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
1032 if (hrxq->tir != NULL)
1033 mlx5_devx_tir_destroy(hrxq);
1034 if (hrxq->ind_table->ind_table != NULL)
1035 mlx5_devx_ind_table_destroy(hrxq->ind_table);
1036 if (priv->drop_queue.rxq->devx_rq.rq != NULL)
1037 mlx5_rxq_devx_obj_drop_release(dev);
1041 * Create a DevX drop action for Rx Hash queue.
1044 * Pointer to Ethernet device.
1047 * 0 on success, a negative errno value otherwise and rte_errno is set.
1050 mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
1052 struct mlx5_priv *priv = dev->data->dev_private;
1053 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
1056 ret = mlx5_rxq_devx_obj_drop_create(dev);
1058 DRV_LOG(ERR, "Cannot create drop RX queue");
1061 if (priv->sh->config.dv_flow_en == 2)
1063 /* hrxq->ind_table queues are NULL, the drop RX queue ID is used instead. */
1064 ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);
1066 DRV_LOG(ERR, "Cannot create drop hash RX queue indirection table");
1069 ret = mlx5_devx_hrxq_new(dev, hrxq, /* tunnel */ false);
1071 DRV_LOG(ERR, "Cannot create drop hash RX queue");
1076 mlx5_devx_drop_action_destroy(dev);
1081 * Select TXQ TIS number.
1084 * Pointer to Ethernet device.
1086 * Queue index in DPDK Tx queue array.
1089 * > 0 on success, a negative errno value otherwise.
1092 mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx)
1094 struct mlx5_priv *priv = dev->data->dev_private;
1097 if (priv->sh->bond.n_port && priv->sh->lag.affinity_mode ==
1098 MLX5_LAG_MODE_TIS) {
1099 tis_idx = (priv->lag_affinity_idx + queue_idx) %
1100 priv->sh->bond.n_port;
1101 DRV_LOG(INFO, "port %d txq %d gets affinity %d and maps to PF %d.",
1102 dev->data->port_id, queue_idx, tis_idx + 1,
1103 priv->sh->lag.tx_remap_affinity[tis_idx]);
1107 MLX5_ASSERT(priv->sh->tis[tis_idx]);
1108 return priv->sh->tis[tis_idx]->id;
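/*
 * Worked example (editor's note): with a two-port bond in TIS affinity
 * mode and lag_affinity_idx = 1, Tx queues 0, 1, 2, ... map to
 * tis_idx 1, 0, 1, ... so queues alternate between the two physical
 * ports.
 */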
1112 * Create the Tx hairpin queue object.
1115 * Pointer to Ethernet device.
1117 * Queue index in DPDK Tx queue array.
1120 * 0 on success, a negative errno value otherwise and rte_errno is set.
1123 mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
1125 struct mlx5_priv *priv = dev->data->dev_private;
1126 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1127 struct mlx5_txq_ctrl *txq_ctrl =
1128 container_of(txq_data, struct mlx5_txq_ctrl, txq);
1129 struct mlx5_devx_create_sq_attr attr = { 0 };
1130 struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
1131 uint32_t max_wq_data;
1133 MLX5_ASSERT(txq_data);
1135 tmpl->txq_ctrl = txq_ctrl;
1137 attr.tis_lst_sz = 1;
1139 priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
1140 /* Jumbo frames > 9 KB and a larger number of packets should be supported. */
1141 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
1142 if (priv->config.log_hp_size > max_wq_data) {
1143 DRV_LOG(ERR, "Total data size %u (power of 2) is "
1144 "too large for hairpin.",
1145 priv->config.log_hp_size);
1149 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
1151 attr.wq_attr.log_hairpin_data_sz =
1152 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
1153 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
1155 /* Set the number of packets to the maximum value for performance. */
1156 attr.wq_attr.log_hairpin_num_packets =
1157 attr.wq_attr.log_hairpin_data_sz -
1158 MLX5_HAIRPIN_QUEUE_STRIDE;
1160 attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
1161 tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &attr);
1164 "Port %u tx hairpin queue %u can't create SQ object.",
1165 dev->data->port_id, idx);
1172 #if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
1174 * Destroy the Tx queue DevX object.
1177 * Txq object to destroy.
1180 mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
1182 mlx5_devx_sq_destroy(&txq_obj->sq_obj);
1183 memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj));
1184 mlx5_devx_cq_destroy(&txq_obj->cq_obj);
1185 memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
1189 * Create a SQ object and its resources using DevX.
1192 * Pointer to Ethernet device.
1194 * Queue index in DPDK Tx queue array.
1195 * @param[in] log_desc_n
1196 * Log of number of descriptors in queue.
1199 * 0 on success, a negative errno value otherwise and rte_errno is set.
1202 mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
1203 uint16_t log_desc_n)
1205 struct mlx5_priv *priv = dev->data->dev_private;
1206 struct mlx5_common_device *cdev = priv->sh->cdev;
1207 struct mlx5_uar *uar = &priv->sh->tx_uar;
1208 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1209 struct mlx5_txq_ctrl *txq_ctrl =
1210 container_of(txq_data, struct mlx5_txq_ctrl, txq);
1211 struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
1212 struct mlx5_devx_create_sq_attr sq_attr = {
1213 .flush_in_error_en = 1,
1214 .allow_multi_pkt_send_wqe = !!priv->config.mps,
1215 .min_wqe_inline_mode = cdev->config.hca_attr.vport_inline_mode,
1216 .allow_swp = !!priv->sh->dev_cap.swp,
1217 .cqn = txq_obj->cq_obj.cq->id,
1219 .wq_attr = (struct mlx5_devx_wq_attr){
1221 .uar_page = mlx5_os_get_devx_uar_page_id(uar->obj),
1224 mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
1225 .tis_num = mlx5_get_txq_tis_num(dev, idx),
1228 /* Create Send Queue object with DevX. */
1229 return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj,
1230 log_desc_n, &sq_attr, priv->sh->numa_node);
1235 * Create the Tx queue DevX object.
1238 * Pointer to Ethernet device.
1240 * Queue index in DPDK Tx queue array.
1243 * 0 on success, a negative errno value otherwise and rte_errno is set.
1246 mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
1248 struct mlx5_priv *priv = dev->data->dev_private;
1249 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1250 struct mlx5_txq_ctrl *txq_ctrl =
1251 container_of(txq_data, struct mlx5_txq_ctrl, txq);
1253 if (txq_ctrl->is_hairpin)
1254 return mlx5_txq_obj_hairpin_new(dev, idx);
1255 #if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
1256 DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
1257 dev->data->port_id, idx);
1261 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
1262 struct mlx5_dev_ctx_shared *sh = priv->sh;
1263 struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
1264 struct mlx5_devx_cq_attr cq_attr = {
1265 .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
1267 uint32_t cqe_n, log_desc_n;
1268 uint32_t wqe_n, wqe_size;
1271 MLX5_ASSERT(txq_data);
1272 MLX5_ASSERT(txq_obj);
1273 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1275 txq_obj->txq_ctrl = txq_ctrl;
1277 cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
1278 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
1279 log_desc_n = log2above(cqe_n);
1280 cqe_n = 1UL << log_desc_n;
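/*
 * Worked example (editor's note, threshold constants assumed): with
 * elts_n = 11 (2048 elements) and a completion threshold of 32, the
 * raw count is 2048 / 32 + 1 plus the inline divisor, which
 * log2above() rounds up to the next power of two (128 CQEs).
 */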
1281 if (cqe_n > UINT16_MAX) {
1282 DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
1283 dev->data->port_id, txq_data->idx, cqe_n);
1287 /* Create completion queue object with DevX. */
1288 ret = mlx5_devx_cq_create(sh->cdev->ctx, &txq_obj->cq_obj, log_desc_n,
1289 &cq_attr, priv->sh->numa_node);
1291 DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
1292 dev->data->port_id, idx);
1295 txq_data->cqe_n = log_desc_n;
1296 txq_data->cqe_s = cqe_n;
1297 txq_data->cqe_m = txq_data->cqe_s - 1;
1298 txq_data->cqes = txq_obj->cq_obj.cqes;
1299 txq_data->cq_ci = 0;
1300 txq_data->cq_pi = 0;
1301 txq_data->cq_db = txq_obj->cq_obj.db_rec;
1302 *txq_data->cq_db = 0;
1304 * Adjust the number of WQEs depending on inline settings.
1305 * The descriptor count should be enough to handle the
1306 * specified number of packets. When a queue is created with
1307 * Verbs, rdma-core adjusts the queue size internally in
1308 * mlx5_calc_sq_size(); do the same here for a queue created
1309 * with DevX.
1311 wqe_size = txq_data->tso_en ?
1312 RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
1313 wqe_size += sizeof(struct mlx5_wqe_cseg) +
1314 sizeof(struct mlx5_wqe_eseg) +
1315 sizeof(struct mlx5_wqe_dseg);
1316 if (txq_data->inlen_send)
1317 wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
1318 sizeof(struct mlx5_wqe_eseg) +
1319 RTE_ALIGN(txq_data->inlen_send +
1322 wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
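/*
 * Worked example (editor's note, segment sizes assumed): with
 * inlen_send = 128, the control + Ethernet segments plus the aligned
 * inline part come to roughly 3 WQEBBs of MLX5_WQE_SIZE (64B) each,
 * so wqe_size = 3 and wqe_n below shrinks accordingly.
 */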
1323 /* Create Send Queue object with DevX. */
1324 wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
1325 (uint32_t)priv->sh->dev_cap.max_qp_wr);
1326 log_desc_n = log2above(wqe_n);
1327 ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
1329 DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
1330 dev->data->port_id, idx);
1334 /* Create the Work Queue. */
1335 txq_data->wqe_n = log_desc_n;
1336 txq_data->wqe_s = 1 << txq_data->wqe_n;
1337 txq_data->wqe_m = txq_data->wqe_s - 1;
1338 txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes;
1339 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
1340 txq_data->wqe_ci = 0;
1341 txq_data->wqe_pi = 0;
1342 txq_data->wqe_comp = 0;
1343 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
1344 txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
1345 *txq_data->qp_db = 0;
1346 txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
1347 txq_data->db_heu = sh->cdev->config.dbnc == MLX5_SQ_DB_HEURISTIC;
1348 txq_data->db_nc = sh->tx_uar.dbnc;
1349 txq_data->wait_on_time = !!(!sh->config.tx_pp &&
1350 sh->cdev->config.hca_attr.wait_on_time);
1351 /* Change Send Queue state to Ready-to-Send. */
1352 ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
1356 "Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
1357 dev->data->port_id, idx);
1360 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1362 * When using DevX, the TIS transport domain value must be
1363 * queried and stored once per port.
1364 * It is used later on Rx to create a matching TIR.
1367 priv->sh->tdn = priv->sh->td->id;
1369 txq_ctrl->uar_mmap_offset =
1370 mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar.obj);
1371 ppriv->uar_table[txq_data->idx] = sh->tx_uar.bf_db;
1372 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
1375 ret = rte_errno; /* Save rte_errno before cleanup. */
1376 mlx5_txq_release_devx_resources(txq_obj);
1377 rte_errno = ret; /* Restore rte_errno. */
1383 * Release a Tx DevX queue object.
1386 * DevX Tx queue object.
1389 mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
1391 MLX5_ASSERT(txq_obj);
1392 if (txq_obj->txq_ctrl->is_hairpin) {
1394 claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
1395 #if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
1397 mlx5_txq_release_devx_resources(txq_obj);
1402 struct mlx5_obj_ops devx_obj_ops = {
1403 .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
1404 .rxq_obj_new = mlx5_rxq_devx_obj_new,
1405 .rxq_event_get = mlx5_rx_devx_get_event,
1406 .rxq_obj_modify = mlx5_devx_modify_rq,
1407 .rxq_obj_release = mlx5_rxq_devx_obj_release,
1408 .ind_table_new = mlx5_devx_ind_table_new,
1409 .ind_table_modify = mlx5_devx_ind_table_modify,
1410 .ind_table_destroy = mlx5_devx_ind_table_destroy,
1411 .hrxq_new = mlx5_devx_hrxq_new,
1412 .hrxq_destroy = mlx5_devx_tir_destroy,
1413 .hrxq_modify = mlx5_devx_hrxq_modify,
1414 .drop_action_create = mlx5_devx_drop_action_create,
1415 .drop_action_destroy = mlx5_devx_drop_action_destroy,
1416 .txq_obj_new = mlx5_txq_devx_obj_new,
1417 .txq_obj_modify = mlx5_txq_devx_modify,
1418 .txq_obj_release = mlx5_txq_devx_obj_release,
1419 .lb_dummy_queue_create = NULL,
1420 .lb_dummy_queue_release = NULL,
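/*
 * Editor's note (illustrative): this operations table is installed into
 * the per-port private data at device spawn time, e.g.
 * priv->obj_ops = devx_obj_ops, so the queue object routines above are
 * always reached through priv->obj_ops rather than called directly.
 */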