/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
 * Modify RQ VLAN stripping offload.
 *   0 on success, non-0 otherwise.
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
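
/*
 * Illustrative sketch, not part of the driver: how a caller toggles VLAN
 * stripping on a live RQ through the helper above. Note the inverted sense
 * of the PRM "vsd" (VLAN strip disable) bit: on == 1 clears vsd. The
 * example_* name is hypothetical.
 */
static __rte_unused int
example_toggle_vlan_strip(struct mlx5_rxq_obj *rxq_obj, bool enable)
{
	/* enable maps to vsd = 0, disable to vsd = 1 inside the helper. */
	return mlx5_rxq_obj_modify_rq_vlan_strip(rxq_obj, enable ? 1 : 0);
}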
 * Modify RQ using DevX API.
 *   DevX Rx queue object.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	if (is_start) {
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
	} else {
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
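
/*
 * Illustrative sketch, not part of the driver: bouncing an RQ through the
 * RESET state and back to READY with the helper above, e.g. around a queue
 * reconfiguration. The example_* name is hypothetical.
 */
static __rte_unused int
example_restart_rq(struct mlx5_rxq_obj *rxq_obj)
{
	int ret = mlx5_devx_modify_rq(rxq_obj, false); /* RDY -> RST */

	if (ret)
		return ret;
	return mlx5_devx_modify_rq(rxq_obj, true); /* RST -> RDY */
}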
 * Modify SQ using DevX API.
 *   DevX Tx queue object.
 *   Type of queue state change.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
	/*
	 * The dev_port variable is relevant only in the Verbs API, and a
	 * function pointer may refer either to this function or to its
	 * Verbs counterpart, so the two must keep the same parameters.
	 */
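
/*
 * Illustrative sketch, not from the driver headers: the assumed shape of
 * the shared ops-slot signature that both the DevX and the Verbs
 * implementations of the Tx queue modify callback must match (hence the
 * dev_port parameter above even when unused). The typedef name is
 * hypothetical.
 */
typedef int (*example_txq_obj_modify_t)(struct mlx5_txq_obj *obj,
					enum mlx5_txq_modify_type type,
					uint8_t dev_port);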
 * Release the resources allocated for an RQ DevX object.
 *   DevX Rx queue object.
mlx5_rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->rq_dbr_offset));
		rxq_ctrl->rq_dbrec_page = NULL;
 * Release the resources allocated for the Rx CQ DevX object.
 *   DevX Rx queue object.
mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->cq_dbr_offset));
		rxq_ctrl->cq_dbrec_page = NULL;
 * Release an Rx DevX queue object.
 *   DevX Rx queue object.
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
		mlx5_devx_modify_rq(rxq_obj, false);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		if (rxq_obj->devx_channel)
			mlx5_glue->devx_destroy_event_channel
						(rxq_obj->devx_channel);
		mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
 * Get event for an Rx DevX queue object.
 *   DevX Rx queue object.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
#endif /* HAVE_IBV_DEVX_EVENT */
 * Fill common fields of create RQ attributes structure.
 *   Pointer to Rx queue data.
 *   CQ number to use with this RQ.
 *   RQ attributes structure to fill.
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
 * Fill common fields of DevX WQ attributes structure.
 *   Pointer to device private data.
 *   Pointer to Rx queue control structure.
 *   WQ attributes structure to fill.
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id =
		mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
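
/*
 * Both the WQ ring and its doorbell record are handed to the device as
 * registered umem IDs plus offsets, and the *_umem_valid flags tell the
 * device to take the umem path; the same pattern repeats below for the
 * Rx CQ and for the Tx SQ/CQ.
 */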
 * Create an RQ object using DevX.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Rx queue array.
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
	struct mlx5_devx_dbr_page *dbr_page;
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
			rxq_data->strd_num_n -
			MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
			rxq_data->strd_sz_n -
			MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
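		/*
		 * Worked example (illustrative, assuming the minimum logs
		 * are 9 for strides and 6 for bytes, as the two formulas
		 * above imply): strd_num_n = 12 gives field 12 - 9 = 3,
		 * i.e. 512 * 2^3 = 4096 strides per WQE; strd_sz_n = 11
		 * gives field 11 - 6 = 5, i.e. 64 B * 2^5 = 2 KB strides.
		 */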
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
	wq_size = wqe_n * wqe_size;
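	/*
	 * Worked example (illustrative): in the non-MPRQ case
	 * sizeof(struct mlx5_wqe_data_seg) is 16 B, so with sges_n = 2 the
	 * stride log is log2(16) + 2 = 6 (64 B per WQE, room for four
	 * segments); with elts_n = 10 this yields wqe_n = 2^(10 - 2) = 256
	 * WQEs and wq_size = 256 * 64 B = 16 KB.
	 */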
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
	if (!rxq_ctrl->wq_umem)
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbrec_page = dbr_page;
	rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
				       (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
 * Create a DevX CQ object for an Rx queue.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Rx queue array.
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_devx_obj *cq_obj = 0;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct mlx5_devx_dbr_page *dbr_page;
	uint16_t event_nums[1] = {0};

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		cq_attr.mini_cqe_res_format =
			mlx5_rxq_mprq_enabled(rxq_data) ?
				MLX5_CQE_RESP_FORMAT_CSUM_STRIDX :
				MLX5_CQE_RESP_FORMAT_HASH;
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5_CQE_SIZE_128B;
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbrec_page = dbr_page;
	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
				       (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
	/* Create CQ using DevX API. */
	cq_attr.eqn = priv->sh->eqn;
	cq_attr.uar_page_id =
		mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	mlx5_devx_cmd_destroy(cq_obj);
	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
 * Create the Rx hairpin queue object.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Rx queue array.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9 KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
			(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
			 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the number of packets to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
		attr.wq_attr.log_hairpin_data_sz -
		MLX5_HAIRPIN_QUEUE_STRIDE;
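	/*
	 * Worked example (illustrative): if log_hairpin_data_sz ends up 15
	 * (32 KB of hairpin data) and MLX5_HAIRPIN_QUEUE_STRIDE is 6 (64 B
	 * strides), the queue advertises 2^(15 - 6) = 512 packet slots.
	 */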
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create RQ object.",
			dev->data->port_id, idx);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;

 * Create the Rx queue DevX object.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Rx queue array.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;

	MLX5_ASSERT(rxq_data);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
	tmpl->rxq_ctrl = rxq_ctrl;
		int devx_ev_flag =
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
							(priv->sh->ctx,
							 devx_ev_flag);
		if (!tmpl->devx_channel) {
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	/* Create CQ using DevX API. */
	tmpl->devx_cq = mlx5_rxq_create_devx_cq_resources(dev, idx);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
	/* Create RQ using DevX API. */
	tmpl->rq = mlx5_rxq_create_devx_rq_resources(dev, idx);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, true);
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	ret = rte_errno; /* Save rte_errno before cleanup. */
		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
	if (tmpl->devx_channel)
		mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
 * Create RQT using DevX API as a field of the indirection table.
 *   Pointer to Ethernet device.
 *   Log of number of queues in the array.
 *   DevX indirection table object.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			       rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
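	/*
	 * Example (illustrative): with 3 queues and log_n = 2 (rqt_n = 4),
	 * the wrap-around loop above pads the tail by replaying the list
	 * from its start, yielding rq_list = { q0, q1, q2, q0 }.
	 */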
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",

 * Destroy the DevX RQT object.
 *   Indirection table to release.
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));

 * Create an Rx Hash queue.
 *   Pointer to Ethernet device.
 *   Pointer to Rx Hash queue.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_tir_attr tir_attr;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;

	/* Enable TIR LRO only if all the queues were configured for it. */
	for (i = 0; i < ind_tbl->queues_n; ++i) {
		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
	memset(&tir_attr, 0, sizeof(tir_attr));
	tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr.tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
		struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
		rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
				       &tir_attr.rx_hash_field_selector_inner :
				       &tir_attr.rx_hash_field_selector_outer;
#else
		rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
#endif
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
			!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
			!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask selecting which fields to use in the Rx hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
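		/*
		 * Example (illustrative): for an IPv4/TCP 4-tuple hash,
		 * hash_fields carries the L3 src/dst and L4 sport/dport
		 * bits, so selected_fields becomes SRC_IP | DST_IP |
		 * L4_SPORT | L4_DPORT, with l3_prot_type = 0 (IPv4) and
		 * l4_prot_type = 0 (TCP).
		 */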
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr.transport_domain = priv->sh->td->id;
	else
		tir_attr.transport_domain = priv->sh->tdn;
	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr.indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
		tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
					   MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
							(hrxq->tir->obj);
	err = rte_errno; /* Save rte_errno before cleanup. */
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
 * Destroy a DevX TIR object.
 *   Hash Rx queue to release its TIR.
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));

 * Create a DevX drop action for Rx Hash queue.
 *   Pointer to Ethernet device.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
	DRV_LOG(ERR, "DevX drop action is not supported yet.");

 * Release a drop hash Rx queue.
 *   Pointer to Ethernet device.
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
 * Create the Tx hairpin queue object.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Tx queue array.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	tmpl->txq_ctrl = txq_ctrl;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9 KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
			(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
			 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the number of packets to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
		attr.wq_attr.log_hairpin_data_sz -
		MLX5_HAIRPIN_QUEUE_STRIDE;
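	/*
	 * The same sizing example as on the Rx hairpin side applies here:
	 * e.g. a 15-bit data size with a 6-bit queue stride gives
	 * 2^(15 - 6) = 512 packet slots (illustrative values).
	 */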
	attr.tis_num = priv->sh->tis->id;
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u Tx hairpin queue %u can't create SQ object.",
			dev->data->port_id, idx);

#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
 * Release DevX SQ resources.
 *   DevX Tx queue object.
mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
	if (txq_obj->sq_devx)
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx));
	if (txq_obj->sq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
		mlx5_free(txq_obj->sq_buf);
	if (txq_obj->sq_dbrec_page)
		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id
						(txq_obj->sq_dbrec_page->umem),
					    txq_obj->sq_dbrec_offset));

 * Release DevX Tx CQ resources.
 *   DevX Tx queue object.
mlx5_txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj)
	if (txq_obj->cq_devx)
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
	if (txq_obj->cq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
		mlx5_free(txq_obj->cq_buf);
	if (txq_obj->cq_dbrec_page)
		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id
						(txq_obj->cq_dbrec_page->umem),
					    txq_obj->cq_dbrec_offset));

 * Destroy the Tx queue DevX object.
 *   Txq object to destroy.
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
	mlx5_txq_release_devx_cq_resources(txq_obj);
	mlx5_txq_release_devx_sq_resources(txq_obj);
 * Create a DevX CQ object and its resources for a Tx queue.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Tx queue array.
 *   Number of CQEs in CQ, 0 otherwise and rte_errno is set.
mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_cqe *cqe;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size.");
	/* Allocate memory buffer for CQEs. */
	alignment = MLX5_CQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get CQE buf alignment.");
	/* Create the Completion Queue. */
	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	cqe_n = 1UL << log2above(cqe_n);
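	/*
	 * Worked example (illustrative): with elts_n = 10 (1024 elements)
	 * this requests 1024 / MLX5_TX_COMP_THRESH + 1 +
	 * MLX5_TX_COMP_THRESH_INLINE_DIV completions and then rounds the
	 * result up to the next power of two, so the CQ is never smaller
	 * than the worst-case number of outstanding completions.
	 */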
	if (cqe_n > UINT16_MAX) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u requests too many CQEs %u.",
			dev->data->port_id, txq_data->idx, cqe_n);
	txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				      cqe_n * sizeof(struct mlx5_cqe),
				      alignment,
				      priv->sh->numa_node);
	if (!txq_obj->cq_buf) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot allocate memory (CQ).",
			dev->data->port_id, txq_data->idx);
	/* Register allocated buffer in user space with DevX. */
	txq_obj->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						    (void *)txq_obj->cq_buf,
						    cqe_n *
						    sizeof(struct mlx5_cqe),
						    IBV_ACCESS_LOCAL_WRITE);
	if (!txq_obj->cq_umem) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot register memory (CQ).",
			dev->data->port_id, txq_data->idx);
	/* Allocate doorbell record for completion queue. */
	txq_obj->cq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
						&priv->dbrpgs,
						&txq_obj->cq_dbrec_page);
	if (txq_obj->cq_dbrec_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
			   MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
	cq_attr.eqn = priv->sh->eqn;
	cq_attr.q_umem_valid = 1;
	cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
	cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);
	cq_attr.db_umem_valid = 1;
	cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
	cq_attr.log_cq_size = rte_log2_u32(cqe_n);
	cq_attr.log_page_size = rte_log2_u32(page_size);
	/* Create completion queue object with DevX. */
	txq_obj->cq_devx = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!txq_obj->cq_devx) {
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
	/* Initially fill the CQ buffer with the invalid CQE opcode. */
	cqe = (struct mlx5_cqe *)txq_obj->cq_buf;
	for (i = 0; i < cqe_n; i++) {
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
		++cqe;
	}
	mlx5_txq_release_devx_cq_resources(txq_obj);
 * Create an SQ object and its resources using DevX.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Tx queue array.
 *   Number of WQEs in SQ, 0 otherwise and rte_errno is set.
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size.");
	wqe_n = RTE_MIN(1UL << txq_data->elts_n,
			(uint32_t)priv->sh->device_attr.max_qp_wr);
	txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				      wqe_n * sizeof(struct mlx5_wqe),
				      page_size, priv->sh->numa_node);
	if (!txq_obj->sq_buf) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot allocate memory (SQ).",
			dev->data->port_id, txq_data->idx);
	/* Register allocated buffer in user space with DevX. */
	txq_obj->sq_umem = mlx5_glue->devx_umem_reg
						(priv->sh->ctx,
						 (void *)txq_obj->sq_buf,
						 wqe_n * sizeof(struct mlx5_wqe),
						 IBV_ACCESS_LOCAL_WRITE);
	if (!txq_obj->sq_umem) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot register memory (SQ).",
			dev->data->port_id, txq_data->idx);
	/* Allocate doorbell record for send queue. */
	txq_obj->sq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
						&priv->dbrpgs,
						&txq_obj->sq_dbrec_page);
	if (txq_obj->sq_dbrec_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate SQ door-bell.");
	sq_attr.tis_lst_sz = 1;
	sq_attr.tis_num = priv->sh->tis->id;
	sq_attr.state = MLX5_SQC_STATE_RST;
	sq_attr.cqn = txq_obj->cq_devx->id;
	sq_attr.flush_in_error_en = 1;
	sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
	sq_attr.allow_swp = !!priv->config.swp;
	sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
	sq_attr.wq_attr.uar_page =
		mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	sq_attr.wq_attr.pd = priv->sh->pdn;
	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	sq_attr.wq_attr.log_wq_sz = log2above(wqe_n);
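	/*
	 * Note (illustrative): MLX5_WQE_SIZE is 64 B, so the WQ stride log
	 * is 6 and the ring spans wqe_n * 64 B of the registered umem.
	 */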
	sq_attr.wq_attr.dbr_umem_valid = 1;
	sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset;
	sq_attr.wq_attr.dbr_umem_id =
		mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem);
	sq_attr.wq_attr.wq_umem_valid = 1;
	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem);
	sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
	/* Create Send Queue object with DevX. */
	txq_obj->sq_devx = mlx5_devx_cmd_create_sq(priv->sh->ctx, &sq_attr);
	if (!txq_obj->sq_devx) {
		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
			dev->data->port_id, idx);
	mlx5_txq_release_devx_sq_resources(txq_obj);
 * Create the Tx queue DevX object.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Tx queue array.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
		return mlx5_txq_obj_hairpin_new(dev, idx);
#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
		dev->data->port_id, idx);
#else
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	txq_obj->txq_ctrl = txq_ctrl;
	cqe_n = mlx5_txq_create_devx_cq_resources(dev, idx);
	txq_data->cqe_n = log2above(cqe_n);
	txq_data->cqe_s = 1 << txq_data->cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs +
						txq_obj->cq_dbrec_offset);
	*txq_data->cq_db = 0;
	/* Create Send Queue object with DevX. */
	wqe_n = mlx5_txq_create_devx_sq_resources(dev, idx);
	/* Create the Work Queue. */
	txq_data->wqe_n = log2above(wqe_n);
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
	txq_data->qp_db = (volatile uint32_t *)
				(txq_obj->sq_dbrec_page->dbrs +
				 txq_obj->sq_dbrec_offset +
				 MLX5_SND_DBR * sizeof(uint32_t));
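	/*
	 * The doorbell record is a pair of 32-bit counters: dword 0 holds
	 * the receive counter and dword 1 (MLX5_SND_DBR) the send counter,
	 * hence the MLX5_SND_DBR * sizeof(uint32_t) offset above.
	 */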
	*txq_data->qp_db = 0;
	txq_data->qp_num_8s = txq_obj->sq_devx->id << 8;
	/* Change Send Queue state to Ready-to-Send. */
	ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
	if (ret) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
			dev->data->port_id, idx);
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/*
	 * When using DevX, the TIS transport domain value must be queried
	 * and stored once per port; it is used later on Rx, when creating
	 * the matching TIR.
	 */
	priv->sh->tdn = priv->sh->td->id;
#endif
	MLX5_ASSERT(sh->tx_uar);
	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
	MLX5_ASSERT(reg_addr);
	txq_ctrl->bf_reg = reg_addr;
	txq_ctrl->uar_mmap_offset =
		mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
	txq_uar_init(txq_ctrl);
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_txq_release_devx_resources(txq_obj);
	rte_errno = ret; /* Restore rte_errno. */
 * Release a Tx DevX queue object.
 *   DevX Tx queue object.
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
	MLX5_ASSERT(txq_obj);
	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
	} else {
		mlx5_txq_release_devx_resources(txq_obj);
struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.drop_action_create = mlx5_devx_drop_action_create,
	.drop_action_destroy = mlx5_devx_drop_action_destroy,
	.txq_obj_new = mlx5_txq_devx_obj_new,
	.txq_obj_modify = mlx5_devx_modify_sq,
	.txq_obj_release = mlx5_txq_devx_obj_release,
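};

/*
 * Illustrative sketch, not part of the driver: the vtable above keeps
 * DevX and Verbs queue management interchangeable. Probe code is assumed
 * to select one implementation and store it in the port's private data,
 * after which all queue setup dispatches through it; the example_* name
 * is hypothetical.
 */
static __rte_unused void
example_use_devx_ops(struct mlx5_priv *priv)
{
	priv->obj_ops = devx_obj_ops; /* Select the DevX implementation. */
}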