+/**
+ * Prepare RQT attribute structure for DevX RQT API.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param log_n
+ * Log of number of queues in the array.
+ * @param queues
+ * List of Rx queue indices.
+ * @param queues_n
+ * Size of the queues array.
+ *
+ * @return
+ * The RQT attributes object initialized, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_devx_rqt_attr *
+mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
+ const unsigned int log_n,
+ const uint16_t *queues,
+ const uint32_t queues_n)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+ const unsigned int rqt_n = 1 << log_n;
+ unsigned int i, j;
+
+ rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
+ rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
+ if (!rqt_attr) {
+ DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
+ rqt_attr->rqt_actual_size = rqt_n;
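+ /* Fill the RQ IDs taken from the Rx queue DevX objects. */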
+ for (i = 0; i != queues_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+
+ rqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;
+ }
+ MLX5_ASSERT(i > 0);
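+ /*
+ * Pad the RQT up to its full power-of-two size by repeating RQ IDs
+ * from the beginning of the list; rq_list[j] with j < i always
+ * reads an entry that was already filled above.
+ */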
+ for (j = 0; i != rqt_n; ++j, ++i)
+ rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
+ return rqt_attr;
+}
+
+/**
+ * Create RQT using DevX API as a field of indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param log_n
+ * Log of number of queues in the array.
+ * @param ind_tbl
+ * DevX indirection table object.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+
+ MLX5_ASSERT(ind_tbl);
+ rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
+ ind_tbl->queues,
+ ind_tbl->queues_n);
+ if (!rqt_attr)
+ return -rte_errno;
+ ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
+ mlx5_free(rqt_attr);
+ if (!ind_tbl->rqt) {
+ DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
+ dev->data->port_id);
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+/**
+ * Modify RQT using DevX API as a field of indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param log_n
+ * Log of number of queues in the array.
+ * @param queues
+ * List of Rx queue indices.
+ * @param queues_n
+ * Size of the queues array.
+ * @param ind_tbl
+ * DevX indirection table object.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
+ const uint16_t *queues, const uint32_t queues_n,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ int ret = 0;
+ struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+
+ MLX5_ASSERT(ind_tbl);
+ rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
+ queues,
+ queues_n);
+ if (!rqt_attr)
+ return -rte_errno;
+ ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
+ mlx5_free(rqt_attr);
+ if (ret)
+ DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
+ dev->data->port_id);
+ return ret;
+}
+
+/**
+ * Destroy the DevX RQT object.
+ *
+ * @param ind_tbl
+ * Indirection table to release.
+ */
+static void
+mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
+{
+ claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
+}
+
+/**
+ * Set TIR attribute struct with relevant input values.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] rss_key
+ * RSS key for the Rx hash queue.
+ * @param[in] hash_fields
+ * Verbs protocol hash field to make the RSS on.
+ * @param[in] ind_tbl
+ * Indirection table for TIR.
+ * @param[in] tunnel
+ * Tunnel type.
+ * @param[out] tir_attr
+ * Parameters structure for TIR creation/modification.
+ */
+static void
+mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
+ uint64_t hash_fields,
+ const struct mlx5_ind_table_obj *ind_tbl,
+ int tunnel, struct mlx5_devx_tir_attr *tir_attr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
+ bool lro = true;
+ uint32_t i;
+
+ /* Enable TIR LRO only if all the queues were configured for it. */
+ for (i = 0; i < ind_tbl->queues_n; ++i) {
+ if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
+ lro = false;
+ break;
+ }
+ }
+ memset(tir_attr, 0, sizeof(*tir_attr));
+ tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
+ tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
+ tir_attr->tunneled_offload_en = !!tunnel;
+ /* If needed, translate hash_fields bitmap to PRM format. */
+ if (hash_fields) {
+ struct mlx5_rx_hash_field_select *rx_hash_field_select =
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ hash_fields & IBV_RX_HASH_INNER ?
+ &tir_attr->rx_hash_field_selector_inner :
+#endif
+ &tir_attr->rx_hash_field_selector_outer;
+ /* 1 bit: 0: IPv4, 1: IPv6. */
+ rx_hash_field_select->l3_prot_type =
+ !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
+ /* 1 bit: 0: TCP, 1: UDP. */
+ rx_hash_field_select->l4_prot_type =
+ !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
+ /* Bitmask which sets which fields to use in RX Hash. */
+ rx_hash_field_select->selected_fields =
+ (!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
+ (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP) |
+ (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
+ (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
+ }
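+ /*
+ * Hairpin queues take the transport domain ID directly from the TD
+ * object; regular queues use the value cached on the shared context
+ * when the Tx queue objects were created.
+ */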
+ if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
+ tir_attr->transport_domain = priv->sh->td->id;
+ else
+ tir_attr->transport_domain = priv->sh->tdn;
+ memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ tir_attr->indirect_table = ind_tbl->rqt->id;
+ if (dev->data->dev_conf.lpbk_mode)
+ tir_attr->self_lb_block =
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ if (lro) {
+ tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
+ tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
+ tir_attr->lro_enable_mask =
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
+ }
+}
+
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq
+ * Pointer to Rx Hash queue.
+ * @param tunnel
+ * Tunnel type.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
+ int tunnel)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_devx_tir_attr tir_attr = {0};
+ int err;
+
+ mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
+ hrxq->ind_table, tunnel, &tir_attr);
+ hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
+ if (!hrxq->tir) {
+ DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
+ &hrxq->action)) {
+ rte_errno = errno;
+ goto error;
+ }
+#endif
+ return 0;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ if (hrxq->tir)
+ claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * Destroy a DevX TIR object.
+ *
+ * @param hrxq
+ * Hash Rx queue to release its TIR.
+ */
+static void
+mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
+{
+ claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
+}
+
+/**
+ * Modify an Rx Hash queue configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq
+ * Hash Rx queue to modify.
+ * @param rss_key
+ * RSS key for the Rx hash queue.
+ * @param hash_fields
+ * Verbs protocol hash field to make the RSS on.
+ * @param[in] ind_tbl
+ * Indirection table for TIR.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
+ const uint8_t *rss_key,
+ uint64_t hash_fields,
+ const struct mlx5_ind_table_obj *ind_tbl)
+{
+ struct mlx5_devx_modify_tir_attr modify_tir = {0};
+
+ /*
+ * Untested modification fields:
+ * - rx_hash_symmetric is not set in hrxq_new(),
+ * - rx_hash_fn is hard-coded in hrxq_new(),
+ * - lro_xxx is not set after Rx queue setup.
+ */
+ if (ind_tbl != hrxq->ind_table)
+ modify_tir.modify_bitmask |=
+ MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
+ if (hash_fields != hrxq->hash_fields ||
+ memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
+ modify_tir.modify_bitmask |=
+ MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
+ mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
+ 0, /* N/A - tunnel modification unsupported */
+ &modify_tir.tir);
+ modify_tir.tirn = hrxq->tir->id;
+ if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
+ DRV_LOG(ERR, "port %u cannot modify DevX TIR",
+ dev->data->port_id);
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+/**
+ * Create a DevX drop action for Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
+{
+ (void)dev;
+ DRV_LOG(ERR, "DevX drop action is not supported yet.");
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
+/**
+ * Release a drop hash Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
+{
+ (void)dev;
+ DRV_LOG(ERR, "DevX drop action is not supported yet.");
+ rte_errno = ENOTSUP;
+}
+
+/**
+ * Create the Tx hairpin queue object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Tx queue array.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq_data, struct mlx5_txq_ctrl, txq);
+ struct mlx5_devx_create_sq_attr attr = { 0 };
+ struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
+ uint32_t max_wq_data;
+
+ MLX5_ASSERT(txq_data);
+ MLX5_ASSERT(tmpl);
+ tmpl->txq_ctrl = txq_ctrl;
+ attr.hairpin = 1;
+ attr.tis_lst_sz = 1;
+ max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+ /* Jumbo frames > 9 KB must be supported, with as many packets as possible. */
+ if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
+ if (priv->config.log_hp_size > max_wq_data) {
+ DRV_LOG(ERR, "Total data size %u power of 2 is "
+ "too large for hairpin.",
+ priv->config.log_hp_size);
+ rte_errno = ERANGE;
+ return -rte_errno;
+ }
+ attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
+ } else {
+ attr.wq_attr.log_hairpin_data_sz =
+ (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
+ max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
+ }
+ /* Set the packets number to the maximum value for performance. */
+ attr.wq_attr.log_hairpin_num_packets =
+ attr.wq_attr.log_hairpin_data_sz -
+ MLX5_HAIRPIN_QUEUE_STRIDE;
+ attr.tis_num = priv->sh->tis->id;
+ tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
+ if (!tmpl->sq) {
+ DRV_LOG(ERR,
+ "Port %u tx hairpin queue %u can't create SQ object.",
+ dev->data->port_id, idx);
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
+/**
+ * Destroy the Tx queue DevX object.
+ *
+ * @param txq_obj
+ * Txq object to destroy.
+ */
+static void
+mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
+{
+ mlx5_devx_sq_destroy(&txq_obj->sq_obj);
+ memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj));
+ mlx5_devx_cq_destroy(&txq_obj->cq_obj);
+ memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
+}
+
+/**
+ * Create a SQ object and its resources using DevX.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Tx queue array.
+ * @param[in] log_desc_n
+ * Log of number of descriptors in queue.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t log_desc_n)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq_data, struct mlx5_txq_ctrl, txq);
+ struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
+ struct mlx5_devx_create_sq_attr sq_attr = {
+ .flush_in_error_en = 1,
+ .allow_multi_pkt_send_wqe = !!priv->config.mps,
+ .min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode,
+ .allow_swp = !!priv->config.swp,
+ .cqn = txq_obj->cq_obj.cq->id,
+ .tis_lst_sz = 1,
+ .tis_num = priv->sh->tis->id,
+ .wq_attr = (struct mlx5_devx_wq_attr){
+ .pd = priv->sh->pdn,
+ .uar_page =
+ mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
+ },
+ };
+
+ /* Create Send Queue object with DevX. */
+ return mlx5_devx_sq_create(priv->sh->ctx, &txq_obj->sq_obj, log_desc_n,
+ &sq_attr, priv->sh->numa_node);
+}
+#endif
+
+/**
+ * Create the Tx queue DevX object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Tx queue array.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq_data, struct mlx5_txq_ctrl, txq);
+
+ if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
+ return mlx5_txq_obj_hairpin_new(dev, idx);
+#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
+ DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+#else
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
+ struct mlx5_devx_cq_attr cq_attr = {
+ .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+ };
+ void *reg_addr;
+ uint32_t cqe_n, log_desc_n;
+ uint32_t wqe_n, wqe_size;
+ int ret = 0;
+
+ MLX5_ASSERT(txq_data);
+ MLX5_ASSERT(txq_obj);
+ txq_obj->txq_ctrl = txq_ctrl;
+ txq_obj->dev = dev;
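+ /*
+ * Estimate the number of CQEs needed: one completion per
+ * MLX5_TX_COMP_THRESH descriptors plus headroom for inline
+ * completions, rounded up to a power of two below.
+ */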
+ cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
+ 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
+ log_desc_n = log2above(cqe_n);
+ cqe_n = 1UL << log_desc_n;
+ if (cqe_n > UINT16_MAX) {
+ DRV_LOG(ERR, "Port %u Tx queue %u requests to many CQEs %u.",
+ dev->data->port_id, txq_data->idx, cqe_n);
+ rte_errno = EINVAL;
+ return 0;
+ }
+ /* Create completion queue object with DevX. */
+ ret = mlx5_devx_cq_create(sh->ctx, &txq_obj->cq_obj, log_desc_n,
+ &cq_attr, priv->sh->numa_node);
+ if (ret) {
+ DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
+ dev->data->port_id, idx);
+ goto error;
+ }
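+ /* Fill the datapath Completion Queue fields. */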
+ txq_data->cqe_n = log_desc_n;
+ txq_data->cqe_s = cqe_n;
+ txq_data->cqe_m = txq_data->cqe_s - 1;
+ txq_data->cqes = txq_obj->cq_obj.cqes;
+ txq_data->cq_ci = 0;
+ txq_data->cq_pi = 0;
+ txq_data->cq_db = txq_obj->cq_obj.db_rec;
+ *txq_data->cq_db = 0;
+ /*
+ * Adjust the number of WQEs depending on inline settings.
+ * The number of descriptors should be enough to handle
+ * the specified number of packets. When a queue is created
+ * with Verbs, rdma-core adjusts the queue size internally in
+ * mlx5_calc_sq_size(); do the same here for a queue created
+ * with DevX.
+ */
+ wqe_size = txq_data->tso_en ?
+ RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
+ wqe_size += sizeof(struct mlx5_wqe_cseg) +
+ sizeof(struct mlx5_wqe_eseg) +
+ sizeof(struct mlx5_wqe_dseg);
+ if (txq_data->inlen_send)
+ wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
+ sizeof(struct mlx5_wqe_eseg) +
+ RTE_ALIGN(txq_data->inlen_send +
+ sizeof(uint32_t),
+ MLX5_WSEG_SIZE));
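+ /* Convert the WQE size from bytes to the number of basic blocks (WQEBBs). */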
+ wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
+ /* Cap the number of WQEs by the device capability and derive log size. */
+ wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
+ (uint32_t)priv->sh->device_attr.max_qp_wr);
+ log_desc_n = log2above(wqe_n);
+ ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
+ if (ret) {
+ DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
+ dev->data->port_id, idx);
+ rte_errno = errno;
+ goto error;
+ }
+ /* Fill the datapath Work Queue fields. */
+ txq_data->wqe_n = log_desc_n;
+ txq_data->wqe_s = 1 << txq_data->wqe_n;
+ txq_data->wqe_m = txq_data->wqe_s - 1;
+ txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes;
+ txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
+ txq_data->wqe_ci = 0;
+ txq_data->wqe_pi = 0;
+ txq_data->wqe_comp = 0;
+ txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
+ txq_data->qp_db = txq_obj->sq_obj.db_rec;
+ *txq_data->qp_db = 0;
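+ /*
+ * Store the SQ number pre-shifted by 8 bits for direct use in
+ * WQE control segments.
+ */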
+ txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
+ /* Change Send Queue state to Ready-to-Send. */
+ ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
+ if (ret) {
+ rte_errno = errno;
+ DRV_LOG(ERR,
+ "Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
+ dev->data->port_id, idx);
+ goto error;
+ }
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ /*
+ * When using DevX, the TIS transport domain value must be queried
+ * and stored once per port. It is used later on the Rx side when
+ * creating a matching TIR.
+ */
+ if (!priv->sh->tdn)
+ priv->sh->tdn = priv->sh->td->id;
+#endif
+ MLX5_ASSERT(sh->tx_uar);
+ reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
+ MLX5_ASSERT(reg_addr);
+ txq_ctrl->bf_reg = reg_addr;
+ txq_ctrl->uar_mmap_offset =
+ mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
+ txq_uar_init(txq_ctrl);
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_txq_release_devx_resources(txq_obj);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
+#endif
+}
+
+/**
+ * Release a Tx DevX queue object.
+ *
+ * @param txq_obj
+ * DevX Tx queue object.
+ */
+void
+mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
+{
+ MLX5_ASSERT(txq_obj);
+ if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
+ if (txq_obj->tis)
+ claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
+#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
+ } else {
+ mlx5_txq_release_devx_resources(txq_obj);
+#endif
+ }
+}
+