+ DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
+ dev->data->port_id, txq_ibv->txq_ctrl->idx);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Set Tx queue parameters from device configuration.
+ *
+ * @param txq_ctrl
+ * Pointer to Tx queue control structure.
+ */
+static void
+txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
+{
+ struct priv *priv = txq_ctrl->priv;
+ struct mlx5_dev_config *config = &priv->config;
+ const unsigned int max_tso_inline =
+ ((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE);
+ unsigned int txq_inline;
+ unsigned int txqs_inline;
+ unsigned int inline_max_packet_sz;
+ eth_tx_burst_t tx_pkt_burst =
+ mlx5_select_tx_function(ETH_DEV(priv));
+ int is_empw_func = is_empw_burst_func(tx_pkt_burst);
+ int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO));
+
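+ /*
+ * Parameters left unset by the user (MLX5_ARG_UNSET) default to 0
+ * here; suitable defaults may be applied below instead.
+ */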
+ txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txq_inline;
+ txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txqs_inline;
+ inline_max_packet_sz =
+ (config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
+ 0 : config->inline_max_packet_sz;
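+ /*
+ * When the selected Tx burst function uses enhanced multi-packet
+ * write (eMPW), apply driver defaults to the inline parameters
+ * the user left unset.
+ */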
+ if (is_empw_func) {
+ if (config->txq_inline == MLX5_ARG_UNSET)
+ txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
+ if (config->txqs_inline == MLX5_ARG_UNSET)
+ txqs_inline = MLX5_EMPW_MIN_TXQS;
+ if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
+ inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
+ txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
+ txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
+ }
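+ /*
+ * Inlining takes effect only when requested and when the number
+ * of configured Tx queues reaches the txqs_inline threshold.
+ */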
+ if (txq_inline && priv->txqs_n >= txqs_inline) {
+ unsigned int ds_cnt;
+
+ txq_ctrl->txq.max_inline =
+ ((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE);
+ if (is_empw_func) {
+ /* To minimize the size of the data set, avoid
+ * requesting an overly large WQ.
+ */
+ txq_ctrl->max_inline_data =
+ ((RTE_MIN(txq_inline,
+ inline_max_packet_sz) +
+ (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
+ } else {
+ txq_ctrl->max_inline_data =
+ txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
+ }
+ /*
+ * Check if the inline size is so large that it would make the
+ * WQE DS count overflow.
+ * Considered in the calculation:
+ * WQE CTRL (1 DS)
+ * WQE ETH (1 DS)
+ * Inline part (N DS)
+ */
+ ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
+ if (ds_cnt > MLX5_DSEG_MAX) {
+ unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
+ MLX5_WQE_DWORD_SIZE;
+
+ max_inline = max_inline - (max_inline %
+ RTE_CACHE_LINE_SIZE);
+ DRV_LOG(WARNING,
+ "port %u txq inline is too large (%d), setting"
+ " it to the maximum possible: %d",
+ PORT_ID(priv), txq_inline, max_inline);
+ txq_ctrl->txq.max_inline = max_inline /
+ RTE_CACHE_LINE_SIZE;
+ }
+ }
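+ /* Make sure the inline room covers the largest possible TSO header. */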
+ if (tso) {
+ txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
+ txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
+ max_tso_inline);
+ txq_ctrl->txq.tso_en = 1;
+ }
+ txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
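+ /*
+ * Enable the software parser only when one of the offloads below
+ * is requested and the device supports SWP.
+ */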
+ txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
+ txq_ctrl->txq.offloads) && config->swp;
+}
+
+/**
+ * Create a DPDK Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * TX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ *
+ * @return
+ * A DPDK queue object on success, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_txq_ctrl *
+mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_ctrl *tmpl;
+
+ tmpl = rte_calloc_socket("TXQ", 1,
+ sizeof(*tmpl) +
+ desc * sizeof(struct rte_mbuf *),
+ 0, socket);
+ if (!tmpl) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
+ MLX5_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
+ /* Save pointer to the global generation number for memory event checks. */
+ tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
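+ /* The queue must hold more descriptors than the Tx completion threshold. */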
+ assert(desc > MLX5_TX_COMP_THRESH);
+ tmpl->txq.offloads = conf->offloads |
+ dev->data->dev_conf.txmode.offloads;
+ tmpl->priv = priv;
+ tmpl->socket = socket;
+ tmpl->txq.elts_n = log2above(desc);
+ tmpl->idx = idx;
+ txq_set_params(tmpl);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_sge);
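+ /* The elts array sits in the same allocation, right after tmpl. */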
+ tmpl->txq.elts =
+ (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
+ tmpl->txq.stats.idx = idx;
+ rte_atomic32_inc(&tmpl->refcnt);
+ LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
+ return tmpl;
+error:
+ rte_free(tmpl);
+ return NULL;
+}
+
+/**
+ * Get a Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_txq_ctrl *
+mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_ctrl *ctrl = NULL;
+
+ if ((*priv->txqs)[idx]) {
+ ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
+ txq);
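+ /* Also take a reference on the Verbs queue object, if any. */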
+ mlx5_txq_ibv_get(dev, idx);
+ rte_atomic32_inc(&ctrl->refcnt);
+ }
+ return ctrl;
+}
+
+/**
+ * Release a Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_ctrl *txq;
+ size_t page_size = sysconf(_SC_PAGESIZE);
+
+ if (!(*priv->txqs)[idx])
+ return 0;
+ txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
+ txq->ibv = NULL;
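+ /* Unmap the page holding this queue's BlueFlame register, if remapped. */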
+ if (priv->uar_base)
+ munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
+ page_size), page_size);
+ if (rte_atomic32_dec_and_test(&txq->refcnt)) {
+ txq_free_elts(txq);
+ mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
+ LIST_REMOVE(txq, next);
+ rte_free(txq);
+ (*priv->txqs)[idx] = NULL;
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Verify if the queue can be released.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * 1 if the queue can be released, 0 if it is still referenced,
+ * -1 if it does not exist.
+ */
+int
+mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_ctrl *txq;
+
+ if (!(*priv->txqs)[idx])
+ return -1;
+ txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ return (rte_atomic32_read(&txq->refcnt) == 1);
+}
+
+/**
+ * Verify that the Tx queue list is empty.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of objects not released.
+ */
+int
+mlx5_txq_verify(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_ctrl *txq;
+ int ret = 0;
+
+ LIST_FOREACH(txq, &priv->txqsctrl, next) {
+ DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
+ dev->data->port_id, txq->idx);