diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index e0e3963..eb4d34c 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -12,7 +12,8 @@
 
 #include <rte_mbuf.h>
 #include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
+#include <rte_bus_pci.h>
 #include <rte_common.h>
 #include <rte_eal_paging.h>
 
@@ -23,6 +24,7 @@
 #include "mlx5_defs.h"
 #include "mlx5_utils.h"
 #include "mlx5.h"
+#include "mlx5_tx.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_autoconf.h"
 
@@ -123,6 +125,8 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
                                     DEV_TX_OFFLOAD_GRE_TNL_TSO |
                                     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
        }
+       if (!config->mprq.enabled)
+               offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        return offloads;
 }
 
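DEV_TX_OFFLOAD_MBUF_FAST_FREE lets the PMD hand completed mbufs back to a single mempool with no per-mbuf refcount or segment checks, so the application must guarantee that all mbufs on the queue come from one pool with refcnt 1. It is masked out when MPRQ is enabled, since MPRQ Rx can produce mbufs with externally attached buffers that break that contract. A minimal sketch of how an application might opt in, with port_id and nb_desc as placeholders:

        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf txconf;

        rte_eth_dev_info_get(port_id, &dev_info);
        txconf = dev_info.default_txconf;
        /* Request fast free only if the port advertises it. */
        if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        rte_eth_tx_queue_setup(port_id, 0, nb_desc, rte_socket_id(), &txconf);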
@@ -154,6 +158,7 @@ txq_sync_cq(struct mlx5_txq_data *txq)
        /* Resync CQE and WQE (WQ in reset state). */
        rte_io_wmb();
        *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
+       txq->cq_pi = txq->cq_ci;
        rte_io_wmb();
 }
 
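Resetting cq_pi alongside the doorbell matters because the Tx data path tracks completions still in flight as (roughly) cq_pi - cq_ci; a stale producer index left over from before the reset could make the driver believe completion requests are permanently outstanding and stall mbuf recycling. The barrier pairing follows the usual doorbell-record discipline, sketched below with illustrative names (db_rec and new_ci are not the driver's own):

        /* Make CQE/WQE writes visible before publishing the index,
         * and publish the index before any later ring activity.
         */
        rte_io_wmb();
        *db_rec = rte_cpu_to_be_32(new_ci);
        rte_io_wmb();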
@@ -253,7 +258,7 @@ mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
 
        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
-                                          MLX5_TXQ_MOD_RDY2RDY,
+                                          MLX5_TXQ_MOD_RST2RDY,
                                           (uint8_t)priv->dev_port);
        if (ret)
                return ret;
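The stop path put the queue through RESET, so the only valid firmware transition when restarting is RST2RDY; issuing RDY2RDY against a queue that is no longer ready was the bug. From the application side this code is reached through the ordinary queue restart API:

        /* port_id/queue_id are placeholders; returns 0 or -errno. */
        int ret = rte_eth_dev_tx_queue_start(port_id, queue_id);

        if (ret != 0)
                printf("Tx queue %u start failed: %s\n",
                       queue_id, rte_strerror(-ret));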
@@ -388,7 +393,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
                dev->data->port_id, idx);
        (*priv->txqs)[idx] = &txq_ctrl->txq;
-       dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        return 0;
 }
 
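Forcing RTE_ETH_QUEUE_STATE_STARTED at setup time was wrong for queues configured with tx_deferred_start and is redundant now that the ethdev layer tracks queue state across start/stop; the PMD only touches the state in its own start/stop/release paths. Applications can observe the state through the queue info query (the queue_state field is a relatively recent ethdev addition):

        struct rte_eth_txq_info qinfo;

        if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) == 0 &&
            qinfo.queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
                printf("Tx queue %u is not started\n", queue_id);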
@@ -634,18 +638,22 @@ txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
 void
 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_txq_data *txq;
-       struct mlx5_txq_ctrl *txq_ctrl;
+       struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
+                                       dev->process_private;
+       const size_t page_size = rte_mem_page_size();
+       void *addr;
        unsigned int i;
 
+       if (page_size == (size_t)-1) {
+               DRV_LOG(ERR, "Failed to get mem page size");
+               return;
+       }
        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
-       for (i = 0; i != priv->txqs_n; ++i) {
-               if (!(*priv->txqs)[i])
+       for (i = 0; i != ppriv->uar_table_sz; ++i) {
+               if (!ppriv->uar_table[i])
                        continue;
-               txq = (*priv->txqs)[i];
-               txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
-               txq_uar_uninit_secondary(txq_ctrl);
+               addr = ppriv->uar_table[i];
+               rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
        }
 }
 
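The rewritten loop is the point of this hunk: a secondary process must not walk the primary's priv->txqs array during teardown (it may already be released), while the UAR table hanging off dev->process_private records exactly what this process mapped and therefore what it must unmap. The unmap mirrors the page-granular mapping, roughly as below, where uar_va is a placeholder for a UAR register address inside a mapped page:

        size_t pgsz = rte_mem_page_size();
        /* Unmap the whole page containing the UAR register. */
        void *page = RTE_PTR_ALIGN_FLOOR(uar_va, pgsz);

        rte_mem_unmap(page, pgsz);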
@@ -800,10 +808,14 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
        bool vlan_inline;
        unsigned int temp;
 
+       txq_ctrl->txq.fast_free =
+               !!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+                  !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+                  !config->mprq.enabled);
        if (config->txqs_inline == MLX5_ARG_UNSET)
                txqs_inline =
 #if defined(RTE_ARCH_ARM64)
-               (priv->pci_dev->id.device_id ==
+               (priv->pci_dev && priv->pci_dev->id.device_id ==
                        PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
                        MLX5_INLINE_MAX_TXQS_BLUEFIELD :
 #endif
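Two fixes share this hunk. The per-queue fast_free flag is computed once at setup and requires the fast-free offload, no multi-segment Tx, and MPRQ disabled, because the completion path can then free mbufs with a single bulk mempool operation instead of per-mbuf rte_pktmbuf_free_seg() handling, in the spirit of:

        /* Sketch only: elts/n stand for the array of completed mbufs
         * and its length; all of them share elts[0]->pool under the
         * fast-free contract, so no refcount or pool checks needed.
         */
        rte_mempool_put_bulk(elts[0]->pool, (void **)elts, n);

Separately, priv->pci_dev is now checked for NULL before the BlueField device-ID probe: mlx5 ports can be instantiated on non-PCI buses (for example SF devices on the auxiliary bus), where no rte_pci_device is attached.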
@@ -1141,11 +1153,12 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                rte_errno = ENOMEM;
                goto error;
        }
-       __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+       __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
        tmpl->type = MLX5_TXQ_TYPE_STANDARD;
        LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
        return tmpl;
 error:
+       mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
        mlx5_free(tmpl);
        return NULL;
 }
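Two independent changes meet in mlx5_txq_new(): the error path now releases the MR B-tree initialized earlier in the function, which was previously leaked on failure, and the refcount bump switches to __atomic_fetch_add, whose fetch-then-add form has a direct C11 stdatomic counterpart (atomic_fetch_add_explicit), unlike __atomic_add_fetch. With the return value discarded the two builtins are interchangeable:

        /* GCC/Clang builtins: identical atomic increment, the only
         * difference is the returned value; cnt is illustrative.
         */
        unsigned int cnt = 0;
        unsigned int before = __atomic_fetch_add(&cnt, 1, __ATOMIC_RELAXED); /* 0, cnt == 1 */
        unsigned int after = __atomic_add_fetch(&cnt, 1, __ATOMIC_RELAXED);  /* 2, cnt == 2 */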
@@ -1185,7 +1198,7 @@ mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        tmpl->txq.idx = idx;
        tmpl->hairpin_conf = *hairpin_conf;
        tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
-       __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+       __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
        LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
        return tmpl;
 }
@@ -1210,7 +1223,7 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
 
        if (txq_data) {
                ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
-               __atomic_add_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+               __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
        }
        return ctrl;
 }
@@ -1232,7 +1245,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *txq_ctrl;
 
-       if (!(*priv->txqs)[idx])
+       if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
                return 0;
        txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
        if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
@@ -1249,8 +1262,8 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
                        txq_ctrl->txq.fcqs = NULL;
                }
                txq_free_elts(txq_ctrl);
+               dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
-       dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
                if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
                        mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
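The release path picks up two defensive fixes: priv->txqs may already be NULL when release is invoked from the device close path, and the STOPPED state is now set only in the branch that actually drains a standard queue (fcqs freed, elts freed) rather than on every reference drop. The get/release pairing this protects, with dev/idx as placeholders:

        struct mlx5_txq_ctrl *ctrl = mlx5_txq_get(dev, idx); /* refcnt++ */

        if (ctrl != NULL) {
                /* ... use ctrl->txq ... */
                mlx5_txq_release(dev, idx);                  /* refcnt-- */
        }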