git.droids-corp.org
/
dpdk.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
net/mvpp2: apply flow control after port init
[dpdk.git]
/
drivers
/
net
/
mlx5
/
mlx5_txq.c
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index e0e3963..cd13eb9 100644 (file)
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -12,7 +12,7 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_common.h>
#include <rte_eal_paging.h>
#include <rte_common.h>
#include <rte_eal_paging.h>
@@ -123,6 +123,8 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
}
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
}
+ if (!config->mprq.enabled)
+ offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
return offloads;
}
return offloads;
}
@@ -154,6 +156,7 @@ txq_sync_cq(struct mlx5_txq_data *txq)
/* Resync CQE and WQE (WQ in reset state). */
rte_io_wmb();
*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
/* Resync CQE and WQE (WQ in reset state). */
rte_io_wmb();
*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
+ txq->cq_pi = txq->cq_ci;
rte_io_wmb();
}
rte_io_wmb();
}
@@ -253,7 +256,7 @@ mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
-					   MLX5_TXQ_MOD_RDY2RDY,
+					   MLX5_TXQ_MOD_RST2RDY,
(uint8_t)priv->dev_port);
if (ret)
return ret;
(uint8_t)priv->dev_port);
if (ret)
return ret;
@@ -388,7 +391,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
dev->data->port_id, idx);
(*priv->txqs)[idx] = &txq_ctrl->txq;
DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
dev->data->port_id, idx);
(*priv->txqs)[idx] = &txq_ctrl->txq;
- dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
return 0;
}
@@ -634,18 +636,23 @@ txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
void
mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
{
void
mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_txq_data *txq;
- struct mlx5_txq_ctrl *txq_ctrl;
+ struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
+ dev->process_private;
+ const size_t page_size = rte_mem_page_size();
+ void *addr;
unsigned int i;
unsigned int i;
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ return;
+ }
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
-	for (i = 0; i != priv->txqs_n; ++i) {
-		if (!(*priv->txqs)[i])
+	for (i = 0; i != ppriv->uar_table_sz; ++i) {
+		if (!ppriv->uar_table[i])
continue;
continue;
-		txq = (*priv->txqs)[i];
-		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
-		txq_uar_uninit_secondary(txq_ctrl);
+		addr = ppriv->uar_table[i];
+		rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
}
}
}
@@ -800,6 +807,10 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
bool vlan_inline;
unsigned int temp;
bool vlan_inline;
unsigned int temp;
+ txq_ctrl->txq.fast_free =
+ !!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+ !config->mprq.enabled);
if (config->txqs_inline == MLX5_ARG_UNSET)
txqs_inline =
#if defined(RTE_ARCH_ARM64)
if (config->txqs_inline == MLX5_ARG_UNSET)
txqs_inline =
#if defined(RTE_ARCH_ARM64)
@@ -1141,11 +1152,12 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rte_errno = ENOMEM;
goto error;
}
rte_errno = ENOMEM;
goto error;
}
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
tmpl->type = MLX5_TXQ_TYPE_STANDARD;
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
error:
tmpl->type = MLX5_TXQ_TYPE_STANDARD;
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
error:
+ mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
mlx5_free(tmpl);
return NULL;
}
mlx5_free(tmpl);
return NULL;
}
@@ -1185,7 +1197,7 @@ mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->txq.idx = idx;
tmpl->hairpin_conf = *hairpin_conf;
tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
tmpl->txq.idx = idx;
tmpl->hairpin_conf = *hairpin_conf;
tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
@@ -1210,7 +1222,7 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
if (txq_data) {
ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
if (txq_data) {
ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
-	__atomic_add_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
}
return ctrl;
}
}
return ctrl;
}
@@
-1249,8
+1261,8
@@
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
txq_ctrl->txq.fcqs = NULL;
}
txq_free_elts(txq_ctrl);
txq_ctrl->txq.fcqs = NULL;
}
txq_free_elts(txq_ctrl);
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
}
}
- dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);