*/
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"
+/*
+ * Device parameter to configure the maximum number of Tx queues for
+ * which vectorized Tx is enabled.
+ */
+#define MLX5_TXQS_MAX_VEC "txqs_max_vec"
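+/* Illustrative devargs usage (hypothetical PCI address): -w 0000:05:00.0,txqs_max_vec=4 */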
+
/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"
}
memset(priv, 0, sizeof(*priv));
priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
- /*
- * flag to rte_eth_dev_close() that it should release the port resources
- * (calling rte_eth_dev_release_port()) in addition to closing it.
- */
- dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
/*
* Reset mac_addrs to NULL such that it is not freed as part of
* rte_eth_dev_release_port(). mac_addrs is part of dev_private so
.filter_ctrl = mlx5_dev_filter_ctrl,
.rx_descriptor_status = mlx5_rx_descriptor_status,
.tx_descriptor_status = mlx5_tx_descriptor_status,
+ .rx_queue_count = mlx5_rx_queue_count,
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
.is_removed = mlx5_is_removed,
config->txq_inline = tmp;
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
config->txqs_inline = tmp;
+ } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
+ config->txqs_vec = tmp;
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
config->mps = !!tmp;
} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
MLX5_RXQS_MIN_MPRQ,
MLX5_TXQ_INLINE,
MLX5_TXQS_MIN_INLINE,
+ MLX5_TXQS_MAX_VEC,
MLX5_TXQ_MPW_EN,
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
* Backing DPDK device.
* @param ibv_dev
* Verbs device.
- * @param vf
- * If nonzero, enable VF-specific features.
+ * @param config
+ * Device configuration parameters.
* @param[in] switch_info
* Switch properties of Ethernet device.
*
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
struct ibv_device *ibv_dev,
- int vf,
+ struct mlx5_dev_config config,
const struct mlx5_switch_info *switch_info)
{
- struct ibv_context *ctx;
+ struct ibv_context *ctx = NULL;
struct ibv_device_attr_ex attr;
struct ibv_port_attr port_attr;
struct ibv_pd *pd = NULL;
struct mlx5dv_context dv_attr = { .comp_mask = 0 };
- struct mlx5_dev_config config = {
- .vf = !!vf,
- .cqe_pad = 0,
- .mps = MLX5_ARG_UNSET,
- .tx_vec_en = 1,
- .rx_vec_en = 1,
- .mpw_hdr_dseg = 0,
- .txq_inline = MLX5_ARG_UNSET,
- .txqs_inline = MLX5_ARG_UNSET,
- .inline_max_packet_sz = MLX5_ARG_UNSET,
- .vf_nl_en = 1,
- .mprq = {
- .enabled = 0,
- .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
- .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
- .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
- },
- };
struct rte_eth_dev *eth_dev = NULL;
struct priv *priv = NULL;
int err = 0;
/* Prepare shared data between primary and secondary process. */
mlx5_prepare_shared_data();
errno = 0;
- ctx = mlx5_glue->open_device(ibv_dev);
- if (!ctx) {
- rte_errno = errno ? errno : ENODEV;
- return NULL;
+ ctx = mlx5_glue->dv_open_device(ibv_dev);
+ if (ctx) {
+ config.devx = 1;
+ DRV_LOG(DEBUG, "DEVX is supported");
+ } else {
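+ /* DevX open failed; fall back to a plain Verbs device. */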
+ ctx = mlx5_glue->open_device(ibv_dev);
+ if (!ctx) {
+ rte_errno = errno ? errno : ENODEV;
+ return NULL;
+ }
}
#ifdef HAVE_IBV_MLX5_MOD_SWP
dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
err = ENOMEM;
goto error;
}
+ /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
if (priv->representor) {
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
eth_dev->data->representor_id = priv->representor_id;
eth_dev->dev_ops = &mlx5_dev_ops;
/* Register MAC address. */
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
- if (vf && config.vf_nl_en)
+ if (config.vf && config.vf_nl_en)
mlx5_nl_mac_addr_sync(eth_dev);
priv->tcf_context = mlx5_flow_tcf_context_create();
if (!priv->tcf_context) {
{
struct ibv_device **ibv_list;
unsigned int n = 0;
- int vf;
+ struct mlx5_dev_config dev_config;
int ret;
assert(pci_drv == &mlx5_driver);
*/
if (n)
qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp);
+ /* Default configuration. */
+ dev_config = (struct mlx5_dev_config){
+ .mps = MLX5_ARG_UNSET,
+ .tx_vec_en = 1,
+ .rx_vec_en = 1,
+ .txq_inline = MLX5_ARG_UNSET,
+ .txqs_inline = MLX5_ARG_UNSET,
+ .txqs_vec = MLX5_ARG_UNSET,
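+ /* txqs_vec is resolved below: per device ID, else per architecture. */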
+ .inline_max_packet_sz = MLX5_ARG_UNSET,
+ .vf_nl_en = 1,
+ .mprq = {
+ .enabled = 0, /* Disabled by default. */
+ .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
+ .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
+ .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
+ },
+ };
+ /* Device-specific configuration. */
switch (pci_dev->id.device_id) {
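+ /* BlueField uses its own default Tx vectorization threshold. */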
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
+ dev_config.txqs_vec = MLX5_VPMD_MAX_TXQS_BLUEFIELD;
+ break;
case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
- vf = 1;
+ dev_config.vf = 1;
break;
default:
- vf = 0;
+ break;
}
+ /* Set architecture-dependent default value if unset. */
+ if (dev_config.txqs_vec == MLX5_ARG_UNSET)
+ dev_config.txqs_vec = MLX5_VPMD_MAX_TXQS;
for (i = 0; i != n; ++i) {
uint32_t restore;
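+ /* dev_config is passed by value: each spawn modifies its own copy. */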
- list[i].eth_dev = mlx5_dev_spawn
- (&pci_dev->device, list[i].ibv_dev, vf, &list[i].info);
+ list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
+ list[i].ibv_dev, dev_config,
+ &list[i].info);
if (!list[i].eth_dev) {
if (rte_errno != EBUSY && rte_errno != EEXIST)
break;
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
},
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
+ },
{
.vendor_id = 0
}
RTE_PCI_DRV_PROBE_AGAIN),
};
-#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
+#ifdef RTE_IBVERBS_LINK_DLOPEN
/**
* Suffix RTE_EAL_PMD_PATH with "-glue".
* cleanup all the Verbs resources even when the device was removed.
*/
setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
-#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
+#ifdef RTE_IBVERBS_LINK_DLOPEN
if (mlx5_glue_init())
return;
assert(mlx5_glue);