#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
+#include "mlx5_mr.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"
+/* Allow L3 VXLAN flow creation. */
+#define MLX5_L3_VXLAN_EN "l3_vxlan_en"
+
/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif
+static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
+
+/* Shared memory between primary and secondary processes. */
+struct mlx5_shared_data *mlx5_shared_data;
+
+/* Spinlock for mlx5_shared_data allocation. */
+static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+
/** Driver-specific log messages type. */
int mlx5_logtype;
+/**
+ * Prepare shared data between primary and secondary process.
+ *
+ * The primary process reserves a memzone named MZ_MLX5_PMD_SHARED_DATA
+ * sized for struct mlx5_shared_data; a secondary process looks up the
+ * memzone the primary already created. Either way mlx5_shared_data is
+ * pointed at the shared mapping. The whole sequence is serialized with
+ * mlx5_shared_data_lock and skipped once mlx5_shared_data is non-NULL,
+ * so repeated calls (one per probed device) are cheap and safe.
+ *
+ * NOTE(review): panics (rte_panic) instead of returning an error if the
+ * memzone cannot be reserved/found — callers have no failure path here.
+ */
+static void
+mlx5_prepare_shared_data(void)
+{
+ const struct rte_memzone *mz;
+
+ rte_spinlock_lock(&mlx5_shared_data_lock);
+ /* Double-checked under the lock: only the first caller per process
+ * actually reserves or looks up the memzone.
+ */
+ if (mlx5_shared_data == NULL) {
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* Allocate shared memory. */
+ mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
+ sizeof(*mlx5_shared_data),
+ SOCKET_ID_ANY, 0);
+ } else {
+ /* Lookup allocated shared memory. */
+ mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
+ }
+ if (mz == NULL)
+ rte_panic("Cannot allocate mlx5 shared data\n");
+ mlx5_shared_data = mz->addr;
+ /* Initialize shared data. Only the primary owns the
+ * memory-event callback list and its rwlock; secondaries
+ * merely attach to the already-initialized structure.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
+ rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
+ }
+ }
+ rte_spinlock_unlock(&mlx5_shared_data_lock);
+}
+
/**
* Retrieve integer value from environment variable.
*
priv->txqs = NULL;
}
mlx5_flow_delete_drop_queue(dev);
+ mlx5_mr_release(dev);
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
claim_zero(mlx5_glue->dealloc_pd(priv->pd));
if (ret)
DRV_LOG(WARNING, "port %u some flows still remain",
dev->data->port_id);
- ret = mlx5_mr_verify(dev);
- if (ret)
- DRV_LOG(WARNING, "port %u some memory region still remain",
- dev->data->port_id);
memset(priv, 0, sizeof(*priv));
}
config->tx_vec_en = !!tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
config->rx_vec_en = !!tmp;
+ } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
+ config->l3_vxlan_en = !!tmp;
} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
config->vf_nl_en = !!tmp;
} else {
MLX5_TXQ_MAX_INLINE_LEN,
MLX5_TX_VEC_EN,
MLX5_RX_VEC_EN,
+ MLX5_L3_VXLAN_EN,
MLX5_VF_NL_EN,
NULL,
};
struct ibv_counter_set_description cs_desc;
#endif
+ /* Prepare shared data between primary and secondary process. */
+ mlx5_prepare_shared_data();
assert(pci_drv == &mlx5_driver);
/* Get mlx5_dev[] index. */
idx = mlx5_dev_idx(&pci_dev->addr);
break;
}
if (attr_ctx == NULL) {
- mlx5_glue->free_device_list(list);
switch (err) {
case 0:
DRV_LOG(ERR,
"cannot access device, is mlx5_ib loaded?");
err = ENODEV;
- goto error;
+ break;
case EINVAL:
DRV_LOG(ERR,
"cannot use device, are drivers up to date?");
- goto error;
+ break;
}
+ goto error;
}
ibv_dev = list[i];
DRV_LOG(DEBUG, "device opened");
mps = MLX5_MPW_DISABLED;
}
#ifdef HAVE_IBV_MLX5_MOD_SWP
- if (attrs_out.comp_mask | MLX5DV_CONTEXT_MASK_SWP)
+ if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
swp = attrs_out.sw_parsing_caps.sw_parsing_offloads;
DRV_LOG(DEBUG, "SWP support: %u", swp);
#endif
DRV_LOG(WARNING,
"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
- if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) {
- err = errno;
+ err = mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr);
+ if (err) {
+ DEBUG("ibv_query_device_ex() failed");
goto error;
}
DRV_LOG(INFO, "%u port(s) detected",
eth_dev->device = &pci_dev->device;
eth_dev->dev_ops = &mlx5_dev_sec_ops;
err = mlx5_uar_init_secondary(eth_dev);
- if (err)
+ if (err) {
+ err = rte_errno;
goto error;
+ }
/* Receive command fd from primary process */
err = mlx5_socket_connect(eth_dev);
- if (err)
+ if (err < 0) {
+ err = rte_errno;
goto error;
+ }
/* Remap UAR for Tx queues. */
err = mlx5_tx_uar_remap(eth_dev, err);
- if (err)
+ if (err) {
+ err = rte_errno;
goto error;
+ }
/*
* Ethdev pointer is still required as input since
* the primary device is not accessible from the
if (err) {
DRV_LOG(ERR, "failed to process device arguments: %s",
strerror(err));
+ err = rte_errno;
goto port_error;
}
- if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) {
+ err = mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex);
+ if (err) {
DRV_LOG(ERR, "ibv_query_device_ex() failed");
- err = errno;
goto port_error;
}
config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
goto port_error;
}
eth_dev->data->dev_private = priv;
- priv->dev = eth_dev;
+ priv->dev_data = eth_dev->data;
eth_dev->data->mac_addrs = priv->mac;
eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->device->driver = &mlx5_driver.driver;
err = mlx5_uar_init_primary(eth_dev);
- if (err)
+ if (err) {
+ err = rte_errno;
goto port_error;
+ }
/* Configure the first MAC address by default. */
if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
DRV_LOG(ERR,
#endif
/* Get actual MTU if possible. */
err = mlx5_get_mtu(eth_dev, &priv->mtu);
- if (err)
+ if (err) {
+ err = rte_errno;
goto port_error;
+ }
DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
priv->mtu);
/*
if (err) {
DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
eth_dev->data->port_id, strerror(rte_errno));
+ err = rte_errno;
goto port_error;
}
/* Supported Verbs flow priority number detection. */
claim_zero(mlx5_glue->dealloc_pd(pd));
if (ctx)
claim_zero(mlx5_glue->close_device(ctx));
+ if (eth_dev && rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_eth_dev_release_port(eth_dev);
break;
}
/*
}
mlx5_glue->fork_init();
rte_pci_register(&mlx5_driver);
+ rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+ mlx5_mr_mem_event_cb, NULL);
}
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);