#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
+#include <rte_eal_paging.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
/* Device parameter to configure implicit registration of mempool memory. */
#define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"
+/* Device parameter to configure the delay drop when creating Rxqs. */
+#define MLX5_DELAY_DROP "delay_drop"
+
/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;
},
};
-
#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16
mlx5_free(sh->aso_age_mng);
return -1;
}
- rte_spinlock_init(&sh->aso_age_mng->resize_sl);
+ rte_rwlock_init(&sh->aso_age_mng->resize_rwl);
rte_spinlock_init(&sh->aso_age_mng->free_sl);
LIST_INIT(&sh->aso_age_mng->free);
return 0;
static void
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
- struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
int i;
memset(&sh->cmng, 0, sizeof(sh->cmng));
TAILQ_INIT(&sh->cmng.counters[i]);
rte_spinlock_init(&sh->cmng.csl[i]);
}
- if (sh->devx && !haswell_broadwell_cpu) {
- sh->cmng.relaxed_ordering_write = attr->relaxed_ordering_write;
- sh->cmng.relaxed_ordering_read = attr->relaxed_ordering_read;
- }
}
/**
uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;
LIST_REMOVE(mng, next);
- claim_zero(mlx5_devx_cmd_destroy(mng->dm));
- claim_zero(mlx5_os_umem_dereg(mng->umem));
+ mlx5_os_wrapped_mkey_destroy(&mng->wm);
mlx5_free(mem);
}
}
if (sh->meter_aso_en) {
rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
+ rte_rwlock_init(&sh->mtrmng->pools_mng.resize_mtrwl);
LIST_INIT(&sh->mtrmng->pools_mng.meters);
}
sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flex_parser_profiles *prf =
- &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+ struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
return !!prf->obj;
}
mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flex_parser_profiles *prf =
- &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+ struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
struct mlx5_devx_graph_node_attr node = {
.modify_field_select = 0,
};
mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flex_parser_profiles *prf =
- &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+ struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
if (prf->obj)
mlx5_devx_cmd_destroy(prf->obj);
return tn_offloads;
}
-/*
- * Allocate Rx and Tx UARs in robust fashion.
- * This routine handles the following UAR allocation issues:
- *
- * - tries to allocate the UAR with the most appropriate memory
- * mapping type from the ones supported by the host
- *
- * - tries to allocate the UAR with non-NULL base address
- * OFED 5.0.x and Upstream rdma_core before v29 returned the NULL as
- * UAR base address if UAR was not the first object in the UAR page.
- * It caused the PMD failure and we should try to get another UAR
- * till we get the first one with non-NULL base address returned.
- */
+/* Fill all fields of UAR structure. */
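+/*
+ * The mapping-type fallback and allocation retries formerly done here are
+ * assumed to be handled inside mlx5_devx_uar_prepare().
+ */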
static int
-mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
- const struct mlx5_common_dev_config *config)
+mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh)
{
- uint32_t uar_mapping, retry;
- int err = 0;
- void *base_addr;
-
- for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
- /* Control the mapping type according to the settings. */
- uar_mapping = (config->dbnc == MLX5_TXDB_NCACHED) ?
- MLX5DV_UAR_ALLOC_TYPE_NC :
- MLX5DV_UAR_ALLOC_TYPE_BF;
-#else
- RTE_SET_USED(config);
- /*
- * It seems we have no way to control the memory mapping type
- * for the UAR, the default "Write-Combining" type is supposed.
- * The UAR initialization on queue creation queries the
- * actual mapping type done by Verbs/kernel and setups the
- * PMD datapath accordingly.
- */
- uar_mapping = 0;
-#endif
- sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
- uar_mapping);
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
- if (!sh->tx_uar &&
- uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
- if (config->dbnc == MLX5_TXDB_CACHED ||
- config->dbnc == MLX5_TXDB_HEURISTIC)
- DRV_LOG(WARNING, "Devarg tx_db_nc setting "
- "is not supported by DevX");
- /*
- * In some environments like virtual machine
- * the Write Combining mapped might be not supported
- * and UAR allocation fails. We try "Non-Cached"
- * mapping for the case. The tx_burst routines take
- * the UAR mapping type into account on UAR setup
- * on queue creation.
- */
- DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (BF)");
- uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
- sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
- uar_mapping);
- } else if (!sh->tx_uar &&
- uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
- if (config->dbnc == MLX5_TXDB_NCACHED)
- DRV_LOG(WARNING, "Devarg tx_db_nc settings "
- "is not supported by DevX");
- /*
- * If Verbs/kernel does not support "Non-Cached"
- * try the "Write-Combining".
- */
- DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (NC)");
- uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
- sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
- uar_mapping);
- }
-#endif
- if (!sh->tx_uar) {
- DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (BF/NC)");
- err = ENOMEM;
- goto exit;
- }
- base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
- if (base_addr)
- break;
- /*
- * The UARs are allocated by rdma_core within the
- * IB device context, on context closure all UARs
- * will be freed, should be no memory/object leakage.
- */
- DRV_LOG(DEBUG, "Retrying to allocate Tx DevX UAR");
- sh->tx_uar = NULL;
- }
- /* Check whether we finally succeeded with valid UAR allocation. */
- if (!sh->tx_uar) {
- DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (NULL base)");
- err = ENOMEM;
- goto exit;
- }
- for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
- uar_mapping = 0;
- sh->devx_rx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
- uar_mapping);
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
- if (!sh->devx_rx_uar &&
- uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
- /*
- * Rx UAR is used to control interrupts only,
- * should be no datapath noticeable impact,
- * can try "Non-Cached" mapping safely.
- */
- DRV_LOG(DEBUG, "Failed to allocate Rx DevX UAR (BF)");
- uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
- sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
- (sh->cdev->ctx, uar_mapping);
- }
-#endif
- if (!sh->devx_rx_uar) {
- DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (BF/NC)");
- err = ENOMEM;
- goto exit;
- }
- base_addr = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
- if (base_addr)
- break;
- /*
- * The UARs are allocated by rdma_core within the
- * IB device context, on context closure all UARs
- * will be freed, should be no memory/object leakage.
- */
- DRV_LOG(DEBUG, "Retrying to allocate Rx DevX UAR");
- sh->devx_rx_uar = NULL;
+ int ret;
+
+ ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to prepare Tx DevX UAR.");
+ return -rte_errno;
}
- /* Check whether we finally succeeded with valid UAR allocation. */
- if (!sh->devx_rx_uar) {
- DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (NULL base)");
- err = ENOMEM;
+ MLX5_ASSERT(sh->tx_uar.obj);
+ MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj));
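+ /* Prepare the Rx UAR; on failure, release the already prepared Tx UAR. */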
+ ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to prepare Rx DevX UAR.");
+ mlx5_devx_uar_release(&sh->tx_uar);
+ return -rte_errno;
}
-exit:
- return err;
+ MLX5_ASSERT(sh->rx_uar.obj);
+ MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj));
+ return 0;
}
-/**
- * Unregister the mempool from the protection domain.
- *
- * @param sh
- * Pointer to the device shared context.
- * @param mp
- * Mempool being unregistered.
- */
static void
-mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
- struct rte_mempool *mp)
+mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh)
{
- struct mlx5_mp_id mp_id;
-
- mlx5_mp_id_init(&mp_id, 0);
- if (mlx5_mr_mempool_unregister(&sh->cdev->mr_scache, mp, &mp_id) < 0)
- DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
- mp->name, sh->cdev->pd, rte_strerror(rte_errno));
+ mlx5_devx_uar_release(&sh->rx_uar);
+ mlx5_devx_uar_release(&sh->tx_uar);
}
/**
- * rte_mempool_walk() callback to register mempools
- * for the protection domain.
+ * rte_mempool_walk() callback to unregister Rx mempools.
+ * It is used when implicit mempool registration is disabled.
*
* @param mp
* The mempool being walked.
* Pointer to the device shared context.
*/
static void
-mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
+mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
{
struct mlx5_dev_ctx_shared *sh = arg;
- struct mlx5_mp_id mp_id;
- int ret;
- mlx5_mp_id_init(&mp_id, 0);
- ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd, mp,
- &mp_id);
- if (ret < 0 && rte_errno != EEXIST)
- DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
- mp->name, sh->cdev->pd, rte_strerror(rte_errno));
-}
-
-/**
- * rte_mempool_walk() callback to unregister mempools
- * from the protection domain.
- *
- * @param mp
- * The mempool being walked.
- * @param arg
- * Pointer to the device shared context.
- */
-static void
-mlx5_dev_ctx_shared_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
-{
- mlx5_dev_ctx_shared_mempool_unregister
- ((struct mlx5_dev_ctx_shared *)arg, mp);
-}
-
-/**
- * Mempool life cycle callback for Ethernet devices.
- *
- * @param event
- * Mempool life cycle event.
- * @param mp
- * Associated mempool.
- * @param arg
- * Pointer to a device shared context.
- */
-static void
-mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
- struct rte_mempool *mp, void *arg)
-{
- struct mlx5_dev_ctx_shared *sh = arg;
- struct mlx5_mp_id mp_id;
-
- switch (event) {
- case RTE_MEMPOOL_EVENT_READY:
- mlx5_mp_id_init(&mp_id, 0);
- if (mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd,
- mp, &mp_id) < 0)
- DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
- mp->name, sh->cdev->pd,
- rte_strerror(rte_errno));
- break;
- case RTE_MEMPOOL_EVENT_DESTROY:
- mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
- break;
- }
+ mlx5_dev_mempool_unregister(sh->cdev, mp);
}
/**
struct mlx5_dev_ctx_shared *sh = arg;
if (event == RTE_MEMPOOL_EVENT_DESTROY)
- mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+ mlx5_dev_mempool_unregister(sh->cdev, mp);
}
int
(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
return ret == 0 || rte_errno == EEXIST ? 0 : ret;
}
- /* Callback for this shared context may be already registered. */
- ret = rte_mempool_event_callback_register
- (mlx5_dev_ctx_shared_mempool_event_cb, sh);
- if (ret != 0 && rte_errno != EEXIST)
- return ret;
- /* Register mempools only once for this shared context. */
- if (ret == 0)
- rte_mempool_walk(mlx5_dev_ctx_shared_mempool_register_cb, sh);
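+ /* Implicit mempool registration is delegated to the common mlx5 device code. */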
+ return mlx5_dev_mempool_subscribe(sh->cdev);
+}
+
+/**
+ * Set up multiple TISs with different affinities according to
+ * the number of bonding ports.
+ *
+ * @param sh
+ *   Pointer to the shared device context.
+ *
+ * @return
+ * Zero on success, -1 otherwise.
+ */
+static int
+mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
+{
+ int i;
+ struct mlx5_devx_lag_context lag_ctx = { 0 };
+ struct mlx5_devx_tis_attr tis_attr = { 0 };
+
+ tis_attr.transport_domain = sh->td->id;
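+ /* On a bonding device, query the LAG context for the Tx port affinity mapping. */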
+ if (sh->bond.n_port) {
+ if (!mlx5_devx_cmd_query_lag(sh->cdev->ctx, &lag_ctx)) {
+ sh->lag.tx_remap_affinity[0] =
+ lag_ctx.tx_remap_affinity_1;
+ sh->lag.tx_remap_affinity[1] =
+ lag_ctx.tx_remap_affinity_2;
+ sh->lag.affinity_mode = lag_ctx.port_select_mode;
+ } else {
+ DRV_LOG(ERR, "Failed to query lag affinity.");
+ return -1;
+ }
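+ /* In TIS affinity mode, create one TIS per bonding port with an explicit affinity. */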
+ if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {
+ for (i = 0; i < sh->bond.n_port; i++) {
+ tis_attr.lag_tx_port_affinity =
+ MLX5_IFC_LAG_MAP_TIS_AFFINITY(i,
+ sh->bond.n_port);
+ sh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx,
+ &tis_attr);
+ if (!sh->tis[i]) {
+ DRV_LOG(ERR, "Failed to TIS %d/%d for bonding device"
+ " %s.", i, sh->bond.n_port,
+ sh->ibdev_name);
+ return -1;
+ }
+ }
+ DRV_LOG(DEBUG, "LAG number of ports : %d, affinity_1 & 2 : pf%d & %d.\n",
+ sh->bond.n_port, lag_ctx.tx_remap_affinity_1,
+ lag_ctx.tx_remap_affinity_2);
+ return 0;
+ }
+ if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)
+ DRV_LOG(INFO, "Device %s enabled HW hash based LAG.",
+ sh->ibdev_name);
+ }
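+ /* Otherwise create a single TIS with the default port affinity. */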
+ tis_attr.lag_tx_port_affinity = 0;
+ sh->tis[0] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
+ if (!sh->tis[0]) {
+ DRV_LOG(ERR, "Failed to TIS 0 for bonding device"
+ " %s.", sh->ibdev_name);
+ return -1;
+ }
return 0;
}
struct mlx5_dev_ctx_shared *sh;
int err = 0;
uint32_t i;
- struct mlx5_devx_tis_attr tis_attr = { 0 };
MLX5_ASSERT(spawn);
/* Secondary process should not create the shared context. */
err = ENOMEM;
goto error;
}
- tis_attr.transport_domain = sh->td->id;
- sh->tis = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
- if (!sh->tis) {
+ if (mlx5_setup_tis(sh)) {
DRV_LOG(ERR, "TIS allocation failure");
err = ENOMEM;
goto error;
}
- err = mlx5_alloc_rxtx_uars(sh, &sh->cdev->config);
+ err = mlx5_rxtx_uars_prepare(sh);
if (err)
goto error;
- MLX5_ASSERT(sh->tx_uar);
- MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar));
-
- MLX5_ASSERT(sh->devx_rx_uar);
- MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
- }
#ifndef RTE_ARCH_64
- /* Initialize UAR access locks for 32bit implementations. */
- rte_spinlock_init(&sh->uar_lock_cq);
- for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
- rte_spinlock_init(&sh->uar_lock[i]);
+ } else {
+ /* Initialize UAR access locks for 32bit implementations. */
+ rte_spinlock_init(&sh->uar_lock_cq);
+ for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+ rte_spinlock_init(&sh->uar_lock[i]);
#endif
+ }
mlx5_os_dev_shared_handler_install(sh);
if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
err = mlx5_flow_os_init_workspace_once();
pthread_mutex_destroy(&sh->txpp.mutex);
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
- if (sh->tis)
- claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
claim_zero(mlx5_devx_cmd_destroy(sh->td));
- if (sh->devx_rx_uar)
- mlx5_glue->devx_free_uar(sh->devx_rx_uar);
- if (sh->tx_uar)
- mlx5_glue->devx_free_uar(sh->tx_uar);
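+ /* TIS[0] may be created even when bond.n_port is 0, hence the do/while loop. */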
+ i = 0;
+ do {
+ if (sh->tis[i])
+ claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
+ } while (++i < (uint32_t)sh->bond.n_port);
+ mlx5_rxtx_uars_release(sh);
mlx5_free(sh);
MLX5_ASSERT(err > 0);
rte_errno = err;
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
int ret;
+ int i = 0;
pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (--sh->refcnt)
goto exit;
/* Stop watching for mempool events and unregister all mempools. */
- ret = rte_mempool_event_callback_unregister
- (mlx5_dev_ctx_shared_mempool_event_cb, sh);
- if (ret < 0 && rte_errno == ENOENT)
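+ /* The Rx mempool callback is only registered when implicit mempool registration is disabled. */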
+ if (!sh->cdev->config.mr_mempool_reg_en) {
ret = rte_mempool_event_callback_unregister
(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
- if (ret == 0)
- rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,
- sh);
+ if (ret == 0)
+ rte_mempool_walk
+ (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh);
+ }
/* Remove context from the global device list. */
LIST_REMOVE(sh, next);
- /* Release flow workspaces objects on the last device. */
- if (LIST_EMPTY(&mlx5_dev_ctx_list))
+ /* Release resources on the last device removal. */
+ if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
+ mlx5_os_net_cleanup();
mlx5_flow_os_release_workspace();
+ }
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
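+ /* Destroy the list of flex item parsers created on this shared context. */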
+ if (sh->flex_parsers_dv) {
+ mlx5_list_destroy(sh->flex_parsers_dv);
+ sh->flex_parsers_dv = NULL;
+ }
/*
* Ensure there is no async event handler installed.
* Only primary process handles async device events.
mlx5_aso_flow_mtrs_mng_close(sh);
mlx5_flow_ipool_destroy(sh);
mlx5_os_dev_shared_handler_uninstall(sh);
- if (sh->tx_uar) {
- mlx5_glue->devx_free_uar(sh->tx_uar);
- sh->tx_uar = NULL;
- }
- if (sh->tis)
- claim_zero(mlx5_devx_cmd_destroy(sh->tis));
+ mlx5_rxtx_uars_release(sh);
+ do {
+ if (sh->tis[i])
+ claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
+ } while (++i < sh->bond.n_port);
if (sh->td)
claim_zero(mlx5_devx_cmd_destroy(sh->td));
- if (sh->devx_rx_uar)
- mlx5_glue->devx_free_uar(sh->devx_rx_uar);
MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
pthread_mutex_destroy(&sh->txpp.mutex);
mlx5_free(sh);
struct rte_eth_udp_tunnel *udp_tunnel)
{
MLX5_ASSERT(udp_tunnel != NULL);
- if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+ if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
udp_tunnel->udp_port == 4789)
return 0;
- if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+ if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
udp_tunnel->udp_port == 4790)
return 0;
return -ENOTSUP;
* UAR register table follows the process private structure. BlueFlame
* registers for Tx queues are stored in the table.
*/
- ppriv_size =
- sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
+ ppriv_size = sizeof(struct mlx5_proc_priv) +
+ priv->txqs_n * sizeof(struct mlx5_uar_data);
ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size,
RTE_CACHE_LINE_SIZE, dev->device->numa_node);
if (!ppriv) {
}
ppriv->uar_table_sz = priv->txqs_n;
dev->process_private = ppriv;
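+ /* In the primary process, keep a reference to the process private data in the shared context. */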
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ priv->sh->pppriv = ppriv;
return 0;
}
mlx5_mp_os_req_stop_rxtx(dev);
/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
- if (priv->rxqs != NULL) {
+ mlx5_flex_item_port_cleanup(dev);
+ if (priv->rxq_privs != NULL) {
/* XXX race condition if mlx5_rx_burst() is still running. */
rte_delay_us_sleep(1000);
for (i = 0; (i != priv->rxqs_n); ++i)
mlx5_rxq_release(dev, i);
priv->rxqs_n = 0;
- priv->rxqs = NULL;
- }
- if (priv->representor) {
- /* Each representor has a dedicated interrupts handler */
- mlx5_free(dev->intr_handle);
- dev->intr_handle = NULL;
+ mlx5_free(priv->rxq_privs);
+ priv->rxq_privs = NULL;
}
if (priv->txqs != NULL) {
/* XXX race condition if mlx5_tx_burst() is still running. */
config->decap_en = !!tmp;
} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
config->allow_duplicate_pattern = !!tmp;
+ } else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
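+ /* The value is a bitmask: separate bits control standard and hairpin Rx queue delay drop. */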
+ config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
+ config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
} else {
- DRV_LOG(WARNING, "%s: unknown parameter", key);
- rte_errno = EINVAL;
- return -rte_errno;
+ DRV_LOG(WARNING,
+ "%s: unknown parameter, maybe it's for another class.",
+ key);
}
return 0;
}
int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
- const char **params = (const char *[]){
- MLX5_DRIVER_KEY,
- MLX5_RXQ_CQE_COMP_EN,
- MLX5_RXQ_PKT_PAD_EN,
- MLX5_RX_MPRQ_EN,
- MLX5_RX_MPRQ_LOG_STRIDE_NUM,
- MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
- MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
- MLX5_RXQS_MIN_MPRQ,
- MLX5_TXQ_INLINE,
- MLX5_TXQ_INLINE_MIN,
- MLX5_TXQ_INLINE_MAX,
- MLX5_TXQ_INLINE_MPW,
- MLX5_TXQS_MIN_INLINE,
- MLX5_TXQS_MAX_VEC,
- MLX5_TXQ_MPW_EN,
- MLX5_TXQ_MPW_HDR_DSEG_EN,
- MLX5_TXQ_MAX_INLINE_LEN,
- MLX5_TX_DB_NC,
- MLX5_TX_PP,
- MLX5_TX_SKEW,
- MLX5_TX_VEC_EN,
- MLX5_RX_VEC_EN,
- MLX5_L3_VXLAN_EN,
- MLX5_VF_NL_EN,
- MLX5_DV_ESW_EN,
- MLX5_DV_FLOW_EN,
- MLX5_DV_XMETA_EN,
- MLX5_LACP_BY_USER,
- MLX5_MR_EXT_MEMSEG_EN,
- MLX5_REPRESENTOR,
- MLX5_MAX_DUMP_FILES_NUM,
- MLX5_LRO_TIMEOUT_USEC,
- RTE_DEVARGS_KEY_CLASS,
- MLX5_HP_BUF_SIZE,
- MLX5_RECLAIM_MEM,
- MLX5_SYS_MEM_EN,
- MLX5_DECAP_EN,
- MLX5_ALLOW_DUPLICATE_PATTERN,
- MLX5_MR_MEMPOOL_REG_EN,
- NULL,
- };
struct rte_kvargs *kvlist;
int ret = 0;
- int i;
if (devargs == NULL)
return 0;
/* Following UGLY cast is done to pass checkpatch. */
- kvlist = rte_kvargs_parse(devargs->args, params);
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
if (kvlist == NULL) {
rte_errno = EINVAL;
return -rte_errno;
}
/* Process parameters. */
- for (i = 0; (params[i] != NULL); ++i) {
- if (rte_kvargs_count(kvlist, params[i])) {
- ret = rte_kvargs_process(kvlist, params[i],
- mlx5_args_check, config);
- if (ret) {
- rte_errno = EINVAL;
- rte_kvargs_free(kvlist);
- return -rte_errno;
- }
- }
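+ /* A NULL key list lets every devarg reach mlx5_args_check(), which only warns on unknown keys. */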
+ ret = rte_kvargs_process(kvlist, NULL, mlx5_args_check, config);
+ if (ret) {
+ rte_errno = EINVAL;
+ ret = -rte_errno;
}
rte_kvargs_free(kvlist);
- return 0;
+ return ret;
}
/**
.id_table = mlx5_pci_id_map,
.probe = mlx5_os_net_probe,
.remove = mlx5_net_remove,
- .dma_map = mlx5_net_dma_map,
- .dma_unmap = mlx5_net_dma_unmap,
.probe_again = 1,
.intr_lsc = 1,
.intr_rmv = 1,