#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "rte_pmd_mlx5.h"
}
/**
- * Unregister the mempool from the protection domain.
- *
- * @param sh
- * Pointer to the device shared context.
- * @param mp
- * Mempool being unregistered.
- */
-static void
-mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
- struct rte_mempool *mp)
-{
- struct mlx5_mp_id mp_id;
-
- mlx5_mp_id_init(&mp_id, 0);
- if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0)
- DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
- mp->name, sh->cdev->pd, rte_strerror(rte_errno));
-}
-
-/**
- * rte_mempool_walk() callback to register mempools
- * for the protection domain.
+ * rte_mempool_walk() callback to unregister Rx mempools.
+ * It is used when implicit mempool registration is disabled.
*
* @param mp
* The mempool being walked.
* Pointer to the device shared context.
*/
static void
-mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
+mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
{
struct mlx5_dev_ctx_shared *sh = arg;
- struct mlx5_mp_id mp_id;
- int ret;
-
- mlx5_mp_id_init(&mp_id, 0);
- ret = mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
- &mp_id);
- if (ret < 0 && rte_errno != EEXIST)
- DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
- mp->name, sh->cdev->pd, rte_strerror(rte_errno));
-}
-
-/**
- * rte_mempool_walk() callback to unregister mempools
- * from the protection domain.
- *
- * @param mp
- * The mempool being walked.
- * @param arg
- * Pointer to the device shared context.
- */
-static void
-mlx5_dev_ctx_shared_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
-{
- mlx5_dev_ctx_shared_mempool_unregister
- ((struct mlx5_dev_ctx_shared *)arg, mp);
-}
-/**
- * Mempool life cycle callback for Ethernet devices.
- *
- * @param event
- * Mempool life cycle event.
- * @param mp
- * Associated mempool.
- * @param arg
- * Pointer to a device shared context.
- */
-static void
-mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
- struct rte_mempool *mp, void *arg)
-{
- struct mlx5_dev_ctx_shared *sh = arg;
- struct mlx5_mp_id mp_id;
-
- switch (event) {
- case RTE_MEMPOOL_EVENT_READY:
- mlx5_mp_id_init(&mp_id, 0);
- if (mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
- &mp_id) < 0)
- DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
- mp->name, sh->cdev->pd,
- rte_strerror(rte_errno));
- break;
- case RTE_MEMPOOL_EVENT_DESTROY:
- mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
- break;
- }
+ mlx5_dev_mempool_unregister(sh->cdev, mp);
}
/**
struct mlx5_dev_ctx_shared *sh = arg;
if (event == RTE_MEMPOOL_EVENT_DESTROY)
- mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+ mlx5_dev_mempool_unregister(sh->cdev, mp);
}
int
(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
return ret == 0 || rte_errno == EEXIST ? 0 : ret;
}
- /* Callback for this shared context may be already registered. */
- ret = rte_mempool_event_callback_register
- (mlx5_dev_ctx_shared_mempool_event_cb, sh);
- if (ret != 0 && rte_errno != EEXIST)
- return ret;
- /* Register mempools only once for this shared context. */
- if (ret == 0)
- rte_mempool_walk(mlx5_dev_ctx_shared_mempool_register_cb, sh);
- return 0;
+ return mlx5_dev_mempool_subscribe(sh->cdev);
}
/**
for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
rte_spinlock_init(&sh->uar_lock[i]);
#endif
- /*
- * Once the device is added to the list of memory event
- * callback, its global MR cache table cannot be expanded
- * on the fly because of deadlock. If it overflows, lookup
- * should be done by searching MR list linearly, which is slow.
- *
- * At this point the device is not added to the memory
- * event list yet, context is just being created.
- */
- err = mlx5_mr_btree_init(&sh->share_cache.cache,
- MLX5_MR_BTREE_CACHE_N * 2,
- sh->numa_node);
- if (err) {
- err = rte_errno;
- goto error;
- }
- mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
- &sh->share_cache.dereg_mr_cb);
mlx5_os_dev_shared_handler_install(sh);
if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
err = mlx5_flow_os_init_workspace_once();
mlx5_flow_aging_init(sh);
mlx5_flow_counters_mng_init(sh);
mlx5_flow_ipool_create(sh, config);
- /* Add device to memory callback list. */
- rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
- LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
- sh, mem_event_cb);
- rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
/* Add context to the global device list. */
LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
rte_spinlock_init(&sh->geneve_tlv_opt_sl);
pthread_mutex_destroy(&sh->txpp.mutex);
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
- if (sh->share_cache.cache.table)
- mlx5_mr_btree_free(&sh->share_cache.cache);
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
if (--sh->refcnt)
goto exit;
/* Stop watching for mempool events and unregister all mempools. */
- ret = rte_mempool_event_callback_unregister
- (mlx5_dev_ctx_shared_mempool_event_cb, sh);
- if (ret < 0 && rte_errno == ENOENT)
+ if (!sh->cdev->config.mr_mempool_reg_en) {
ret = rte_mempool_event_callback_unregister
(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
- if (ret == 0)
- rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,
- sh);
- /* Remove from memory callback device list. */
- rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
- LIST_REMOVE(sh, mem_event_cb);
- rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
- /* Release created Memory Regions. */
- mlx5_mr_release_cache(&sh->share_cache);
+ if (ret == 0)
+ rte_mempool_walk
+ (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh);
+ }
/* Remove context from the global device list. */
LIST_REMOVE(sh, next);
/* Release flow workspaces objects on the last device. */
.id_table = mlx5_pci_id_map,
.probe = mlx5_os_net_probe,
.remove = mlx5_net_remove,
- .dma_map = mlx5_net_dma_map,
- .dma_unmap = mlx5_net_dma_unmap,
.probe_again = 1,
.intr_lsc = 1,
.intr_rmv = 1,