X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fmlx5%2Fmlx5_mr.c;h=0b6cfc8cb9317daade817a5dfb3365745d44027d;hb=2f6c2adbe550ea95a0f73c4f9a9cc5da890b9bf2;hp=3255393ca26fd67dea89ad836adee5d6e97a9555;hpb=87acdcc78d9170e6f12d43384aed7fa593d44ab0;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 3255393ca2..0b6cfc8cb9 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -15,6 +15,8 @@
 #include "mlx5.h"
 #include "mlx5_mr.h"
 #include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
 
 struct mr_find_contig_memsegs_data {
 	uintptr_t addr;
@@ -29,98 +31,6 @@ struct mr_update_mp_data {
 	int ret;
 };
 
-/**
- * Callback for memory free event. Iterate freed memsegs and check whether it
- * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a
- * result, the MR would be fragmented. If it becomes empty, the MR will be freed
- * later by mlx5_mr_garbage_collect(). Even if this callback is called from a
- * secondary process, the garbage collector will be called in primary process
- * as the secondary process can't call mlx5_mr_create().
- *
- * The global cache must be rebuilt if there's any change and this event has to
- * be propagated to dataplane threads to flush the local caches.
- *
- * @param sh
- *   Pointer to the Ethernet device shared context.
- * @param addr
- *   Address of freed memory.
- * @param len
- *   Size of freed memory.
- */
-static void
-mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
-			  const void *addr, size_t len)
-{
-	const struct rte_memseg_list *msl;
-	struct mlx5_mr *mr;
-	int ms_n;
-	int i;
-	int rebuild = 0;
-
-	DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
-		sh->ibdev_name, addr, len);
-	msl = rte_mem_virt2memseg_list(addr);
-	/* addr and len must be page-aligned. */
-	MLX5_ASSERT((uintptr_t)addr ==
-		    RTE_ALIGN((uintptr_t)addr, msl->page_sz));
-	MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
-	ms_n = len / msl->page_sz;
-	rte_rwlock_write_lock(&sh->share_cache.rwlock);
-	/* Clear bits of freed memsegs from MR. */
-	for (i = 0; i < ms_n; ++i) {
-		const struct rte_memseg *ms;
-		struct mr_cache_entry entry;
-		uintptr_t start;
-		int ms_idx;
-		uint32_t pos;
-
-		/* Find MR having this memseg. */
-		start = (uintptr_t)addr + i * msl->page_sz;
-		mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, start);
-		if (mr == NULL)
-			continue;
-		MLX5_ASSERT(mr->msl); /* Can't be external memory. */
-		ms = rte_mem_virt2memseg((void *)start, msl);
-		MLX5_ASSERT(ms != NULL);
-		MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
-		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
-		pos = ms_idx - mr->ms_base_idx;
-		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
-		MLX5_ASSERT(pos < mr->ms_bmp_n);
-		DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
-			sh->ibdev_name, (void *)mr, pos, (void *)start);
-		rte_bitmap_clear(mr->ms_bmp, pos);
-		if (--mr->ms_n == 0) {
-			LIST_REMOVE(mr, mr);
-			LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
-			DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
-				sh->ibdev_name, (void *)mr);
-		}
-		/*
-		 * MR is fragmented or will be freed. the global cache must be
-		 * rebuilt.
-		 */
-		rebuild = 1;
-	}
-	if (rebuild) {
-		mlx5_mr_rebuild_cache(&sh->share_cache);
-		/*
-		 * Flush local caches by propagating invalidation across cores.
-		 * rte_smp_wmb() is enough to synchronize this event. If one of
-		 * freed memsegs is seen by other core, that means the memseg
-		 * has been allocated by allocator, which will come after this
-		 * free call. Therefore, this store instruction (incrementing
-		 * generation below) will be guaranteed to be seen by other core
-		 * before the core sees the newly allocated memory.
-		 */
-		++sh->share_cache.dev_gen;
-		DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
-			sh->share_cache.dev_gen);
-		rte_smp_wmb();
-	}
-	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
-}
-
 /**
  * Callback for memory event. This can be called from both primary and secondary
  * process.
@@ -146,7 +56,8 @@ mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
 		/* Iterate all the existing mlx5 devices. */
 		LIST_FOREACH(sh, dev_list, mem_event_cb)
-			mlx5_mr_mem_event_free_cb(sh, addr, len);
+			mlx5_free_mr_by_addr(&sh->share_cache,
+					     sh->ibdev_name, addr, len);
 		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
 		break;
 	case RTE_MEM_EVENT_ALLOC:
@@ -409,18 +320,13 @@ mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr,
 		(void *)mr);
 	mlx5_mr_rebuild_cache(&sh->share_cache);
 	/*
-	 * Flush local caches by propagating invalidation across cores.
-	 * rte_smp_wmb() is enough to synchronize this event. If one of
-	 * freed memsegs is seen by other core, that means the memseg
-	 * has been allocated by allocator, which will come after this
-	 * free call. Therefore, this store instruction (incrementing
-	 * generation below) will be guaranteed to be seen by other core
-	 * before the core sees the newly allocated memory.
+	 * No explicit wmb is needed after updating dev_gen due to
+	 * store-release ordering in unlock that provides the
+	 * implicit barrier at the software visible level.
	 */
 	++sh->share_cache.dev_gen;
 	DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
 		sh->share_cache.dev_gen);
-	rte_smp_wmb();
 	rte_rwlock_read_unlock(&sh->share_cache.rwlock);
 	return 0;
 }
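
Note on the barrier change above (editorial addition, not part of the patch): both hunks that drop rte_smp_wmb() rely on the same generation-counter scheme. The control path bumps sh->share_cache.dev_gen while holding the cache rwlock, and the store-release semantics of the following unlock make the new generation visible to other cores, so no standalone write barrier is needed; data-path threads notice that their cached generation no longer matches dev_gen and flush their local MR caches. The sketch below illustrates that pattern using only the public rte_rwlock API; the type and function names (share_cache, local_cache, cache_invalidate, cache_lookup) are hypothetical and are not taken from the driver.

/*
 * Illustrative sketch of generation-based cache invalidation under an
 * rte_rwlock, assuming DPDK's <rte_rwlock.h> API. Hypothetical names.
 */
#include <stdint.h>
#include <rte_rwlock.h>

struct share_cache {
	rte_rwlock_t rwlock; /* Protects the global table and dev_gen. */
	uint32_t dev_gen;    /* Bumped whenever the global table changes. */
	/* ... global lookup table ... */
};

struct local_cache {
	uint32_t cur_gen;    /* Snapshot of dev_gen at the last refill. */
	/* ... per-lcore lookup table ... */
};

/* Control path: change the global table and signal data-path threads. */
static void
cache_invalidate(struct share_cache *sc)
{
	rte_rwlock_write_lock(&sc->rwlock);
	/* ... rebuild the global table here ... */
	/*
	 * No explicit rte_smp_wmb() after the increment: the write unlock
	 * below has release semantics, so any core that later takes the
	 * lock observes the updated dev_gen together with the new table.
	 */
	++sc->dev_gen;
	rte_rwlock_write_unlock(&sc->rwlock);
}

/* Data path: detect a stale per-lcore cache and resynchronize it. */
static void
cache_lookup(struct share_cache *sc, struct local_cache *lc)
{
	rte_rwlock_read_lock(&sc->rwlock);
	if (lc->cur_gen != sc->dev_gen) {
		/* ... flush the per-lcore table and refill from global ... */
		lc->cur_gen = sc->dev_gen;
	}
	/* ... perform the lookup under the read lock ... */
	rte_rwlock_read_unlock(&sc->rwlock);
}

This mirrors the reasoning in the patch's new comment: the increment of dev_gen is ordered before the lock release, so a separate rte_smp_wmb() after the increment is redundant.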