diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index da4e91fc24..0c5403e493 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -15,6 +15,8 @@
 #include "mlx5.h"
 #include "mlx5_mr.h"
 #include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
 
 struct mr_find_contig_memsegs_data {
 	uintptr_t addr;
@@ -57,7 +59,7 @@ mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
 	int i;
 	int rebuild = 0;
 
-	DEBUG("device %s free callback: addr=%p, len=%zu",
+	DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
 	      sh->ibdev_name, addr, len);
 	msl = rte_mem_virt2memseg_list(addr);
 	/* addr and len must be page-aligned. */
@@ -87,13 +89,13 @@ mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
 		pos = ms_idx - mr->ms_base_idx;
 		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
 		MLX5_ASSERT(pos < mr->ms_bmp_n);
-		DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
+		DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
 		      sh->ibdev_name, (void *)mr, pos, (void *)start);
 		rte_bitmap_clear(mr->ms_bmp, pos);
 		if (--mr->ms_n == 0) {
 			LIST_REMOVE(mr, mr);
 			LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
-			DEBUG("device %s remove MR(%p) from list",
+			DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
 			      sh->ibdev_name, (void *)mr);
 		}
 		/*
@@ -105,18 +107,13 @@ mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
 	if (rebuild) {
 		mlx5_mr_rebuild_cache(&sh->share_cache);
 		/*
-		 * Flush local caches by propagating invalidation across cores.
-		 * rte_smp_wmb() is enough to synchronize this event. If one of
-		 * freed memsegs is seen by other core, that means the memseg
-		 * has been allocated by allocator, which will come after this
-		 * free call. Therefore, this store instruction (incrementing
-		 * generation below) will be guaranteed to be seen by other core
-		 * before the core sees the newly allocated memory.
+		 * No explicit wmb is needed after updating dev_gen because the
+		 * store-release ordering of the unlock that follows provides
+		 * the implicit barrier at the software-visible level.
 		 */
 		++sh->share_cache.dev_gen;
-		DEBUG("broadcasting local cache flush, gen=%d",
+		DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
 			sh->share_cache.dev_gen);
-		rte_smp_wmb();
 	}
 	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
 }
@@ -405,22 +402,17 @@ mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr,
 	}
 	LIST_REMOVE(mr, mr);
 	mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
-	DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
+	DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id,
 	      (void *)mr);
 	mlx5_mr_rebuild_cache(&sh->share_cache);
 	/*
-	 * Flush local caches by propagating invalidation across cores.
-	 * rte_smp_wmb() is enough to synchronize this event. If one of
-	 * freed memsegs is seen by other core, that means the memseg
-	 * has been allocated by allocator, which will come after this
-	 * free call. Therefore, this store instruction (incrementing
-	 * generation below) will be guaranteed to be seen by other core
-	 * before the core sees the newly allocated memory.
+	 * No explicit wmb is needed after updating dev_gen because the
+	 * store-release ordering of the unlock that follows provides
+	 * the implicit barrier at the software-visible level.
 	 */
 	++sh->share_cache.dev_gen;
-	DEBUG("broadcasting local cache flush, gen=%d",
+	DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
 		sh->share_cache.dev_gen);
-	rte_smp_wmb();
 	rte_rwlock_read_unlock(&sh->share_cache.rwlock);
 	return 0;
 }
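
For context on the DEBUG -> DRV_LOG conversion above: DRV_LOG routes messages
through rte_log() with the driver's registered logtype, so their verbosity can
be tuned per driver at runtime (e.g. with an EAL --log-level argument) instead
of being compiled in or out. Below is a minimal sketch of that pattern, not
the actual mlx5 definition (which lives in mlx5_utils.h and is more elaborate);
my_logtype, pmd.net.mydrv and MY_DRV_LOG are illustrative names.

#include <rte_log.h>

/* Register an illustrative per-driver logtype with a default level of
 * NOTICE; the macro defines "int my_logtype" and fills it in at init. */
RTE_LOG_REGISTER(my_logtype, pmd.net.mydrv, NOTICE)

/* Prefix each message with the driver name and route it through rte_log(),
 * so "--log-level=pmd.net.mydrv:debug" enables DEBUG output at runtime. */
#define MY_DRV_LOG(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, my_logtype, \
		"mydrv: " fmt "\n", ##__VA_ARGS__)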
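
The substantive change is dropping rte_smp_wmb() after the dev_gen increment.
The reasoning encoded in the new comment: the increment happens while the
shared cache's rwlock is held, and releasing a lock has store-release
semantics, so any core that subsequently takes the lock (an acquire) must
observe the new generation along with the cache state rebuilt under the lock.
Here is a self-contained sketch of that release/acquire pairing, using plain
pthreads rather than DPDK's rte_rwlock; dev_gen, global_cache_invalidate and
local_cache_refresh are illustrative stand-ins, not mlx5 code.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for sh->share_cache: a generation counter and an rwlock
 * guarding the global cache. */
static pthread_rwlock_t cache_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int dev_gen;

/* Writer side: rebuild the global cache, then bump the generation.
 * The unlock is a store-release, so the new dev_gen is ordered before
 * the lock becomes available to other cores; no separate wmb needed. */
static void
global_cache_invalidate(void)
{
	pthread_rwlock_wrlock(&cache_rwlock);
	/* ... rebuild global cache entries here ... */
	++dev_gen;
	pthread_rwlock_unlock(&cache_rwlock); /* implicit release barrier */
}

/* Reader side: taking the lock is an acquire, pairing with the writer's
 * release, so a stale local generation is reliably detected. */
static void
local_cache_refresh(unsigned int *local_gen)
{
	pthread_rwlock_rdlock(&cache_rwlock);
	if (*local_gen != dev_gen) {
		/* ... flush per-core cache, reload from global cache ... */
		*local_gen = dev_gen;
	}
	pthread_rwlock_unlock(&cache_rwlock);
}

int
main(void)
{
	unsigned int local_gen = 0;

	global_cache_invalidate();
	local_cache_refresh(&local_gen);
	printf("local cache now at generation %u\n", local_gen);
	return 0;
}

Build with "cc -pthread demo.c". The trade-off of this design is that the
visibility guarantee now hinges on every reader of dev_gen synchronizing
through the same lock; a datapath that read the generation without taking the
lock would still need its own barrier or an atomic load with acquire
semantics.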