X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_mr.c;h=0c5403e493cd815bbc24fc81818112229888d8f3;hb=835731f63b0a89deedc6878a7028844b643fb54e;hp=dbcf0aac9d2c8d96045fdcb2cd74678f352bd572;hpb=9d60f54569fd836cba697661d71935b6305a4d91;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index dbcf0aac9d..0c5403e493 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -15,6 +15,8 @@
 #include "mlx5.h"
 #include "mlx5_mr.h"
 #include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
 
 struct mr_find_contig_memsegs_data {
 	uintptr_t addr;
@@ -57,7 +59,7 @@ mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
 	int i;
 	int rebuild = 0;
 
-	DEBUG("device %s free callback: addr=%p, len=%zu",
+	DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
 	      sh->ibdev_name, addr, len);
 	msl = rte_mem_virt2memseg_list(addr);
 	/* addr and len must be page-aligned. */
@@ -87,13 +89,13 @@ mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
 		pos = ms_idx - mr->ms_base_idx;
 		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
 		MLX5_ASSERT(pos < mr->ms_bmp_n);
-		DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
+		DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
 		      sh->ibdev_name, (void *)mr, pos, (void *)start);
 		rte_bitmap_clear(mr->ms_bmp, pos);
 		if (--mr->ms_n == 0) {
 			LIST_REMOVE(mr, mr);
 			LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
-			DEBUG("device %s remove MR(%p) from list",
+			DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
 				sh->ibdev_name, (void *)mr);
 		}
 		/*
@@ -105,18 +107,13 @@
 	if (rebuild) {
 		mlx5_mr_rebuild_cache(&sh->share_cache);
 		/*
-		 * Flush local caches by propagating invalidation across cores.
-		 * rte_smp_wmb() is enough to synchronize this event. If one of
-		 * freed memsegs is seen by other core, that means the memseg
-		 * has been allocated by allocator, which will come after this
-		 * free call. Therefore, this store instruction (incrementing
-		 * generation below) will be guaranteed to be seen by other core
-		 * before the core sees the newly allocated memory.
+		 * No explicit wmb is needed after updating dev_gen due to
+		 * store-release ordering in unlock that provides the
+		 * implicit barrier at the software visible level.
 		 */
 		++sh->share_cache.dev_gen;
-		DEBUG("broadcasting local cache flush, gen=%d",
+		DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
 			sh->share_cache.dev_gen);
-		rte_smp_wmb();
 	}
 	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
 }
@@ -404,23 +401,18 @@ mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr,
 		return -1;
 	}
 	LIST_REMOVE(mr, mr);
-	LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
-	DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
+	mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
+	DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id,
 	      (void *)mr);
 	mlx5_mr_rebuild_cache(&sh->share_cache);
 	/*
-	 * Flush local caches by propagating invalidation across cores.
-	 * rte_smp_wmb() is enough to synchronize this event. If one of
-	 * freed memsegs is seen by other core, that means the memseg
-	 * has been allocated by allocator, which will come after this
-	 * free call. Therefore, this store instruction (incrementing
-	 * generation below) will be guaranteed to be seen by other core
-	 * before the core sees the newly allocated memory.
+	 * No explicit wmb is needed after updating dev_gen due to
+	 * store-release ordering in unlock that provides the
+	 * implicit barrier at the software visible level.
 	 */
 	++sh->share_cache.dev_gen;
-	DEBUG("broadcasting local cache flush, gen=%d",
+	DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
 		sh->share_cache.dev_gen);
-	rte_smp_wmb();
 	rte_rwlock_read_unlock(&sh->share_cache.rwlock);
 	return 0;
 }
@@ -535,7 +527,21 @@ mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
 		.mr_ctrl = mr_ctrl,
 		.ret = 0,
 	};
+	uint32_t flags = rte_pktmbuf_priv_flags(mp);
 
+	if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) {
+		/*
+		 * The pinned external buffer should be registered for DMA
+		 * operations by application. The mem_list of the pool contains
+		 * the list of chunks with mbuf structures w/o built-in data
+		 * buffers and DMA actually does not happen there, no need
+		 * to create MR for these chunks.
+		 */
+		return 0;
+	}
+	DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
+		       "having %u chunks.", dev->data->port_id,
+		       mp->name, mp->nb_mem_chunks);
 	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
 	if (data.ret < 0 && rte_errno == ENXIO) {
 		/* Mempool may have externally allocated memory. */
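Note on the dropped rte_smp_wmb(): both hunks rely on the fact that the dev_gen increment is immediately followed by an unlock of sh->share_cache.rwlock, and, as the new comments state, the unlock provides store-release ordering, so the incremented generation is visible to any core that subsequently acquires the lock before that core can observe memory reallocated later. Below is a standalone C11 sketch of that release/acquire message-passing pattern; it is illustrative only (not driver code), and the names dev_gen and unlocked merely model the generation counter and the rwlock word.

/*
 * Minimal C11 model of "no wmb needed before a store-release unlock".
 * Build: cc -pthread sketch.c
 */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned int dev_gen;	/* plain store, like sh->share_cache.dev_gen */
static atomic_int unlocked;	/* models the rwlock word */

static void *writer(void *arg)
{
	(void)arg;
	++dev_gen;		/* local cache invalidation, ordinary store */
	/*
	 * Release store, as performed by the unlock: every store above is
	 * guaranteed to be visible to a thread that observes this store
	 * with acquire semantics, so no extra write barrier is required.
	 */
	atomic_store_explicit(&unlocked, 1, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* Acquire load, as performed when taking the lock on another core. */
	while (!atomic_load_explicit(&unlocked, memory_order_acquire))
		;		/* spin until the writer "unlocked" */
	assert(dev_gen == 1);	/* the increment is necessarily visible here */
	printf("observed dev_gen=%u\n", dev_gen);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}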
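Note on the new early return in mlx5_mr_update_mp(): for mempools created with pinned external buffers (RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF), the pool chunks hold only mbuf headers, so the PMD skips MR creation for them and expects the application to register the external data buffers for DMA itself. The sketch below shows what that application-side step can look like; the helper name map_pinned_pool_buffers() and the assumption that the caller still has the rte_pktmbuf_extmem descriptors it passed to rte_pktmbuf_pool_create_extbuf() are illustrative, not part of this patch.

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical application helper: DMA-map the pinned external buffer
 * areas of a pool so the device can reach them, since the driver no
 * longer creates MRs for such pools. */
static int
map_pinned_pool_buffers(uint16_t port_id, struct rte_mempool *mp,
			const struct rte_pktmbuf_extmem *ext_mem,
			unsigned int ext_num)
{
	struct rte_eth_dev_info dev_info;
	unsigned int i;
	int ret;

	/* Only pools created with pinned external buffers need this. */
	if (!(rte_pktmbuf_priv_flags(mp) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF))
		return 0;
	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	for (i = 0; i < ext_num; i++) {
		/* Register each external buffer area with the device; for
		 * mlx5 this ends up in the PMD's dma_map callback. */
		ret = rte_dev_dma_map(dev_info.device,
				      ext_mem[i].buf_ptr,
				      ext_mem[i].buf_iova,
				      ext_mem[i].buf_len);
		if (ret != 0)
			return ret;
	}
	return 0;
}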