#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
struct mr_find_contig_memsegs_data {
uintptr_t addr;
int i;
int rebuild = 0;
- DEBUG("device %s free callback: addr=%p, len=%zu",
+ DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
sh->ibdev_name, addr, len);
msl = rte_mem_virt2memseg_list(addr);
/* addr and len must be page-aligned. */
pos = ms_idx - mr->ms_base_idx;
MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
MLX5_ASSERT(pos < mr->ms_bmp_n);
- DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
+ DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
sh->ibdev_name, (void *)mr, pos, (void *)start);
rte_bitmap_clear(mr->ms_bmp, pos);
if (--mr->ms_n == 0) {
LIST_REMOVE(mr, mr);
LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
- DEBUG("device %s remove MR(%p) from list",
+ DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
sh->ibdev_name, (void *)mr);
}
/*
if (rebuild) {
mlx5_mr_rebuild_cache(&sh->share_cache);
/*
- * Flush local caches by propagating invalidation across cores.
- * rte_smp_wmb() is enough to synchronize this event. If one of
- * freed memsegs is seen by other core, that means the memseg
- * has been allocated by allocator, which will come after this
- * free call. Therefore, this store instruction (incrementing
- * generation below) will be guaranteed to be seen by other core
- * before the core sees the newly allocated memory.
+ * No explicit wmb is needed after updating dev_gen: the rwlock
+ * unlock below performs a store-release, so the dev_gen update
+ * is guaranteed to be visible to other cores before they can
+ * observe the lock as released and read the MR cache.
*/
++sh->share_cache.dev_gen;
- DEBUG("broadcasting local cache flush, gen=%d",
+ DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
sh->share_cache.dev_gen);
- rte_smp_wmb();
}
rte_rwlock_write_unlock(&sh->share_cache.rwlock);
}
}
LIST_REMOVE(mr, mr);
mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
- DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
+ DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id,
(void *)mr);
mlx5_mr_rebuild_cache(&sh->share_cache);
/*
- * Flush local caches by propagating invalidation across cores.
- * rte_smp_wmb() is enough to synchronize this event. If one of
- * freed memsegs is seen by other core, that means the memseg
- * has been allocated by allocator, which will come after this
- * free call. Therefore, this store instruction (incrementing
- * generation below) will be guaranteed to be seen by other core
- * before the core sees the newly allocated memory.
+ * No explicit wmb is needed after updating dev_gen: the rwlock
+ * unlock below performs a store-release, so the dev_gen update
+ * is guaranteed to be visible to other cores before they can
+ * observe the lock as released and read the MR cache.
*/
++sh->share_cache.dev_gen;
- DEBUG("broadcasting local cache flush, gen=%d",
+ DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
sh->share_cache.dev_gen);
- rte_smp_wmb();
rte_rwlock_read_unlock(&sh->share_cache.rwlock);
return 0;
}