if (rebuild) {
mlx5_mr_rebuild_cache(&sh->share_cache);
/*
- * Flush local caches by propagating invalidation across cores.
- * rte_smp_wmb() is enough to synchronize this event. If one of
- * freed memsegs is seen by other core, that means the memseg
- * has been allocated by allocator, which will come after this
- * free call. Therefore, this store instruction (incrementing
- * generation below) will be guaranteed to be seen by other core
- * before the core sees the newly allocated memory.
+ * No explicit wmb is needed after updating dev_gen: the
+ * store-release ordering of the unlock that follows provides the
+ * implicit barrier at the software-visible level.
*/
++sh->share_cache.dev_gen;
DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
sh->share_cache.dev_gen);
- rte_smp_wmb();
}
rte_rwlock_write_unlock(&sh->share_cache.rwlock);
}
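
For illustration, a minimal pthread-based sketch (not the mlx5/DPDK code; cache_lock, cache_gen and global_cache_rebuild are hypothetical names) of the writer-side pattern the new comment relies on: the unlock at the end of the critical section is a release operation, so the generation bump is published to any thread that subsequently acquires the lock, without an explicit write barrier.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
    uint32_t cache_gen;                /* device-wide generation number */

    static void global_cache_rebuild(void)
    {
        /* Rebuild the shared cache contents here. */
    }

    static void cache_flush_writer(void)
    {
        pthread_rwlock_wrlock(&cache_lock);
        global_cache_rebuild();
        /*
         * Bumping the generation is enough: pthread_rwlock_unlock()
         * acts as a release operation, so a thread that later acquires
         * the lock observes both the rebuilt cache and the new
         * generation. An explicit write barrier here would be
         * redundant.
         */
        ++cache_gen;
        printf("broadcasting local cache flush, gen=%u\n",
               (unsigned int)cache_gen);
        pthread_rwlock_unlock(&cache_lock);
    }
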
(void *)mr);
mlx5_mr_rebuild_cache(&sh->share_cache);
/*
- * Flush local caches by propagating invalidation across cores.
- * rte_smp_wmb() is enough to synchronize this event. If one of
- * freed memsegs is seen by other core, that means the memseg
- * has been allocated by allocator, which will come after this
- * free call. Therefore, this store instruction (incrementing
- * generation below) will be guaranteed to be seen by other core
- * before the core sees the newly allocated memory.
+ * No explicit wmb is needed after updating dev_gen: the
+ * store-release ordering of the unlock that follows provides the
+ * implicit barrier at the software-visible level.
*/
++sh->share_cache.dev_gen;
DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
sh->share_cache.dev_gen);
- rte_smp_wmb();
rte_rwlock_read_unlock(&sh->share_cache.rwlock);
return 0;
}
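
The reader-side counterpart, again a hypothetical sketch reusing cache_lock and cache_gen from the writer sketch above: the ordering the comment relies on holds for a reader that takes the lock, because the acquire performed by rdlock pairs with the writer's release in unlock, so a stale local generation is reliably detected together with the rebuilt shared cache.

    #include <pthread.h>
    #include <stdint.h>

    /* Shared state declared in the writer sketch above (hypothetical). */
    extern pthread_rwlock_t cache_lock;
    extern uint32_t cache_gen;

    struct local_cache {
        uint32_t dev_gen;       /* generation this local cache was built at */
        /* ... cached address translations ... */
    };

    static void local_cache_refresh(struct local_cache *lc)
    {
        pthread_rwlock_rdlock(&cache_lock);     /* acquire: pairs with unlock */
        if (lc->dev_gen != cache_gen) {
            /* Shared cache was rebuilt; drop stale local entries. */
            lc->dev_gen = cache_gen;
            /* ... repopulate from the shared cache ... */
        }
        pthread_rwlock_unlock(&cache_lock);
    }
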