Caught by code review.
Using a random name for memzone allocations can result in init failures
in the unlikely case that a name collision occurs.
Use a simple 64-bit sequential generator instead.
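In each affected driver, the fix boils down to the same pattern: a per-module
static 64-bit counter, atomically incremented, replaces rte_rand() as the
memzone name suffix. A minimal sketch, assuming a hypothetical "foo" driver
prefix (not one of the drivers touched below):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#include <rte_memzone.h>

static const struct rte_memzone *
foo_alloc_dma_mem(uint64_t size, uint32_t alignment)
{
	/* Static counter: every allocation gets a unique, monotonically
	 * increasing suffix; a 64-bit counter cannot realistically wrap. */
	static uint64_t foo_dma_memzone_id;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "foo_dma_%" PRIu64,
		 __atomic_fetch_add(&foo_dma_memzone_id, 1, __ATOMIC_RELAXED));
	return rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					   RTE_MEMZONE_IOVA_CONTIG, alignment,
					   RTE_PGSIZE_2M);
}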
Fixes: 3f50f072ff06 ("i40e: fix memzone freeing")
Fixes: 22b123a36d07 ("net/avf: initialize PMD")
Fixes: 5f0978e96220 ("net/ice/base: add OS specific implementation")
Fixes: 737f30e1c3ab ("net/hns3: support command interface with firmware")
Cc: stable@dpdk.org
Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Min Hu (Connor) <humin29@huawei.com>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
#include <inttypes.h>
#include <rte_common.h>
-#include <rte_random.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
u64 size,
u32 alignment)
{
+ static uint64_t iavf_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
if (!mem)
return IAVF_ERR_PARAM;
- snprintf(z_name, sizeof(z_name), "iavf_dma_%"PRIu64, rte_rand());
+ snprintf(z_name, sizeof(z_name), "iavf_dma_%" PRIu64,
+ __atomic_fetch_add(&iavf_dma_memzone_id, 1, __ATOMIC_RELAXED));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
uint64_t size, uint32_t alignment)
{
+ static uint64_t hns3_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
- snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
+ snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
+ __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
u64 size,
u32 alignment)
{
+ static uint64_t i40e_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
if (!mem)
return I40E_ERR_PARAM;
- snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
+ snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
+ __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_log.h>
-#include <rte_random.h>
#include <rte_io.h>
#include "ice_alloc.h"
ice_alloc_dma_mem(__rte_unused struct ice_hw *hw,
struct ice_dma_mem *mem, u64 size)
{
+ static uint64_t ice_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
if (!mem)
return NULL;
- snprintf(z_name, sizeof(z_name), "ice_dma_%"PRIu64, rte_rand());
+ snprintf(z_name, sizeof(z_name), "ice_dma_%" PRIu64,
+ __atomic_fetch_add(&ice_dma_memzone_id, 1, __ATOMIC_RELAXED));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
0, RTE_PGSIZE_2M);
if (!mz)
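For reference, the failure mode the random names could hit: the memzone API
rejects duplicate names, so a rte_rand() collision made the second reservation
fail during init. A minimal illustration, with a hypothetical fixed name
standing in for two colliding random ones:

#include <rte_errno.h>
#include <rte_memzone.h>

/* Call after rte_eal_init(): the first reservation succeeds, the second
 * one under the same name returns NULL (rte_errno == EEXIST), which is
 * exactly what a colliding rte_rand() suffix triggered during init. */
static void
show_collision(void)
{
	const struct rte_memzone *a, *b;

	a = rte_memzone_reserve("colliding_name", 4096, SOCKET_ID_ANY, 0);
	b = rte_memzone_reserve("colliding_name", 4096, SOCKET_ID_ANY, 0);
	/* a != NULL, b == NULL */
}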