The struct rte_memzone field .phys_addr is renamed to .iova.
The deprecated name is kept in an anonymous union, aliasing the same
storage, so existing users of .phys_addr keep compiling and the API
is not broken.
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
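
For illustration only (not part of the patch): because .phys_addr and
.iova are members of the same anonymous union, both names alias the
same storage and read the same value. A minimal sketch, assuming a
memzone has already been reserved:

    #include <assert.h>
    #include <rte_memzone.h>

    static void
    check_name_alias(const struct rte_memzone *mz)
    {
        /* The deprecated .phys_addr and the new .iova share storage
         * in the anonymous union, so both reads return the same value.
         */
        assert(mz->phys_addr == mz->iova);
    }

New code should use mz->iova; .phys_addr is kept only for backward
compatibility, as the diff below shows by converting in-tree users.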
}
queue->base_addr = (char *)qp_mz->addr;
- queue->base_phys_addr = qp_mz->phys_addr;
+ queue->base_phys_addr = qp_mz->iova;
if (qat_qp_check_queue_alignment(queue->base_phys_addr,
queue_size_bytes)) {
PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg);
return -ENOMEM;
}
- dma->paddr = (uint64_t) z->phys_addr;
+ dma->paddr = (uint64_t) z->iova;
dma->vaddr = z->addr;
PMD_DRV_LOG(DEBUG, "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
bnx2x_rx_queue_release(rxq);
return -ENOMEM;
}
- fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
+ fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->iova;
rxq->rx_ring = (uint64_t*)dma->addr;
memset((void *)rxq->rx_ring, 0, dma_size);
PMD_RX_LOG(ERR, "RCQ alloc failed");
return -ENOMEM;
}
- fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
+ fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->iova;
rxq->cq_ring = (union eth_rx_cqe*)dma->addr;
/* Link the CQ chain pages. */
bnx2x_tx_queue_release(txq);
return -ENOMEM;
}
- fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+ fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->iova;
txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
memset(txq->tx_ring, 0, tsize);
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
- mz_phys_addr = mz->phys_addr;
+ mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
- mz_phys_addr = mz->phys_addr;
+ mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
- mz_phys_addr = mz->phys_addr;
+ mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
rx_ring->bd = ((char *)mz->addr + ag_ring_start);
rx_ring_info->ag_desc_ring =
(struct rx_prod_pkt_bd *)rx_ring->bd;
- rx_ring->bd_dma = mz->phys_addr + ag_ring_start;
+ rx_ring->bd_dma = mz->iova + ag_ring_start;
rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
rx_ring->mem_zone = (const void *)mz;
if (!mz)
return -ENOMEM;
}
- mz_phys_addr = mz->phys_addr;
+ mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
if (metadata)
*(void **)metadata = s;
- *phys = (uint64_t)tz->phys_addr;
+ *phys = (uint64_t)tz->iova;
return tz->addr;
}
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
- txq->tx_ring_phys_addr = tz->phys_addr;
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (struct e1000_data_desc *) tz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
- rxq->rx_ring_phys_addr = rz->phys_addr;
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
- txq->tx_ring_phys_addr = tz->phys_addr;
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
/* Allocate software ring */
}
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
- rxq->rx_ring_phys_addr = rz->phys_addr;
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
/* Allocate software ring. */
mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \
memset(mz->addr, 0, size); \
virt = mz->addr; \
- phys = mz->phys_addr; \
+ phys = mz->iova; \
handle = mz; \
} while (0)
#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \
mz = rte_memzone_reserve(z_name, size, node, 0); \
memset(mz->addr, 0, size); \
virt = mz->addr; \
- phys = mz->phys_addr; \
+ phys = mz->iova; \
} while (0)
#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
0 /* cq_entry_enable */,
1 /* cq_message_enable */,
0 /* interrupt offset */,
- (u64)enic->wq[index].cqmsg_rz->phys_addr);
+ (u64)enic->wq[index].cqmsg_rz->iova);
}
vnic_intr_init(&enic->intr,
}
vaddr = rz->addr;
- *dma_handle = (dma_addr_t)rz->phys_addr;
+ *dma_handle = (dma_addr_t)rz->iova;
mze = rte_malloc("enic memzone entry",
sizeof(struct enic_memzone_entry), 0);
rte_spinlock_lock(&enic->memzone_list_lock);
LIST_FOREACH(mze, &enic->memzone_list, entries) {
if (mze->rz->addr == vaddr &&
- mze->rz->phys_addr == dma_handle)
+ mze->rz->iova == dma_handle)
break;
}
if (mze == NULL) {
return -ENOMEM;
}
q->hw_ring = mz->addr;
- q->hw_ring_phys_addr = mz->phys_addr;
+ q->hw_ring_phys_addr = mz->iova;
/* Check if number of descs satisfied Vector requirement */
if (!rte_is_power_of_2(nb_desc)) {
return -ENOMEM;
}
q->hw_ring = mz->addr;
- q->hw_ring_phys_addr = mz->phys_addr;
+ q->hw_ring_phys_addr = mz->iova;
/*
* allocate memory for the RS bit tracker. Enough slots to hold the
mem->size = size;
mem->va = mz->addr;
- mem->pa = mz->phys_addr;
+ mem->pa = mz->iova;
mem->zone = (const void *)mz;
PMD_DRV_LOG(DEBUG,
"memzone %s allocated with physical address: %"PRIu64,
goto fail_mem;
}
pf->fdir.prg_pkt = mz->addr;
- pf->fdir.dma_addr = mz->phys_addr;
+ pf->fdir.dma_addr = mz->iova;
pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
- rxq->rx_ring_phys_addr = rz->phys_addr;
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
- txq->tx_ring_phys_addr = tz->phys_addr;
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
txq->vsi = pf->fdir.fdir_vsi;
- txq->tx_ring_phys_addr = tz->phys_addr;
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
rxq->vsi = pf->fdir.fdir_vsi;
- rxq->rx_ring_phys_addr = rz->phys_addr;
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
/*
else
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
- txq->tx_ring_phys_addr = tz->phys_addr;
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
/* Allocate software ring */
IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
}
- rxq->rx_ring_phys_addr = rz->phys_addr;
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
/*
if (droq->info_mz == NULL)
return NULL;
- droq->info_list_dma = droq->info_mz->phys_addr;
+ droq->info_list_dma = droq->info_mz->iova;
droq->info_alloc_size = droq->info_mz->len;
droq->info_base_addr = (size_t)droq->info_mz->addr;
return -1;
}
- droq->desc_ring_dma = droq->desc_ring_mz->phys_addr;
+ droq->desc_ring_dma = droq->desc_ring_mz->iova;
droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;
lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
return -1;
}
- iq->base_addr_dma = iq->iq_mz->phys_addr;
+ iq->base_addr_dma = iq->iq_mz->iova;
iq->base_addr = (uint8_t *)iq->iq_mz->addr;
iq->max_count = num_descs;
}
/* Saving physical and virtual addresses for the RX ring */
- rxq->dma = (uint64_t)tz->phys_addr;
+ rxq->dma = (uint64_t)tz->iova;
rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
/* mbuf pointers array for referencing mbufs linked to RX descriptors */
txq->txq_flags = tx_conf->txq_flags;
/* Saving physical and virtual addresses for the TX ring */
- txq->dma = (uint64_t)tz->phys_addr;
+ txq->dma = (uint64_t)tz->iova;
txq->txds = (struct nfp_net_tx_desc *)tz->addr;
/* mbuf pointers array for referencing mbufs linked to TX descriptors */
*phys = 0;
return OSAL_NULL;
}
- *phys = mz->phys_addr;
+ *phys = mz->iova;
ecore_mz_mapping[ecore_mz_count++] = mz;
DP_VERBOSE(p_dev, ECORE_MSG_SP,
"Allocated dma memory size=%zu phys=0x%lx"
" virt=%p core=%d\n",
- mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
+ mz->len, (unsigned long)mz->iova, mz->addr, core_id);
return mz->addr;
}
*phys = 0;
return OSAL_NULL;
}
- *phys = mz->phys_addr;
+ *phys = mz->iova;
ecore_mz_mapping[ecore_mz_count++] = mz;
DP_VERBOSE(p_dev, ECORE_MSG_SP,
"Allocated aligned dma memory size=%zu phys=0x%lx"
" virt=%p core=%d\n",
- mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
+ mz->len, (unsigned long)mz->iova, mz->addr, core_id);
return mz->addr;
}
uint16_t j;
for (j = 0 ; j < ecore_mz_count; j++) {
- if (phys == ecore_mz_mapping[j]->phys_addr) {
+ if (phys == ecore_mz_mapping[j]->iova) {
DP_VERBOSE(p_dev, ECORE_MSG_SP,
"Free memzone %s\n", ecore_mz_mapping[j]->name);
rte_memzone_free(ecore_mz_mapping[j]);
}
/* configure filter with ECORE_SPQ_MODE_EBLOCK */
rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
- (dma_addr_t)mz->phys_addr,
+ (dma_addr_t)mz->iova,
pkt_len,
fdir_filter->action.rx_queue,
0, add);
return ENOMEM;
}
- esmp->esm_addr = mz->phys_addr;
+ esmp->esm_addr = mz->iova;
if (esmp->esm_addr == RTE_BAD_PHYS_ADDR) {
(void)rte_memzone_free(mz);
return EFAULT;
memset(rz->addr, 0, ring_size);
- rxq->phys = rz->phys_addr;
+ rxq->phys = rz->iova;
rxq->desc = rz->addr;
rxq->qlen_mask = desc_cnt - 1;
memset(rz->addr, 0, ring_size);
- sq->phys = rz->phys_addr;
+ sq->phys = rz->iova;
sq->desc = rz->addr;
sq->qlen_mask = desc_cnt - 1;
memset(rz->addr, 0, ring_size);
- rbdr->phys = rz->phys_addr;
+ rbdr->phys = rz->iova;
rbdr->tail = 0;
rbdr->next_tail = 0;
rbdr->desc = rz->addr;
memset(mz->addr, 0, mz->len);
- vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_mem = mz->iova;
vq->vq_ring_virt_mem = mz->addr;
PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
- (uint64_t)mz->phys_addr);
+ (uint64_t)mz->iova);
PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
(uint64_t)(uintptr_t)mz->addr);
txvq->port_id = dev->data->port_id;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
- txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+ txvq->virtio_net_hdr_mem = hdr_mz->iova;
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
cvq->vq = vq;
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
- cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+ cvq->virtio_net_hdr_mem = hdr_mz->iova;
memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
hw->cvq = cvq;
memset(mz->addr, 0, mz->len);
hw->shared = mz->addr;
- hw->sharedPA = mz->phys_addr;
+ hw->sharedPA = mz->iova;
/*
* Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
- hw->queueDescPA = mz->phys_addr;
+ hw->queueDescPA = mz->iova;
hw->queue_desc_len = (uint16_t)size;
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
memset(mz->addr, 0, mz->len);
hw->rss_conf = mz->addr;
- hw->rss_confPA = mz->phys_addr;
+ hw->rss_confPA = mz->iova;
}
return 0;
}
memset(mz->addr, 0, mz->len);
hw->memRegs = mz->addr;
- hw->memRegsPA = mz->phys_addr;
+ hw->memRegsPA = mz->iova;
}
num = hw->num_rx_queues;
/* cmd_ring initialization */
ring->base = mz->addr;
- ring->basePA = mz->phys_addr;
+ ring->basePA = mz->iova;
/* comp_ring initialization */
comp_ring->base = ring->base + ring->size;
/* cmd_ring0 initialization */
ring0->base = mz->addr;
- ring0->basePA = mz->phys_addr;
+ ring0->basePA = mz->iova;
/* cmd_ring1 initialization */
ring1->base = ring0->base + ring0->size;
mcfg->memzone_cnt++;
snprintf(mz->name, sizeof(mz->name), "%s", name);
- mz->phys_addr = rte_malloc_virt2iova(mz_addr);
+ mz->iova = rte_malloc_virt2iova(mz_addr);
mz->addr = mz_addr;
mz->len = (requested_len == 0 ? elem->size : requested_len);
mz->hugepage_sz = elem->ms->hugepage_sz;
for (i=0; i<RTE_MAX_MEMZONE; i++) {
if (mcfg->memzone[i].addr == NULL)
break;
- fprintf(f, "Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
+ fprintf(f, "Zone %u: name:<%s>, IO:0x%"PRIx64", len:0x%zx"
", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
mcfg->memzone[i].name,
- mcfg->memzone[i].phys_addr,
+ mcfg->memzone[i].iova,
mcfg->memzone[i].len,
mcfg->memzone[i].addr,
mcfg->memzone[i].socket_id,
#define RTE_MEMZONE_NAMESIZE 32 /**< Maximum length of memory zone name.*/
char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the memory zone. */
- phys_addr_t phys_addr; /**< Start physical address. */
+ RTE_STD_C11
+ union {
+ phys_addr_t phys_addr; /**< deprecated - Start physical address. */
+ rte_iova_t iova; /**< Start IO address. */
+ };
RTE_STD_C11
union {
void *addr; /**< Start virtual address. */
if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
paddr = RTE_BAD_PHYS_ADDR;
else
- paddr = mz->phys_addr;
+ paddr = mz->iova;
if (rte_eal_has_hugepages())
ret = rte_mempool_populate_phys(mp, mz->addr,
fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
fprintf(f, " flags=%x\n", mp->flags);
fprintf(f, " pool=%p\n", mp->pool_data);
- fprintf(f, " phys_addr=0x%" PRIx64 "\n", mp->mz->phys_addr);
+ fprintf(f, " iova=0x%" PRIx64 "\n", mp->mz->iova);
fprintf(f, " nb_mem_chunks=%u\n", mp->nb_mem_chunks);
fprintf(f, " size=%"PRIu32"\n", mp->size);
fprintf(f, " populated_size=%"PRIu32"\n", mp->populated_size);
/* Test if memory overlaps: return 1 if true, or 0 if false. */
static int
-is_memory_overlap(phys_addr_t ptr1, size_t len1, phys_addr_t ptr2, size_t len2)
+is_memory_overlap(rte_iova_t ptr1, size_t len1, rte_iova_t ptr2, size_t len2)
{
if (ptr2 >= ptr1 && (ptr2 - ptr1) < len1)
return 1;
printf("Unable to reserve 64-byte aligned memzone!\n");
return -1;
}
- if ((memzone_aligned_32->phys_addr & RTE_CACHE_LINE_MASK) != 0)
+ if ((memzone_aligned_32->iova & RTE_CACHE_LINE_MASK) != 0)
return -1;
if (((uintptr_t) memzone_aligned_32->addr & RTE_CACHE_LINE_MASK) != 0)
return -1;
printf("Unable to reserve 128-byte aligned memzone!\n");
return -1;
}
- if ((memzone_aligned_128->phys_addr & 127) != 0)
+ if ((memzone_aligned_128->iova & 127) != 0)
return -1;
if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
return -1;
printf("Unable to reserve 256-byte aligned memzone!\n");
return -1;
}
- if ((memzone_aligned_256->phys_addr & 255) != 0)
+ if ((memzone_aligned_256->iova & 255) != 0)
return -1;
if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
return -1;
printf("Unable to reserve 512-byte aligned memzone!\n");
return -1;
}
- if ((memzone_aligned_512->phys_addr & 511) != 0)
+ if ((memzone_aligned_512->iova & 511) != 0)
return -1;
if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
return -1;
printf("Unable to reserve 1024-byte aligned memzone!\n");
return -1;
}
- if ((memzone_aligned_1024->phys_addr & 1023) != 0)
+ if ((memzone_aligned_1024->iova & 1023) != 0)
return -1;
if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0)
return -1;
/* check that zones don't overlap */
printf("check overlapping\n");
- if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
- memzone_aligned_128->phys_addr, memzone_aligned_128->len))
+ if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
+ memzone_aligned_128->iova, memzone_aligned_128->len))
return -1;
- if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
- memzone_aligned_256->phys_addr, memzone_aligned_256->len))
+ if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
+ memzone_aligned_256->iova, memzone_aligned_256->len))
return -1;
- if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
- memzone_aligned_512->phys_addr, memzone_aligned_512->len))
+ if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
+ memzone_aligned_512->iova, memzone_aligned_512->len))
return -1;
- if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
- memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+ if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
+ memzone_aligned_1024->iova, memzone_aligned_1024->len))
return -1;
- if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
- memzone_aligned_256->phys_addr, memzone_aligned_256->len))
+ if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
+ memzone_aligned_256->iova, memzone_aligned_256->len))
return -1;
- if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
- memzone_aligned_512->phys_addr, memzone_aligned_512->len))
+ if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
+ memzone_aligned_512->iova, memzone_aligned_512->len))
return -1;
- if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
- memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+ if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
+ memzone_aligned_1024->iova, memzone_aligned_1024->len))
return -1;
- if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
- memzone_aligned_512->phys_addr, memzone_aligned_512->len))
+ if (is_memory_overlap(memzone_aligned_256->iova, memzone_aligned_256->len,
+ memzone_aligned_512->iova, memzone_aligned_512->len))
return -1;
- if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
- memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+ if (is_memory_overlap(memzone_aligned_256->iova, memzone_aligned_256->len,
+ memzone_aligned_1024->iova, memzone_aligned_1024->len))
return -1;
- if (is_memory_overlap(memzone_aligned_512->phys_addr, memzone_aligned_512->len,
- memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+ if (is_memory_overlap(memzone_aligned_512->iova, memzone_aligned_512->len,
+ memzone_aligned_1024->iova, memzone_aligned_1024->len))
return -1;
return 0;
}
uint32_t bound)
{
const struct rte_memzone *mz;
- phys_addr_t bmask;
+ rte_iova_t bmask;
- bmask = ~((phys_addr_t)bound - 1);
+ bmask = ~((rte_iova_t)bound - 1);
if ((mz = rte_memzone_reserve_bounded(name, len, SOCKET_ID_ANY, 0,
align, bound)) == NULL) {
return -1;
}
- if ((mz->phys_addr & ((phys_addr_t)align - 1)) != 0) {
+ if ((mz->iova & ((rte_iova_t)align - 1)) != 0) {
printf("%s(%s): invalid phys addr alignment\n",
__func__, mz->name);
return -1;
return -1;
}
- if ((mz->phys_addr & bmask) !=
- ((mz->phys_addr + mz->len - 1) & bmask)) {
+ if ((mz->iova & bmask) !=
+ ((mz->iova + mz->len - 1) & bmask)) {
printf("%s(%s): invalid memzone boundary %u crossed\n",
__func__, mz->name, bound);
return -1;
/* check cache-line alignments */
printf("check alignments and lengths\n");
- if ((memzone1->phys_addr & RTE_CACHE_LINE_MASK) != 0)
+ if ((memzone1->iova & RTE_CACHE_LINE_MASK) != 0)
return -1;
- if ((memzone2->phys_addr & RTE_CACHE_LINE_MASK) != 0)
+ if ((memzone2->iova & RTE_CACHE_LINE_MASK) != 0)
return -1;
- if (memzone3 != NULL && (memzone3->phys_addr & RTE_CACHE_LINE_MASK) != 0)
+ if (memzone3 != NULL && (memzone3->iova & RTE_CACHE_LINE_MASK) != 0)
return -1;
if ((memzone1->len & RTE_CACHE_LINE_MASK) != 0 || memzone1->len == 0)
return -1;
/* check that zones don't overlap */
printf("check overlapping\n");
- if (is_memory_overlap(memzone1->phys_addr, memzone1->len,
- memzone2->phys_addr, memzone2->len))
+ if (is_memory_overlap(memzone1->iova, memzone1->len,
+ memzone2->iova, memzone2->len))
return -1;
if (memzone3 != NULL &&
- is_memory_overlap(memzone1->phys_addr, memzone1->len,
- memzone3->phys_addr, memzone3->len))
+ is_memory_overlap(memzone1->iova, memzone1->len,
+ memzone3->iova, memzone3->len))
return -1;
if (memzone3 != NULL &&
- is_memory_overlap(memzone2->phys_addr, memzone2->len,
- memzone3->phys_addr, memzone3->len))
+ is_memory_overlap(memzone2->iova, memzone2->len,
+ memzone3->iova, memzone3->len))
return -1;
printf("check socket ID\n");