* eal: The ``rte_logs`` struct and global symbol were made private
  and are no longer part of the API.
+* mem: Removed the unioned field ``phys_addr`` from
+ the structures ``rte_memseg`` and ``rte_memzone``.
+ The field ``iova`` remains from the old unions (see the migration sketch below).
+
* mbuf: Removed the unioned field ``refcnt_atomic`` from
the structures ``rte_mbuf`` and ``rte_mbuf_ext_shared_info``.
The field ``refcnt`` is remaining from the old unions.
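A minimal migration sketch for code that previously read ``phys_addr`` from a memzone; the helper name, zone name, and size below are illustrative only and not part of this patch (an initialized EAL is assumed):

#include <rte_memzone.h>
#include <rte_memory.h>

/* Hypothetical helper: reserve a zone and return its IO address. */
static rte_iova_t
example_mz_iova(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve("example_mz", 4096, SOCKET_ID_ANY, 0);
	if (mz == NULL)
		return RTE_BAD_IOVA;

	/* Before this change: return mz->phys_addr; */
	return mz->iova; /* equals the physical address in IOVA-as-PA mode */
}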
memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
if (memseg)
- return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
+ return memseg->iova + RTE_PTR_DIFF(vaddr, memseg->addr);
return (size_t)NULL;
}
void *arg __rte_unused)
{
DPAAX_DEBUG("Walking for %p (pa=%"PRIu64") and len %zu",
- ms->addr, ms->phys_addr, len);
+ ms->addr, ms->iova, len);
dpaax_iova_table_update(rte_mem_virt2phy(ms->addr), ms->addr, len);
return 0;
}
ICP_QAT_FW_SLICE_XLAT);
comp_req->u1.xlt_pars.inter_buff_ptr =
- interm_buff_mz->phys_addr;
+ interm_buff_mz->iova;
}
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
}
mz_start = (uint8_t *)memzone->addr;
- mz_start_phys = memzone->phys_addr;
+ mz_start_phys = memzone->iova;
QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
", size required %d, size created %zu",
inter_buff_mz_name, mz_start, mz_start_phys,
cmd_q->qsize, SOCKET_ID_ANY);
cmd_q->qbase_addr = (void *)q_mz->addr;
cmd_q->qbase_desc = (void *)q_mz->addr;
- cmd_q->qbase_phys_addr = q_mz->phys_addr;
+ cmd_q->qbase_phys_addr = q_mz->iova;
cmd_q->qcontrol = 0;
/* init control reg to zero */
}
mem = rz->addr;
- dma_addr = rz->phys_addr;
+ dma_addr = rz->iova;
alloc_len = len;
memset(mem, 0, len);
* and only accepts 32 bit page frame number.
* Check if the allocated physical memory exceeds 16TB.
*/
- if ((mz->phys_addr + vq->vq_ring_size - 1)
+ if ((mz->iova + vq->vq_ring_size - 1)
>> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
"above 16TB!");
memset(mz->addr, 0, mz->len);
vq->mz = mz;
- vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_mem = mz->iova;
vq->vq_ring_virt_mem = mz->addr;
VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
- (uint64_t)mz->phys_addr);
+ (uint64_t)mz->iova);
VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
(uint64_t)(uintptr_t)mz->addr);
mem->size = size;
mem->va = mz->addr;
- mem->pa = mz->phys_addr;
+ mem->pa = mz->iova;
mem->zone = (const void *)mz;
PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
"%"PRIu64, mz->name, mem->pa);
memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
if (memseg)
- return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
+ return memseg->iova + RTE_PTR_DIFF(vaddr, memseg->addr);
return (size_t)NULL;
}
#define RTE_MEMSEG_FLAG_DO_NOT_FREE (1 << 0)
/**< Prevent this segment from being freed back to the OS. */
struct rte_memseg {
- RTE_STD_C11
- union {
- phys_addr_t phys_addr; /**< deprecated - Start physical address. */
- rte_iova_t iova; /**< Start IO address. */
- };
+ rte_iova_t iova; /**< Start IO address. */
RTE_STD_C11
union {
void *addr; /**< Start virtual address. */
#define RTE_MEMZONE_NAMESIZE 32 /**< Maximum length of memory zone name.*/
char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the memory zone. */
- RTE_STD_C11
- union {
- phys_addr_t phys_addr; /**< deprecated - Start physical address. */
- rte_iova_t iova; /**< Start IO address. */
- };
+ rte_iova_t iova; /**< Start IO address. */
RTE_STD_C11
union {
void *addr; /**< Start virtual address. */
/* TX RING */
kni->tx_q = kni->m_tx_q->addr;
kni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX);
- dev_info.tx_phys = kni->m_tx_q->phys_addr;
+ dev_info.tx_phys = kni->m_tx_q->iova;
/* RX RING */
kni->rx_q = kni->m_rx_q->addr;
kni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX);
- dev_info.rx_phys = kni->m_rx_q->phys_addr;
+ dev_info.rx_phys = kni->m_rx_q->iova;
/* ALLOC RING */
kni->alloc_q = kni->m_alloc_q->addr;
kni_fifo_init(kni->alloc_q, KNI_FIFO_COUNT_MAX);
- dev_info.alloc_phys = kni->m_alloc_q->phys_addr;
+ dev_info.alloc_phys = kni->m_alloc_q->iova;
/* FREE RING */
kni->free_q = kni->m_free_q->addr;
kni_fifo_init(kni->free_q, KNI_FIFO_COUNT_MAX);
- dev_info.free_phys = kni->m_free_q->phys_addr;
+ dev_info.free_phys = kni->m_free_q->iova;
/* Request RING */
kni->req_q = kni->m_req_q->addr;
kni_fifo_init(kni->req_q, KNI_FIFO_COUNT_MAX);
- dev_info.req_phys = kni->m_req_q->phys_addr;
+ dev_info.req_phys = kni->m_req_q->iova;
/* Response RING */
kni->resp_q = kni->m_resp_q->addr;
kni_fifo_init(kni->resp_q, KNI_FIFO_COUNT_MAX);
- dev_info.resp_phys = kni->m_resp_q->phys_addr;
+ dev_info.resp_phys = kni->m_resp_q->iova;
/* Req/Resp sync mem area */
kni->sync_addr = kni->m_sync_addr->addr;
dev_info.sync_va = kni->m_sync_addr->addr;
- dev_info.sync_phys = kni->m_sync_addr->phys_addr;
+ dev_info.sync_phys = kni->m_sync_addr->iova;
kni->pktmbuf_pool = pktmbuf_pool;
kni->group_id = conf->group_id;