memset(&rx_ctx, 0, sizeof(rx_ctx));
- rx_ctx.base = rxq->rx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
rx_ctx.qlen = rxq->nb_rx_desc;
rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
txq_elem.num_txqs = 1;
txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
- tx_ctx.base = txq->tx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
+ tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->nb_tx_desc;
tx_ctx.pf_num = hw->pf_id;
tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
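The `base` fields above store the ring's DMA address divided by ICE_QUEUE_BASE_ADDR_UNIT. As a hedged aside, a minimal sketch of that conversion (the 128-byte unit size and the helper name are assumptions for illustration, not taken from the patch):

```c
#include <assert.h>
#include <stdint.h>

/*
 * Illustrative only: the ice driver's ICE_QUEUE_BASE_ADDR_UNIT is assumed to
 * be 128 bytes here. The queue-context 'base' field holds the ring IOVA in
 * those units rather than as a raw byte address; the ring is reserved with a
 * larger alignment, so the division is exact.
 */
#define QUEUE_BASE_ADDR_UNIT 128

static uint64_t
ring_base_in_units(uint64_t ring_dma)
{
	assert((ring_dma % QUEUE_BASE_ADDR_UNIT) == 0);
	return ring_dma / QUEUE_BASE_ADDR_UNIT;
}
```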
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
- rxq->rx_ring_phys_addr = rz->phys_addr;
+ rxq->rx_ring_dma = rz->iova;
rxq->rx_ring = (union ice_rx_desc *)rz->addr;
#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
- txq->tx_ring_phys_addr = tz->phys_addr;
+ txq->tx_ring_dma = tz->iova;
txq->tx_ring = (struct ice_tx_desc *)tz->addr;
/* Allocate software ring */
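For context, the value assigned to rx_ring_dma/tx_ring_dma is the memzone's iova field. A minimal, hedged sketch of that reservation pattern (the zone name, alignment constant, and helper are illustrative; the driver's actual allocation goes through its own ring-reservation path):

```c
#include <errno.h>
#include <rte_memzone.h>

/*
 * Hedged sketch of where rz->iova comes from: the descriptor ring lives in a
 * DMA-capable memzone, whose 'addr' is the CPU virtual address and whose
 * 'iova' is the bus address programmed into hardware.
 */
#define RING_BASE_ALIGN 128	/* assumed alignment for this example */

static int
reserve_desc_ring(const char *name, size_t ring_size, int socket_id,
		  void **ring_va, rte_iova_t *ring_dma)
{
	const struct rte_memzone *rz;

	rz = rte_memzone_reserve_aligned(name, ring_size, socket_id,
					 0, RING_BASE_ALIGN);
	if (rz == NULL)
		return -ENOMEM;

	*ring_va = rz->addr;	/* virtual address used by the driver */
	*ring_dma = rz->iova;	/* DMA address stored in *_ring_dma */
	return 0;
}
```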
struct ice_rx_queue {
struct rte_mempool *mp; /* mbuf pool to populate RX ring */
volatile union ice_rx_desc *rx_ring;/* RX ring virtual address */
- uint64_t rx_ring_phys_addr; /* RX ring DMA address */
+ rte_iova_t rx_ring_dma; /* RX ring DMA address */
struct ice_rx_entry *sw_ring; /* address of RX soft ring */
uint16_t nb_rx_desc; /* number of RX descriptors */
uint16_t rx_free_thresh; /* max free RX desc to hold */
struct ice_tx_queue {
uint16_t nb_tx_desc; /* number of TX descriptors */
- uint64_t tx_ring_phys_addr; /* TX ring DMA address */
+ rte_iova_t tx_ring_dma; /* TX ring DMA address */
volatile struct ice_tx_desc *tx_ring; /* TX ring virtual address */
struct ice_tx_entry *sw_ring; /* virtual address of SW ring */
uint16_t tx_tail; /* current value of tail register */
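The struct fields switch from a raw uint64_t physical address to rte_iova_t, DPDK's neutral IO-address type (a physical address in PA mode, an IOVA in VA mode), which is why the fields are also renamed from *_phys_addr to *_dma. A small hedged sketch using the library's invalid-address sentinel, purely for illustration:

```c
#include <rte_memory.h>

/*
 * Illustrative check only, not something the patch adds: RTE_BAD_IOVA is
 * DPDK's invalid-IO-address sentinel, so a driver could sanity-check the
 * ring address before programming the queue context.
 */
static inline int
ring_dma_is_valid(rte_iova_t ring_dma)
{
	return ring_dma != RTE_BAD_IOVA && ring_dma != 0;
}
```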