Rename the data type phys_addr_t to rte_iova_t.
Related names are updated as well: RTE_BAD_PHYS_ADDR becomes RTE_BAD_IOVA and nicvf_phys_addr_t becomes nicvf_iova_addr_t.
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Reviewed-by: Anatoly Burakov <anatoly.burakov@intel.com>
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
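
For illustration only (not part of the patch), a minimal sketch of the pattern the converted drivers follow after the rename: the IO address of an rte_zmalloc'd descriptor ring is stored in an rte_iova_t and checked against RTE_BAD_IOVA instead of RTE_BAD_PHYS_ADDR. The function name example_ring_dma_setup and the "example ring" tag are hypothetical.

    #include <rte_malloc.h>
    #include <rte_memory.h>

    static int
    example_ring_dma_setup(void **ring, rte_iova_t *ring_iova, size_t nb_desc)
    {
            /* allocate a zeroed, 64-byte aligned descriptor ring */
            *ring = rte_zmalloc_socket("example ring",
                                       nb_desc * sizeof(rte_iova_t),
                                       64, SOCKET_ID_ANY);
            if (*ring == NULL)
                    return -1;

            /* resolve the IO (bus) address of the ring */
            *ring_iova = rte_malloc_virt2iova(*ring);
            if (*ring_iova == RTE_BAD_IOVA) {
                    rte_free(*ring);
                    return -1;
            }
            return 0;
    }
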
}
void
-ark_ddm_setup(struct ark_ddm_t *ddm, phys_addr_t cons_addr, uint32_t interval)
+ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr, uint32_t interval)
{
ddm->setup.cons_write_index_addr = cons_addr;
ddm->setup.write_index_interval = interval / 4; /* 4 ns period */
#define ARK_DDM_SETUP 0x00e0
struct ark_ddm_setup_t {
- phys_addr_t cons_write_index_addr;
+ rte_iova_t cons_write_index_addr;
uint32_t write_index_interval; /* 4ns each */
volatile uint32_t cons_index;
};
int ark_ddm_stop(struct ark_ddm_t *ddm, const int wait);
void ark_ddm_reset(struct ark_ddm_t *ddm);
void ark_ddm_stats_reset(struct ark_ddm_t *ddm);
-void ark_ddm_setup(struct ark_ddm_t *ddm, phys_addr_t cons_addr,
+void ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr,
uint32_t interval);
void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg);
void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg);
struct rte_mbuf **reserve_q;
/* array of physical addresses of the mbuf data pointer */
/* This pointer is a virtual address */
- phys_addr_t *paddress_q;
+ rte_iova_t *paddress_q;
struct rte_mempool *mb_pool;
struct ark_udm_t *udm;
struct ark_rx_queue *queue,
uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx)
{
- phys_addr_t queue_base;
- phys_addr_t phys_addr_q_base;
- phys_addr_t phys_addr_prod_index;
+ rte_iova_t queue_base;
+ rte_iova_t phys_addr_q_base;
+ rte_iova_t phys_addr_prod_index;
queue_base = rte_malloc_virt2iova(queue);
phys_addr_prod_index = queue_base +
phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q);
/* Verify HW */
- if (ark_mpu_verify(queue->mpu, sizeof(phys_addr_t))) {
+ if (ark_mpu_verify(queue->mpu, sizeof(rte_iova_t))) {
PMD_DRV_LOG(ERR, "Illegal configuration rx queue\n");
return -1;
}
socket_id);
queue->paddress_q =
rte_zmalloc_socket("Ark_rx_queue paddr",
- nb_desc * sizeof(phys_addr_t),
+ nb_desc * sizeof(rte_iova_t),
64,
socket_id);
static int
eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
{
- phys_addr_t queue_base, ring_base, cons_index_addr;
+ rte_iova_t queue_base, ring_base, cons_index_addr;
uint32_t write_interval_ns;
/* Verify HW -- MPU */
}
int
-ark_mpu_configure(struct ark_mpu_t *mpu, phys_addr_t ring, uint32_t ring_size,
+ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring, uint32_t ring_size,
int is_tx)
{
ark_mpu_reset(mpu);
#define ARK_MPU_CFG 0x040
struct ark_mpu_cfg_t {
- phys_addr_t ring_base; /* phys_addr_t is a uint64_t */
+ rte_iova_t ring_base; /* rte_iova_t is a uint64_t */
uint32_t ring_size;
uint32_t ring_mask;
uint32_t min_host_move;
void ark_mpu_stop(struct ark_mpu_t *mpu);
void ark_mpu_start(struct ark_mpu_t *mpu);
int ark_mpu_reset(struct ark_mpu_t *mpu);
-int ark_mpu_configure(struct ark_mpu_t *mpu, phys_addr_t ring,
+int ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring,
uint32_t ring_size, int is_tx);
void ark_mpu_dump(struct ark_mpu_t *mpu, const char *msg, uint16_t idx);
}
void
-ark_udm_write_addr(struct ark_udm_t *udm, phys_addr_t addr)
+ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr)
{
udm->rt_cfg.hw_prod_addr = addr;
}
#define ARK_UDM_RT_CFG 0x00e0
struct ark_udm_rt_cfg_t {
- phys_addr_t hw_prod_addr;
+ rte_iova_t hw_prod_addr;
uint32_t write_interval; /* 4ns cycles */
volatile uint32_t prod_idx; /* RO */
};
uint32_t headroom,
uint32_t dataroom,
uint32_t write_interval_ns);
-void ark_udm_write_addr(struct ark_udm_t *udm, phys_addr_t addr);
+void ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr);
void ark_udm_stats_reset(struct ark_udm_t *udm);
void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg);
void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg,
/* translate from host physical address to guest virtual address */
static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
- phys_addr_t host_phys_addr)
+ rte_iova_t host_phys_addr)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_mem_resource *resource;
*/
struct rte_avp_memmap {
void *addr;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
uint64_t length;
};
*/
struct rte_avp_mempool_info {
void *addr;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
uint64_t length;
};
char ifname[RTE_AVP_NAMESIZE]; /**< Network device name for AVP */
- phys_addr_t tx_phys;
- phys_addr_t rx_phys;
- phys_addr_t alloc_phys;
- phys_addr_t free_phys;
+ rte_iova_t tx_phys;
+ rte_iova_t rx_phys;
+ rte_iova_t alloc_phys;
+ rte_iova_t free_phys;
uint32_t features; /**< Supported feature bitmap */
uint8_t min_rx_queues; /**< Minimum supported receive/free queues */
uint32_t free_size; /**< Size of each free queue */
/* Used by Ethtool */
- phys_addr_t req_phys;
- phys_addr_t resp_phys;
- phys_addr_t sync_phys;
+ rte_iova_t req_phys;
+ rte_iova_t resp_phys;
+ rte_iova_t sync_phys;
void *sync_va;
/* mbuf mempool (used when a single memory area is supported) */
void *mbuf_va;
- phys_addr_t mbuf_phys;
+ rte_iova_t mbuf_phys;
/* mbuf mempools */
struct rte_avp_mempool_info pool[RTE_AVP_MAX_MEMPOOLS];
}
void
-bnx2x_write_dmae(struct bnx2x_softc *sc, phys_addr_t dma_addr, uint32_t dst_addr,
+bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr, uint32_t dst_addr,
uint32_t len32)
{
struct dmae_command dmae;
}
static void
-bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, phys_addr_t phys_addr,
+bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
uint32_t addr, uint32_t len)
{
uint32_t dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
static void
__storm_memset_dma_mapping(struct bnx2x_softc *sc, uint32_t addr,
- phys_addr_t mapping)
+ rte_iova_t mapping)
{
REG_WR(sc, addr, U64_LO(mapping));
REG_WR(sc, (addr + 4), U64_HI(mapping));
}
static void
-storm_memset_spq_addr(struct bnx2x_softc *sc, phys_addr_t mapping,
+storm_memset_spq_addr(struct bnx2x_softc *sc, rte_iova_t mapping,
uint16_t abs_fid)
{
uint32_t addr = (XSEM_REG_FAST_MEMORY +
ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata);
ramrod_param.rdata_mapping =
- (phys_addr_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata),
bnx2x_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
ramrod_param.ramrod_flags = ramrod_flags;
ecore_init_func_obj(sc,
&sc->func_obj,
BNX2X_SP(sc, func_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, func_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, func_rdata),
BNX2X_SP(sc, func_afex_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, func_afex_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, func_afex_rdata),
&bnx2x_func_sp_drv);
}
}
static void
-bnx2x_init_sb(struct bnx2x_softc *sc, phys_addr_t busaddr, int vfid,
+bnx2x_init_sb(struct bnx2x_softc *sc, rte_iova_t busaddr, int vfid,
uint8_t vf_valid, int fw_sb_id, int igu_sb_id)
{
struct hc_status_block_data_e2 sb_data_e2;
sc->max_cos,
SC_FUNC(sc),
BNX2X_SP(sc, q_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, q_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, q_rdata),
q_type);
/* configure classification DBs */
idx,
SC_FUNC(sc),
BNX2X_SP(sc, mac_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, mac_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, mac_rdata),
ECORE_FILTER_MAC_PENDING, &sc->sp_state,
ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool);
}
static void bnx2x_init_def_sb(struct bnx2x_softc *sc)
{
struct host_sp_status_block *def_sb = sc->def_sb;
- phys_addr_t mapping = sc->def_sb_dma.paddr;
+ rte_iova_t mapping = sc->def_sb_dma.paddr;
int igu_sp_sb_index;
int igu_seg_id;
int port = SC_PORT(sc);
SC_FUNC(sc),
SC_FUNC(sc),
BNX2X_SP(sc, mcast_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, mcast_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, mcast_rdata),
ECORE_FILTER_MCAST_PENDING,
&sc->sp_state, o_type);
SC_FUNC(sc),
SC_FUNC(sc),
BNX2X_SP(sc, rss_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, rss_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, rss_rdata),
ECORE_FILTER_RSS_CONF_PENDING,
&sc->sp_state, ECORE_OBJ_TYPE_RX);
}
pause->pri_map = 1;
/* rxq setup */
- rxq_init->dscr_map = (phys_addr_t)rxq->rx_ring_phys_addr;
- rxq_init->rcq_map = (phys_addr_t)rxq->cq_ring_phys_addr;
- rxq_init->rcq_np_map = (phys_addr_t)(rxq->cq_ring_phys_addr +
+ rxq_init->dscr_map = (rte_iova_t)rxq->rx_ring_phys_addr;
+ rxq_init->rcq_map = (rte_iova_t)rxq->cq_ring_phys_addr;
+ rxq_init->rcq_np_map = (rte_iova_t)(rxq->cq_ring_phys_addr +
BNX2X_PAGE_SIZE);
/*
PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
return;
}
- txq_init->dscr_map = (phys_addr_t)txq->tx_ring_phys_addr;
+ txq_init->dscr_map = (rte_iova_t)txq->tx_ring_phys_addr;
txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
for (i = 0; i < L2_ILT_LINES(sc); i++) {
ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
- (phys_addr_t)sc->context[i].vcxt_dma.paddr;
+ (rte_iova_t)sc->context[i].vcxt_dma.paddr;
ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
}
ecore_ilt_init_op(sc, INITOP_SET);
}
}
-static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, phys_addr_t addr)
+static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, rte_iova_t addr)
{
int reg;
uint32_t wb_write[2];
}
static void
-ecore_write_dmae_phys_len(struct bnx2x_softc *sc, phys_addr_t phys_addr,
+ecore_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
uint32_t addr, uint32_t len)
{
bnx2x_write_dmae_phys_len(sc, phys_addr, addr, len);
/* Used to manage DMA allocations. */
struct bnx2x_dma {
struct bnx2x_softc *sc;
- phys_addr_t paddr;
+ rte_iova_t paddr;
void *vaddr;
int nseg;
char msg[RTE_MEMZONE_NAMESIZE - 6];
struct bnx2x_dma sb_dma;
union bnx2x_host_hc_status_block status_block;
- phys_addr_t tx_desc_mapping;
+ rte_iova_t tx_desc_mapping;
- phys_addr_t rx_desc_mapping;
- phys_addr_t rx_comp_mapping;
+ rte_iova_t rx_desc_mapping;
+ rte_iova_t rx_comp_mapping;
uint16_t *sb_index_values;
uint16_t *sb_running_index;
struct hw_context {
struct bnx2x_dma vcxt_dma;
union cdu_context *vcxt;
- //phys_addr_t cxt_mapping;
+ //rte_iova_t cxt_mapping;
size_t size;
};
uint32_t gz_outlen;
#define GUNZIP_BUF(sc) (sc->gz_buf)
#define GUNZIP_OUTLEN(sc) (sc->gz_outlen)
-#define GUNZIP_PHYS(sc) (phys_addr_t)(sc->gz_buf_dma.paddr)
+#define GUNZIP_PHYS(sc) (rte_iova_t)(sc->gz_buf_dma.paddr)
#define FW_BUF_SIZE 0x40000
struct raw_op *init_ops;
*/
int fw_stats_req_size;
struct bnx2x_fw_stats_req *fw_stats_req;
- phys_addr_t fw_stats_req_mapping;
+ rte_iova_t fw_stats_req_mapping;
/*
* FW statistics data shortcut (points at the beginning of fw_stats
* buffer + fw_stats_req_size).
*/
int fw_stats_data_size;
struct bnx2x_fw_stats_data *fw_stats_data;
- phys_addr_t fw_stats_data_mapping;
+ rte_iova_t fw_stats_data_mapping;
/* tracking a pending STAT_QUERY ramrod */
uint16_t stats_pending;
#define FUNC_FLG_LEADING 0x0020 /* PF only */
struct bnx2x_func_init_params {
- phys_addr_t fw_stat_map; /* (dma) valid if FUNC_FLG_STATS */
- phys_addr_t spq_map; /* (dma) valid if FUNC_FLG_SPQ */
+ rte_iova_t fw_stat_map; /* (dma) valid if FUNC_FLG_STATS */
+ rte_iova_t spq_map; /* (dma) valid if FUNC_FLG_SPQ */
uint16_t func_flgs;
uint16_t func_id; /* abs function id */
uint16_t pf_id;
uint8_t comp_type);
void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx);
void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32);
-void bnx2x_write_dmae(struct bnx2x_softc *sc, phys_addr_t dma_addr,
+void bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr,
uint32_t dst_addr, uint32_t len32);
void bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt,
uint32_t cid);
struct bnx2x_softc *sc = dev->data->dev_private;
struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
struct eth_rx_cqe_next_page *nextpg;
- phys_addr_t *rx_bd;
- phys_addr_t busaddr;
+ rte_iova_t *rx_bd;
+ rte_iova_t busaddr;
/* First allocate the rx queue data structure */
rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
int i;
int first_queue_query_index;
struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
- phys_addr_t cur_data_offset;
+ rte_iova_t cur_data_offset;
struct stats_query_entry *cur_query_entry;
stats_hdr->cmd_num = sc->fw_stats_num;
#define BNX2X_VF_CHANNEL_TRIES 100
static int
-bnx2x_do_req4pf(struct bnx2x_softc *sc, phys_addr_t phys_addr)
+bnx2x_do_req4pf(struct bnx2x_softc *sc, rte_iova_t phys_addr)
{
uint8_t *status = &sc->vf2pf_mbox->resp.common_reply.status;
uint8_t i;
#include "ecore_reg.h"
struct bnx2x_softc;
-typedef phys_addr_t ecore_dma_addr_t; /* expected to be 64 bit wide */
+typedef rte_iova_t ecore_dma_addr_t; /* expected to be 64 bit wide */
typedef volatile int ecore_atomic_t;
uint16_t max_vfs;
uint32_t func_cfg_flags;
void *vf_req_buf;
- phys_addr_t vf_req_buf_dma_addr;
+ rte_iova_t vf_req_buf_dma_addr;
uint32_t vf_req_fwd[8];
uint16_t total_vnics;
struct bnxt_child_vf_info *vf_info;
struct bnxt_rx_queue **rx_queues;
const void *rx_mem_zone;
struct rx_port_stats *hw_rx_port_stats;
- phys_addr_t hw_rx_port_stats_map;
+ rte_iova_t hw_rx_port_stats_map;
unsigned int tx_nr_rings;
unsigned int tx_cp_nr_rings;
struct bnxt_tx_queue **tx_queues;
const void *tx_mem_zone;
struct tx_port_stats *hw_tx_port_stats;
- phys_addr_t hw_tx_port_stats_map;
+ rte_iova_t hw_tx_port_stats_map;
/* Default completion ring */
struct bnxt_cp_ring_info *def_cp_ring;
uint16_t hwrm_cmd_seq;
void *hwrm_cmd_resp_addr;
- phys_addr_t hwrm_cmd_resp_dma_addr;
+ rte_iova_t hwrm_cmd_resp_dma_addr;
void *hwrm_short_cmd_req_addr;
- phys_addr_t hwrm_short_cmd_req_dma_addr;
+ rte_iova_t hwrm_short_cmd_req_dma_addr;
rte_spinlock_t hwrm_lock;
uint16_t max_req_len;
uint16_t max_resp_len;
struct cmpl_base *cp_desc_ring;
- phys_addr_t cp_desc_mapping;
+ rte_iova_t cp_desc_mapping;
struct ctx_hw_stats *hw_stats;
- phys_addr_t hw_stats_map;
+ rte_iova_t hw_stats_map;
uint32_t hw_stats_ctx_id;
struct bnxt_ring *cp_ring_struct;
const struct rte_memzone *mz = NULL;
static int version_printed;
uint32_t total_alloc_len;
- phys_addr_t mz_phys_addr;
+ rte_iova_t mz_phys_addr;
struct bnxt *bp;
int rc;
uint32_t entry_length;
uint8_t *buf;
size_t buflen;
- phys_addr_t dma_handle;
+ rte_iova_t dma_handle;
struct hwrm_nvm_get_dir_entries_input req = {0};
struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
{
int rc;
uint8_t *buf;
- phys_addr_t dma_handle;
+ rte_iova_t dma_handle;
struct hwrm_nvm_read_input req = {0};
struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
int rc;
struct hwrm_nvm_write_input req = {0};
struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
- phys_addr_t dma_handle;
+ rte_iova_t dma_handle;
uint8_t *buf;
HWRM_PREP(req, NVM_WRITE);
struct rte_pci_device *pdev = bp->pdev;
const struct rte_memzone *mz = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
- phys_addr_t mz_phys_addr;
+ rte_iova_t mz_phys_addr;
int sz;
int stats_len = (tx_ring_info || rx_ring_info) ?
struct bnxt_ring {
void *bd;
- phys_addr_t bd_dma;
+ rte_iova_t bd_dma;
uint32_t ring_size;
uint32_t ring_mask;
struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */
struct bnxt_sw_rx_bd *ag_buf_ring; /* sw ring */
- phys_addr_t rx_desc_mapping;
- phys_addr_t ag_desc_mapping;
+ rte_iova_t rx_desc_mapping;
+ rte_iova_t ag_desc_mapping;
struct bnxt_ring *rx_ring_struct;
struct bnxt_ring *ag_ring_struct;
struct tx_bd_long *tx_desc_ring;
struct bnxt_sw_tx_bd *tx_buf_ring;
- phys_addr_t tx_desc_mapping;
+ rte_iova_t tx_desc_mapping;
#define BNXT_DEV_STATE_CLOSING 0x1
uint32_t dev_state;
BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN);
uint16_t max_vnics;
int i;
- phys_addr_t mz_phys_addr;
+ rte_iova_t mz_phys_addr;
max_vnics = bp->max_vnics;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
uint16_t dflt_ring_grp;
uint16_t mru;
uint16_t hash_type;
- phys_addr_t rss_table_dma_addr;
+ rte_iova_t rss_table_dma_addr;
uint16_t *rss_table;
- phys_addr_t rss_hash_key_dma_addr;
+ rte_iova_t rss_hash_key_dma_addr;
void *rss_hash_key;
- phys_addr_t mc_list_dma_addr;
+ rte_iova_t mc_list_dma_addr;
char *mc_list;
uint32_t mc_addr_cnt;
#define BNXT_MAX_MC_ADDRS 16
} else {
struct lio_buf_free_info *finfo;
struct lio_gather *g;
- phys_addr_t phyaddr;
+ rte_iova_t phyaddr;
int i, frags;
finfo = (struct lio_buf_free_info *)rte_malloc(NULL,
}
phyaddr = rte_mem_virt2iova(g->sg);
- if (phyaddr == RTE_BAD_PHYS_ADDR) {
+ if (phyaddr == RTE_BAD_IOVA) {
PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
goto xmit_failed;
}
static inline uint64_t
lio_map_ring(void *buf)
{
- phys_addr_t dma_addr;
+ rte_iova_t dma_addr;
dma_addr = rte_mbuf_data_iova_default(((struct rte_mbuf *)buf));
static inline uint64_t
lio_map_ring_info(struct lio_droq *droq, uint32_t i)
{
- phys_addr_t dma_addr;
+ rte_iova_t dma_addr;
dma_addr = droq->info_list_dma + (i * LIO_DROQ_INFO_SIZE);
struct octeontx_pko_iomem {
uint8_t *va;
- phys_addr_t iova;
+ rte_iova_t iova;
size_t size;
};
#define osal_uintptr_t uintptr_t
-typedef phys_addr_t dma_addr_t;
+typedef rte_iova_t dma_addr_t;
typedef rte_spinlock_t osal_spinlock_t;
/* DMA */
-typedef phys_addr_t efsys_dma_addr_t;
+typedef rte_iova_t efsys_dma_addr_t;
typedef struct efsys_mem_s {
const struct rte_memzone *esm_mz;
}
esmp->esm_addr = mz->iova;
- if (esmp->esm_addr == RTE_BAD_PHYS_ADDR) {
+ if (esmp->esm_addr == RTE_BAD_IOVA) {
(void)rte_memzone_free(mz);
return EFAULT;
}
++i, ++id) {
struct rte_mbuf *m = objs[i];
struct sfc_ef10_rx_sw_desc *rxd;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
SFC_ASSERT((id & ~ptr_mask) == 0);
rxd = &rxq->sw_ring[id];
}
static void
-sfc_ef10_tx_qdesc_dma_create(phys_addr_t addr, uint16_t size, bool eop,
+sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
efx_qword_t *edp)
{
EFX_POPULATE_QWORD_4(*edp,
pkt_len = m_seg->pkt_len;
do {
- phys_addr_t seg_addr = rte_mbuf_data_iova(m_seg);
+ rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
unsigned int id = added & ptr_mask;
struct rbdr_entry_t *desc, *desc0;
struct nicvf_rbdr *rbdr = nic->rbdr;
uint32_t count;
- nicvf_phys_addr_t phy;
+ nicvf_iova_addr_t phy;
assert(rbdr != NULL);
desc = rbdr->desc;
NICVF_ERR_RSS_GET_SZ, /* -8171 */
};
-typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *dev, void *opaque);
+typedef nicvf_iova_addr_t (*rbdr_pool_get_handler)(void *dev, void *opaque);
struct nicvf_hw_rx_qstats {
uint64_t q_rx_bytes;
#define NICVF_STATIC_ASSERT(s) _Static_assert(s, #s)
#define assert_primary(nic) assert((nic)->sqs_mode == 0)
-typedef uint64_t nicvf_phys_addr_t;
+typedef uint64_t nicvf_iova_addr_t;
/* vNIC HW Enumerations */
uint64_t buf_addr:42;
uint64_t cache_align:7;
};
- nicvf_phys_addr_t full_addr;
+ nicvf_iova_addr_t full_addr;
};
#else
union {
uint64_t buf_addr:42;
uint64_t rsvd0:15;
};
- nicvf_phys_addr_t full_addr;
+ nicvf_iova_addr_t full_addr;
};
#endif
};
static void
nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
- nicvf_phys_addr_t phy)
+ nicvf_iova_addr_t phy)
{
uint16_t qidx;
void *obj;
};
}
-static nicvf_phys_addr_t
+static nicvf_iova_addr_t
rbdr_rte_mempool_get(void *dev, void *opaque)
{
uint16_t qidx;
* P = V - offset
*/
static inline uintptr_t
-nicvf_mbuff_phy2virt(phys_addr_t phy, uint64_t mbuf_phys_off)
+nicvf_mbuff_phy2virt(rte_iova_t phy, uint64_t mbuf_phys_off)
{
return (uintptr_t)(phy + mbuf_phys_off);
}
static inline uintptr_t
nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
{
- return (phys_addr_t)(virt - mbuf_phys_off);
+ return (rte_iova_t)(virt - mbuf_phys_off);
}
static inline void
uintptr_t rbdr_status;
uintptr_t rbdr_door;
struct rbdr_entry_t *desc;
- nicvf_phys_addr_t phys;
+ nicvf_iova_addr_t phys;
uint32_t buffsz;
uint32_t tail;
uint32_t next_tail;
struct nicvf_txq {
union sq_entry_t *desc;
- nicvf_phys_addr_t phys;
+ nicvf_iova_addr_t phys;
struct rte_mbuf **txbuffs;
uintptr_t sq_head;
uintptr_t sq_door;
uintptr_t cq_status;
uintptr_t cq_door;
union mbuf_initializer mbuf_initializer;
- nicvf_phys_addr_t phys;
+ nicvf_iova_addr_t phys;
union cq_entry_t *desc;
struct nicvf_rbdr *shared_rbdr;
struct nicvf *nic;
struct virtqueue *vq;
/**< memzone to populate hdr. */
const struct rte_memzone *virtio_net_hdr_mz;
- phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
uint16_t queue_id; /**< DPDK queue index. */
uint16_t port_id; /**< Device port identifier. */
struct virtqueue *vq;
/**< memzone to populate hdr. */
const struct rte_memzone *virtio_net_hdr_mz;
- phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
uint16_t port_id; /**< Device port identifier. */
const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
};
struct virtnet_ctl cq;
};
- phys_addr_t vq_ring_mem; /**< physical address of vring,
- * or virtual address for virtio_user. */
+ rte_iova_t vq_ring_mem; /**< physical address of vring,
+ * or virtual address for virtio_user. */
/**
* Head of the free chain in the descriptor table. If
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
/* Convert guest physical address to host physical address */
-static __rte_always_inline phys_addr_t
+static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
uint32_t i;