Rename the data type from phys_addr_t to rte_iova_t.
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Reviewed-by: Anatoly Burakov <anatoly.burakov@intel.com>
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
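
For reference, the new name aliases the same 64-bit integer as the old one, so the rename changes no ABI; a paraphrase of the rte_memory.h definitions:

#include <stdint.h>

/* Paraphrase of rte_memory.h: both names are plain 64-bit integers,
 * so substituting rte_iova_t for phys_addr_t changes no layout. */
typedef uint64_t phys_addr_t;               /* legacy name */
#define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)

typedef uint64_t rte_iova_t;                /* IO virtual address */
#define RTE_BAD_IOVA ((rte_iova_t)-1)
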
uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
uint16_t remaining_segments = segments_nb;
struct rte_mbuf *next_mbuf;
- phys_addr_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
+ rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
mbuf_offset + mbuf_hdr_size;
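
A minimal sketch of the address arithmetic in this hunk; rte_mempool_virt2iova() is the helper actually used above, while the wrapper function and parameter names are illustrative only:

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* IOVA of the data that follows the mbuf header inside a mempool
 * object; obj and mbuf_offset mirror the variables in the hunk. */
static inline rte_iova_t
seg_data_iova(void *obj, uint16_t mbuf_offset)
{
        rte_iova_t obj_iova = rte_mempool_virt2iova(obj);

        return obj_iova + mbuf_offset + sizeof(struct rte_mbuf);
}
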
do {
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
uint16_t length;
} aad;
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
uint16_t length;
} digest;
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
} digest; /**< Digest parameters */
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
} aad;
/**< Additional authentication parameters */
} aead;
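
A hedged sketch of how an application fills the renamed AEAD fields; the offsets below are assumptions about where the caller reserved AAD and digest space in the op's private area:

#include <rte_crypto.h>

/* assumed layout: a 16-byte AAD region, then the digest, placed in
 * the op's private area right after the two headers */
#define AAD_OFF    (sizeof(struct rte_crypto_op) + \
                    sizeof(struct rte_crypto_sym_op))
#define DIGEST_OFF (AAD_OFF + 16)

static void
fill_aead_ptrs(struct rte_crypto_op *op)
{
        op->sym->aead.aad.data =
                rte_crypto_op_ctod_offset(op, uint8_t *, AAD_OFF);
        op->sym->aead.aad.phys_addr =
                rte_crypto_op_ctophys_offset(op, AAD_OFF); /* rte_iova_t now */

        op->sym->aead.digest.data =
                rte_crypto_op_ctod_offset(op, uint8_t *, DIGEST_OFF);
        op->sym->aead.digest.phys_addr =
                rte_crypto_op_ctophys_offset(op, DIGEST_OFF);
}
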
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
} digest; /**< Digest parameters */
} auth;
};
return ctx;
}
-static inline phys_addr_t
+static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
paddr = memseg[i].phys_addr +
(vaddr_64 - memseg[i].addr_64);
- return (phys_addr_t)paddr;
+ return (rte_iova_t)paddr;
}
}
- return (phys_addr_t)(NULL);
+ return (rte_iova_t)(NULL);
}
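
For context, a self-contained sketch of the full lookup this hunk abbreviates, using the pre-18.05 rte_eal_get_physmem_layout() table (field names as in that era's struct rte_memseg):

#include <stdint.h>
#include <rte_memory.h>

/* scan the EAL memseg table for the segment containing vaddr and
 * offset into that segment's IOVA range */
static inline rte_iova_t
vtop_sketch(const void *vaddr)
{
        const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
        uint64_t vaddr_64 = (uint64_t)(uintptr_t)vaddr;
        int i;

        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr != NULL; i++) {
                if (vaddr_64 >= memseg[i].addr_64 &&
                    vaddr_64 < memseg[i].addr_64 + memseg[i].len)
                        return memseg[i].phys_addr +
                               (vaddr_64 - memseg[i].addr_64);
        }
        return (rte_iova_t)(NULL); /* not found, matching the hunk */
}
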
static inline void *
-dpaa_mem_ptov(phys_addr_t paddr)
+dpaa_mem_ptov(rte_iova_t paddr)
{
const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
int i;
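
And the inverse direction, sketched under the same assumptions:

#include <stdint.h>
#include <rte_memory.h>

/* IOVA back to a virtual address via the same memseg table */
static inline void *
ptov_sketch(rte_iova_t paddr)
{
        const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
        int i;

        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr != NULL; i++) {
                if (paddr >= memseg[i].phys_addr &&
                    paddr < memseg[i].phys_addr + memseg[i].len)
                        return (void *)(uintptr_t)(memseg[i].addr_64 +
                                        (paddr - memseg[i].phys_addr));
        }
        return NULL;
}
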
* all the packets in this queue could be dispatched into caam
*/
static int
-dpaa_sec_init_rx(struct qman_fq *fq_in, phys_addr_t hwdesc,
+dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
uint32_t fqid_out)
{
struct qm_mcc_initfq fq_opts;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
- phys_addr_t start_addr;
+ rte_iova_t start_addr;
uint8_t *old_digest;
ctx = dpaa_sec_alloc_ctx(ses);
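
A sketch of how the renamed start-address variables in these build helpers are typically seeded; rte_pktmbuf_iova() is the IOVA-named form of the older rte_pktmbuf_mtophys(), and the wrapper is illustrative:

#include <rte_crypto.h>
#include <rte_mbuf.h>

/* in-place operations reuse the source address when no distinct
 * destination mbuf is attached to the op */
static inline void
get_start_addrs(struct rte_crypto_op *op,
                rte_iova_t *src, rte_iova_t *dst)
{
        *src = rte_pktmbuf_iova(op->sym->m_src);
        *dst = op->sym->m_dst ? rte_pktmbuf_iova(op->sym->m_dst) : *src;
}
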
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
- phys_addr_t src_start_addr, dst_start_addr;
+ rte_iova_t src_start_addr, dst_start_addr;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
ses->iv.offset);
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
uint32_t length = 0;
- phys_addr_t src_start_addr, dst_start_addr;
+ rte_iova_t src_start_addr, dst_start_addr;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
ses->iv.offset);
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
- phys_addr_t src_start_addr, dst_start_addr;
+ rte_iova_t src_start_addr, dst_start_addr;
uint32_t length = 0;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
ses->iv.offset);
struct qat_crypto_op_cookie {
struct qat_alg_buf_list qat_sgl_list_src;
struct qat_alg_buf_list qat_sgl_list_dst;
- phys_addr_t qat_sgl_src_phys_addr;
- phys_addr_t qat_sgl_dst_phys_addr;
+ rte_iova_t qat_sgl_src_phys_addr;
+ rte_iova_t qat_sgl_dst_phys_addr;
};
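
A sketch of how the two renamed cookie fields are filled when the cookie pool is set up, given the struct above: the cookie's own IOVA plus the offset of each embedded SGL (the wrapper name is illustrative):

#include <stddef.h>
#include <rte_mempool.h>

static void
init_cookie_iova(struct qat_crypto_op_cookie *cookie)
{
        rte_iova_t base = rte_mempool_virt2iova(cookie);

        cookie->qat_sgl_src_phys_addr =
                base + offsetof(struct qat_crypto_op_cookie,
                                qat_sgl_list_src);
        cookie->qat_sgl_dst_phys_addr =
                base + offsetof(struct qat_crypto_op_cookie,
                                qat_sgl_list_dst);
}
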
/* Common content descriptor */
void *bpi_ctx;
struct qat_alg_cd cd;
uint8_t *cd_cur_ptr;
- phys_addr_t cd_paddr;
+ rte_iova_t cd_paddr;
struct icp_qat_fw_la_bulk_req fw_req;
uint8_t aad_len;
struct qat_crypto_instance *inst;
* This address may be used for setting the AAD physical pointer
* into the IV offset from the op
*/
- phys_addr_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
+ rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
struct qat_queue {
char memz_name[RTE_MEMZONE_NAMESIZE];
void *base_addr; /* Base address */
- phys_addr_t base_phys_addr; /* Queue physical address */
+ rte_iova_t base_phys_addr; /* Queue physical address */
uint32_t head; /* Shadow copy of the head */
uint32_t tail; /* Shadow copy of the tail */
uint32_t modulo;
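
A sketch, assuming the queue ring is backed by a memzone, of where base_phys_addr comes from; the memzone address became a phys_addr/iova union in the same release, so either member name resolves to the same value:

#include <rte_memzone.h>

static void
queue_set_base(struct qat_queue *q, const struct rte_memzone *mz)
{
        q->base_addr = mz->addr;
        q->base_phys_addr = mz->iova; /* == mz->phys_addr via the union */
}
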
struct l2fwd_key {
uint8_t *data;
uint32_t length;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
};
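
A sketch of populating an l2fwd_key so phys_addr carries the IOVA of the DMA-able key copy; rte_malloc_virt2iova() is the renamed rte_malloc_virt2phy():

#include <string.h>
#include <rte_malloc.h>

static int
set_key(struct l2fwd_key *key, const uint8_t *data, uint32_t length)
{
        key->data = rte_malloc("l2fwd_key", length, 0);
        if (key->data == NULL)
                return -1;
        memcpy(key->data, data, length);
        key->length = length;
        key->phys_addr = rte_malloc_virt2iova(key->data);
        return 0;
}
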
struct l2fwd_iv {
struct rte_mempool *mempool;
/**< crypto operation mempool which operation is allocated from */
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
/**< physical address of crypto operation */
RTE_STD_C11
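
This per-op address is recorded once, when the op mempool is populated; a paraphrased sketch of that element-init callback (the real library routine is rte_crypto_op_init(), the body here is an approximation):

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_mempool.h>

/* remember each op's own IOVA so the ctophys macros can offset
 * from it later */
static void
op_init_sketch(struct rte_mempool *mp, __rte_unused void *arg,
               void *obj, __rte_unused unsigned int i)
{
        struct rte_crypto_op *op = obj;

        op->phys_addr = rte_mempool_virt2iova(obj);
        op->mempool = mp;
}
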
* For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
* "digest result" read "authentication tag T".
*/
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
/**< Physical address of digest */
} digest; /**< Digest parameters */
struct {
* of the block size (16 bytes).
*
*/
- phys_addr_t phys_addr; /**< physical address */
+ rte_iova_t phys_addr; /**< physical address */
} aad;
/**< Additional authentication parameters */
} aead;
* will overwrite any data at this location.
*
*/
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
/**< Physical address of digest */
} digest; /**< Digest parameters */
} auth;
* to calculate address from.
*/
#define rte_crypto_op_ctophys_offset(c, o) \
- (phys_addr_t)((c)->phys_addr + (o))
+ (rte_iova_t)((c)->phys_addr + (o))
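
A usage sketch: the macro offsets from the op's own IOVA, so it is valid only for data stored inside the op itself; placing the digest right past the two headers is a common convention but an assumption here:

#include <rte_crypto.h>

static void
set_auth_digest(struct rte_crypto_op *op)
{
        /* assumed: digest stored right after the op and sym-op headers */
        uint32_t off = sizeof(struct rte_crypto_op) +
                       sizeof(struct rte_crypto_sym_op);

        op->sym->auth.digest.data =
                rte_crypto_op_ctod_offset(op, uint8_t *, off);
        op->sym->auth.digest.phys_addr =
                rte_crypto_op_ctophys_offset(op, off);
}
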
/**
* Crypto parameters range description