sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->aead.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
}
ctx->op = op;
old_digest = ctx->digest;
- start_addr = rte_pktmbuf_mtophys(mbuf);
+ start_addr = rte_pktmbuf_iova(mbuf);
/* output */
sg = &cf->sg[0];
qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
cf = &ctx->job;
ctx->op = op;
- src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
if (sym->m_dst)
- dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
else
dst_start_addr = src_start_addr;
request->num_bufs = 1;
request->src = src_bd;
src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
- src_bd->paddr = rte_pktmbuf_mtophys(op->sym->m_src);
+ src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
/* Empty source. */
request->dst = dst_bd;
dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
- dst_bd->paddr = rte_pktmbuf_mtophys(dst_mbuf);
+ dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
/*
* We can use all available space in dst_mbuf,
{
int nr = 1;
- uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+ uint32_t buf_len = rte_pktmbuf_iova(buf) -
buff_start + rte_pktmbuf_data_len(buf);
list->bufers[0].addr = buff_start;
list->bufers[nr].len = rte_pktmbuf_data_len(buf);
list->bufers[nr].resrvd = 0;
- list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+ list->bufers[nr].addr = rte_pktmbuf_iova(buf);
buf_len += list->bufers[nr].len;
buf = buf->next;
* so as not to overwrite data in dest buffer
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
dst_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+ rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
} else {
/* In-place operation
* Start DMA at nearest aligned address below min_ofs
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
& QAT_64_BTYE_ALIGN_MASK;
- if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
+ if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
rte_pktmbuf_headroom(op->sym->m_src))
> src_buf_start)) {
/* alignment has pushed addr ahead of start of mbuf
* so revert and take the performance hit
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ rte_pktmbuf_iova_offset(op->sym->m_src,
min_ofs);
}
dst_buf_start = src_buf_start;
if (do_cipher || do_aead) {
cipher_param->cipher_offset =
- (uint32_t)rte_pktmbuf_mtophys_offset(
+ (uint32_t)rte_pktmbuf_iova_offset(
op->sym->m_src, cipher_ofs) - src_buf_start;
cipher_param->cipher_length = cipher_len;
} else {
}
if (do_auth || do_aead) {
- auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
op->sym->m_src, auth_ofs) - src_buf_start;
auth_param->auth_len = auth_len;
} else {
const struct rte_mbuf *mbuf,
uint8_t flags)
{
- meta->physaddr = rte_mbuf_data_dma_addr(mbuf);
+ meta->physaddr = rte_mbuf_data_iova(mbuf);
meta->delta_ns = 0;
meta->data_len = rte_pktmbuf_data_len(mbuf);
meta->flags = flags;
tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
tx_start_bd->addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
+ rte_cpu_to_le_64(rte_mbuf_data_iova(m0));
tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_start_bd->general_data =
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->buffer_addr = dma_addr;
rxdp->status = 0;
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->buffer_addr = dma;
rxdp->status = 0;
}
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
/* Clear HW ring memory */
rxq->rx_ring[i] = rxd_init;
* Set up transmit descriptor.
*/
slen = (uint16_t) m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txd->read.cmd_type_len =
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.pkt_addr = dma;
rxdp->read.hdr_addr = 0;
return -ENOMEM;
}
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
mb->nb_segs = 1;
mb->port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
- rte_mbuf_data_dma_addr_default(mb));
+ rte_mbuf_data_iova_default(mb));
rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
/* Set data buffer address and data length of the mbuf */
rxdp->read.hdr_addr = 0;
/* Setup TX Descriptor */
slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
"buf_dma_addr: %#"PRIx64";\n"
uint32_t i;
for (i = 0; i < 4; i++, txdp++, pkts++) {
- dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ dma_addr = rte_mbuf_data_iova(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
{
uint64_t dma_addr;
- dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ dma_addr = rte_mbuf_data_iova(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
mbuf->port = rxq->port_id;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.pkt_addr = dma_addr;
int i;
for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
- buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
uint64_t buf_dma_addr;
uint32_t pkt_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txd->read.cmd_type_len =
mb->data_off = RTE_PKTMBUF_HEADROOM;
/* populate the descriptors */
- dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
if (!bulk_alloc) {
__le64 dma =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
/*
* Update RX descriptor with the physical address of the
* new data buffer of the new allocated mbuf.
mbuf->port = rxq->port_id;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
- sc->dma_addr = rte_mbuf_data_dma_addr(m);
+ sc->dma_addr = rte_mbuf_data_iova(m);
sc->mbuf = m;
dma_addr = sc->dma_addr;
cmdsetup.s.u.datasize = pkt_len;
lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
&cmdsetup, tag);
- ndata.cmd.cmd3.dptr = rte_mbuf_data_dma_addr(m);
+ ndata.cmd.cmd3.dptr = rte_mbuf_data_iova(m);
ndata.reqtype = LIO_REQTYPE_NORESP_NET;
} else {
struct lio_buf_free_info *finfo;
&cmdsetup, tag);
memset(g->sg, 0, g->sg_size);
- g->sg[0].ptr[0] = rte_mbuf_data_dma_addr(m);
+ g->sg[0].ptr[0] = rte_mbuf_data_iova(m);
lio_add_sg_size(&g->sg[0], m->data_len, 0);
pkt_len = m->data_len;
finfo->mbuf = m;
m = m->next;
while (frags--) {
g->sg[(i >> 2)].ptr[(i & 3)] =
- rte_mbuf_data_dma_addr(m);
+ rte_mbuf_data_iova(m);
lio_add_sg_size(&g->sg[(i >> 2)],
m->data_len, (i & 3));
pkt_len += m->data_len;
{
phys_addr_t dma_addr;
- dma_addr = rte_mbuf_data_dma_addr_default(((struct rte_mbuf *)buf));
+ dma_addr = rte_mbuf_data_iova_default(((struct rte_mbuf *)buf));
return (uint64_t)dma_addr;
}
}
entries[i].buff.addr =
- rte_mbuf_data_dma_addr_default(mbufs[i]);
+ rte_mbuf_data_iova_default(mbufs[i]);
entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
entries[i].bpool = bpool;
}
status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
if (unlikely(status != PP2_DESC_ERR_OK)) {
struct pp2_buff_inf binf = {
- .addr = rte_mbuf_data_dma_addr_default(mbuf),
+ .addr = rte_mbuf_data_iova_default(mbuf),
.cookie = (pp2_cookie_t)(uint64_t)mbuf,
};
sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
sq->ent[sq->head].buff.addr =
- rte_mbuf_data_dma_addr_default(mbuf);
+ rte_mbuf_data_iova_default(mbuf);
sq->ent[sq->head].bpool =
(unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
NULL : mrvl_port_to_bpool_lookup[mbuf->port];
pp2_ppio_outq_desc_reset(&descs[i]);
pp2_ppio_outq_desc_set_phys_addr(&descs[i],
- rte_pktmbuf_mtophys(mbuf));
+ rte_pktmbuf_iova(mbuf));
pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
pp2_ppio_outq_desc_set_pkt_len(&descs[i],
rte_pktmbuf_pkt_len(mbuf));
*lmbuf = pkt;
dma_size = pkt->data_len;
- dma_addr = rte_mbuf_data_dma_addr(pkt);
+ dma_addr = rte_mbuf_data_iova(pkt);
PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
"%" PRIx64 "\n", dma_addr);
cmd_buf[0] |= (1ULL << 58); /* SET DF */
/* Setup PKO_SEND_GATHER_S */
- cmd_buf[(1 << 1) | 1] = rte_mbuf_data_dma_addr(tx_pkt);
+ cmd_buf[(1 << 1) | 1] = rte_mbuf_data_iova(tx_pkt);
cmd_buf[(1 << 1) | 0] = PKO_SEND_GATHER_SUBDC |
PKO_SEND_GATHER_LDTYPE(0x1ull) |
PKO_SEND_GATHER_GAUAR((long)gaura_id) |
}
rxq->sw_rx_ring[idx].mbuf = new_mb;
rxq->sw_rx_ring[idx].page_offset = 0;
- mapping = rte_mbuf_data_dma_addr_default(new_mb);
+ mapping = rte_mbuf_data_iova_default(new_mb);
/* Advance PROD and get BD pointer */
rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
curr_prod = &rxq->sw_rx_ring[idx];
*curr_prod = *curr_cons;
- new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
+ new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
curr_prod->page_offset;
rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
nb_segs++;
}
- mapping = rte_mbuf_data_dma_addr(m_seg);
+ mapping = rte_mbuf_data_iova(m_seg);
QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
} else if (nb_segs == 1) {
memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
nb_segs++;
}
- mapping = rte_mbuf_data_dma_addr(m_seg);
+ mapping = rte_mbuf_data_iova(m_seg);
QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
} else {
ecore_chain_produce(&txq->tx_pbl);
memset(tx_bd, 0, sizeof(*tx_bd));
nb_segs++;
- mapping = rte_mbuf_data_dma_addr(m_seg);
+ mapping = rte_mbuf_data_iova(m_seg);
QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
}
nbds++;
/* Map MBUF linear data for DMA and set in the BD1 */
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
mbuf->data_len);
bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
nbds++;
/* BD1 */
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
hdr_size);
/* BD2 */
QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
- rte_mbuf_data_dma_addr(mbuf)),
+ rte_mbuf_data_iova(mbuf)),
mbuf->data_len - hdr_size);
bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
if (mplsoudp_flg) {
* structure members.
*/
- phys_addr = rte_mbuf_data_dma_addr_default(m);
+ phys_addr = rte_mbuf_data_iova_default(m);
EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
ESF_DZ_RX_KER_BYTE_CNT, buf_size,
ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
pkt_len = m_seg->pkt_len;
do {
- phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
+ phys_addr_t seg_addr = rte_mbuf_data_iova(m_seg);
unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
unsigned int id = added & ptr_mask;
SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
SFC_EF10_TX_DMA_DESC_LEN_MAX);
- sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_dma_addr(pkt),
+ sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
rte_pktmbuf_data_len(pkt),
true, &txq->txq_hw_ring[id]);
SFC_ASSERT(m->nb_segs == 1);
m->port = port_id;
- addr[i] = rte_pktmbuf_mtophys(m);
+ addr[i] = rte_pktmbuf_iova(m);
}
efx_rx_qpost(rxq->common, addr, rxq->buf_size,
if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
return EMSGSIZE;
- header_paddr = rte_pktmbuf_mtophys(m);
+ header_paddr = rte_pktmbuf_iova(m);
/*
* Sometimes headers may be split across multiple mbufs. In such cases
size_t seg_len;
seg_len = m_seg->data_len;
- next_frag = rte_mbuf_data_dma_addr(m_seg);
+ next_frag = rte_mbuf_data_iova(m_seg);
/*
* If we've started TSO transaction few steps earlier,
sqe.gather.subdesc_type = SQ_DESC_TYPE_GATHER;
sqe.gather.ld_type = NIC_SEND_LD_TYPE_E_LDT;
sqe.gather.size = pkt->data_len;
- sqe.gather.addr = rte_mbuf_data_dma_addr(pkt);
+ sqe.gather.addr = rte_mbuf_data_iova(pkt);
entry->buff[0] = sqe.buff[0];
entry->buff[1] = sqe.buff[1];
entry->buff[0] = (uint64_t)SQ_DESC_TYPE_GATHER << 60 |
(uint64_t)NIC_SEND_LD_TYPE_E_LDT << 58 |
pkt->data_len;
- entry->buff[1] = rte_mbuf_data_dma_addr(pkt);
+ entry->buff[1] = rte_mbuf_data_iova(pkt);
}
#endif
rte_cpu_to_le_64(txq->data_ring.basePA +
offset);
} else {
- gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
+ gdesc->txd.addr = rte_mbuf_data_iova(m_seg);
}
gdesc->dword[2] = dw2 | m_seg->data_len;
*/
buf_info->m = mbuf;
buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
- buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+ buf_info->bufPA = rte_mbuf_data_iova_default(mbuf);
/* Load Rx Descriptor with the buffer's GPA */
rxd->addr = buf_info->bufPA;
aad = get_aad(m);
memcpy(aad, iv - sizeof(struct esp_hdr), 8);
sym_cop->aead.aad.data = aad;
- sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
} else {
sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
}
aad = get_aad(m);
memcpy(aad, esp, 8);
sym_cop->aead.aad.data = aad;
- sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
} else {
switch (sa->cipher_algo) {
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
}
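As a side note on the AAD/digest address pattern above, here is a minimal sketch (illustration only, not part of the patch) of how a virtual pointer into the mbuf data area is translated back to an IO address by reusing its byte offset from rte_pktmbuf_mtod(); the helper name data_ptr_to_iova is hypothetical.

#include <rte_mbuf.h>

/* Illustration only: "p" is known to point inside the data area of "m";
 * its IO address is the same byte offset from the start of the mbuf data.
 */
static inline rte_iova_t
data_ptr_to_iova(struct rte_mbuf *m, const uint8_t *p)
{
	return rte_pktmbuf_iova_offset(m,
			p - rte_pktmbuf_mtod(m, const uint8_t *));
}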
uint8_t *) + ipdata_offset + data_len;
}
- op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
/* For wireless algorithms, offset/length must be in bits */
uint8_t *) + ipdata_offset + data_len;
}
- op->sym->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
if (cparams->aad.length) {
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
/**
- * Return the DMA address of the beginning of the mbuf data
+ * Return the IO address of the beginning of the mbuf data
*
* @param mb
* The pointer to the mbuf.
* @return
- * The physical address of the beginning of the mbuf data
+ * The IO address of the beginning of the mbuf data
*/
+static inline rte_iova_t
+rte_mbuf_data_iova(const struct rte_mbuf *mb)
+{
+ return mb->buf_iova + mb->data_off;
+}
+
+__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
- return mb->buf_iova + mb->data_off;
+ return rte_mbuf_data_iova(mb);
}
/**
- * Return the default DMA address of the beginning of the mbuf data
+ * Return the default IO address of the beginning of the mbuf data
*
* This function is used by drivers in their receive function, as it
* returns the location where data should be written by the NIC, taking
* @param mb
* The pointer to the mbuf.
* @return
- * The physical address of the beginning of the mbuf data
+ * The IO address of the beginning of the mbuf data
*/
+static inline rte_iova_t
+rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
+{
+ return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+}
+
+__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
- return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+ return rte_mbuf_data_iova_default(mb);
}
/**
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
/**
- * A macro that returns the physical address that points to an offset of the
+ * A macro that returns the IO address that points to an offset of the
* start of the data in the mbuf
*
* @param m
* @param o
* The offset into the data to calculate address from.
*/
-#define rte_pktmbuf_mtophys_offset(m, o) \
+#define rte_pktmbuf_iova_offset(m, o) \
(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
+/* deprecated */
+#define rte_pktmbuf_mtophys_offset(m, o) \
+ rte_pktmbuf_iova_offset(m, o)
+
/**
- * A macro that returns the physical address that points to the start of the
+ * A macro that returns the IO address that points to the start of the
* data in the mbuf
*
* @param m
* The packet mbuf.
*/
-#define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0)
+#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
+
+/* deprecated */
+#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
/**
* A macro that returns the length of the packet.
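For reference, a minimal sketch of how the renamed helpers are typically used once this patch is applied. The descriptor layout (struct hw_rx_desc) and the helper names below are hypothetical; only the rte_mbuf_data_iova*, rte_pktmbuf_iova_offset and rte_cpu_to_le_64 calls come from the API shown above.

#include <rte_byteorder.h>
#include <rte_mbuf.h>

/* Hypothetical little-endian RX descriptor, for illustration only. */
struct hw_rx_desc {
	uint64_t pkt_addr;
	uint64_t hdr_addr;
};

/* RX refill: the NIC writes at the headroom, so use the "default" helper. */
static inline void
refill_rx_desc(struct hw_rx_desc *rxd, struct rte_mbuf *mb)
{
	rxd->pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
	rxd->hdr_addr = 0;
}

/* TX: use the current data offset of the segment being sent. */
static inline uint64_t
tx_seg_addr(const struct rte_mbuf *m_seg)
{
	return rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
}

/* Crypto: IO address of a digest placed at a byte offset into the mbuf. */
static inline rte_iova_t
digest_addr(struct rte_mbuf *m, uint32_t digest_off)
{
	return rte_pktmbuf_iova_offset(m, digest_off);
}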
/* Set crypto operation authentication parameters */
sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, QUOTE_512_BYTES);
sym_op->auth.data.offset = 0;
sym_op->m_src = ut_params->ibuf;
sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, QUOTE_512_BYTES);
sym_op->auth.data.offset = 0;
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
ut_params->digest = sym_op->auth.digest.data;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
ut_params->digest = sym_op->auth.digest.data;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
ut_params->digest = sym_op->auth.digest.data;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
memset(sym_op->auth.digest.data, 0, auth_tag_len);
"no room to append aad");
sym_op->aead.aad.phys_addr =
- rte_pktmbuf_mtophys(ut_params->ibuf);
+ rte_pktmbuf_iova(ut_params->ibuf);
/* Copy AAD 18 bytes after the AAD pointer, according to the API */
memcpy(sym_op->aead.aad.data + 18, tdata->aad.data, tdata->aad.len);
TEST_HEXDUMP(stdout, "aad:", sym_op->aead.aad.data,
"no room to append aad");
sym_op->aead.aad.phys_addr =
- rte_pktmbuf_mtophys(ut_params->ibuf);
+ rte_pktmbuf_iova(ut_params->ibuf);
memcpy(sym_op->aead.aad.data, tdata->aad.data, tdata->aad.len);
TEST_HEXDUMP(stdout, "aad:", sym_op->aead.aad.data,
tdata->aad.len);
TEST_ASSERT_NOT_NULL(sym_op->aead.digest.data,
"no room to append digest");
memset(sym_op->aead.digest.data, 0, tdata->auth_tag.len);
- sym_op->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->aead.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->obuf ? ut_params->obuf :
ut_params->ibuf,
plaintext_pad_len +
ut_params->ibuf, tdata->auth_tag.len);
TEST_ASSERT_NOT_NULL(sym_op->aead.digest.data,
"no room to append digest");
- sym_op->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->aead.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf,
plaintext_pad_len + aad_pad_len);
ut_params->ibuf, MD5_DIGEST_LEN);
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append digest");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, plaintext_pad_len);
if (ut_params->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append digest");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, plaintext_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, reference->plaintext.len);
if (auth_generate)
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, reference->ciphertext.len);
if (auth_generate)
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, reference->ciphertext.len);
if (auth_generate)
ut_params->ibuf, aad_len);
TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
"no room to prepend aad");
- sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(
+ sym_op->aead.aad.phys_addr = rte_pktmbuf_iova(
ut_params->ibuf);
memset(sym_op->aead.aad.data, 0, aad_len);
ut_params->ibuf, aad_len);
TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
"no room to prepend aad");
- sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(
+ sym_op->aead.aad.phys_addr = rte_pktmbuf_iova(
ut_params->ibuf);
memset(sym_op->aead.aad.data, 0, aad_len);
digest_mem = rte_pktmbuf_append(ut_params->obuf,
tdata->auth_tag.len);
- digest_phys = rte_pktmbuf_mtophys_offset(
+ digest_phys = rte_pktmbuf_iova_offset(
ut_params->obuf,
tdata->plaintext.len + prepend_len);
}
* Place digest at the end of the last buffer
*/
if (!digest_phys)
- digest_phys = rte_pktmbuf_mtophys(buf) + to_trn;
+ digest_phys = rte_pktmbuf_iova(buf) + to_trn;
if (oop && buf_last_oop)
- digest_phys = rte_pktmbuf_mtophys(buf_last_oop) + to_trn;
+ digest_phys = rte_pktmbuf_iova(buf_last_oop) + to_trn;
if (!digest_mem && !oop) {
digest_mem = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ tdata->auth_tag.len);
- digest_phys = rte_pktmbuf_mtophys_offset(ut_params->ibuf,
+ digest_phys = rte_pktmbuf_iova_offset(ut_params->ibuf,
tdata->plaintext.len);
}
return rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
}
-static inline phys_addr_t
-pktmbuf_mtophys_offset(struct rte_mbuf *mbuf, int offset) {
+static inline rte_iova_t
+pktmbuf_iova_offset(struct rte_mbuf *mbuf, int offset) {
struct rte_mbuf *m;
for (m = mbuf; (m != NULL) && (offset > m->data_len); m = m->next)
offset -= m->data_len;
if (m == NULL) {
- printf("pktmbuf_mtophys_offset: offset out of buffer\n");
+ printf("pktmbuf_iova_offset: offset out of buffer\n");
return 0;
}
- return rte_pktmbuf_mtophys_offset(m, offset);
+ return rte_pktmbuf_iova_offset(m, offset);
}
static inline struct rte_mbuf *
sym_op->auth.digest.data = pktmbuf_mtod_offset
(iobuf, digest_offset);
sym_op->auth.digest.phys_addr =
- pktmbuf_mtophys_offset(iobuf,
+ pktmbuf_iova_offset(iobuf,
digest_offset);
} else {
auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
sym_op->auth.digest.data = pktmbuf_mtod_offset
(sym_op->m_src, digest_offset);
sym_op->auth.digest.phys_addr =
- pktmbuf_mtophys_offset(sym_op->m_src,
+ pktmbuf_iova_offset(sym_op->m_src,
digest_offset);
}