mbuf: rename data address helpers to IOVA
author Thomas Monjalon <thomas@monjalon.net>
Sun, 5 Nov 2017 23:22:55 +0000 (00:22 +0100)
committer Thomas Monjalon <thomas@monjalon.net>
Mon, 6 Nov 2017 21:44:26 +0000 (22:44 +0100)
The following inline functions and macros have been renamed to be
consistent with the IOVA wording:

rte_mbuf_data_dma_addr         -> rte_mbuf_data_iova
rte_mbuf_data_dma_addr_default -> rte_mbuf_data_iova_default
rte_pktmbuf_mtophys            -> rte_pktmbuf_iova
rte_pktmbuf_mtophys_offset     -> rte_pktmbuf_iova_offset

The old functions and macros, now marked as deprecated, are kept to avoid breaking the API.

Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
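
The mapping from old to new names is one-to-one, so converting a driver or
application is purely mechanical. Below is a minimal sketch of the renamed
helpers in use, assuming a hypothetical descriptor layout (struct fake_desc
and its fields are illustrative only and are not taken from any PMD touched
by this patch):

    #include <stdint.h>
    #include <rte_byteorder.h>
    #include <rte_mbuf.h>

    /* Hypothetical descriptor layout, for illustration only. */
    struct fake_desc {
            uint64_t buf_addr; /* little-endian IO address */
            uint16_t buf_len;
    };

    /* TX: point the descriptor at the mbuf's current data. */
    static inline void
    fake_tx_fill(struct fake_desc *d, const struct rte_mbuf *m)
    {
            /* was rte_mbuf_data_dma_addr(m) */
            d->buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(m));
            d->buf_len = rte_pktmbuf_data_len(m);
    }

    /* RX refill: point the descriptor where the NIC should write,
     * i.e. just past the headroom of a freshly allocated mbuf.
     */
    static inline void
    fake_rx_refill(struct fake_desc *d, struct rte_mbuf *m)
    {
            /* was rte_mbuf_data_dma_addr_default(m) */
            d->buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(m));
    }

    /* Crypto-style use: IO address of a digest located at an offset
     * into the packet data.
     */
    static inline rte_iova_t
    fake_digest_iova(struct rte_mbuf *m, uint32_t off)
    {
            /* was rte_pktmbuf_mtophys_offset(m, off) */
            return rte_pktmbuf_iova_offset(m, off);
    }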
29 files changed:
app/test-crypto-perf/cperf_ops.c
drivers/crypto/dpaa_sec/dpaa_sec.c
drivers/crypto/mrvl/rte_mrvl_pmd.c
drivers/crypto/qat/qat_crypto.c
drivers/net/ark/ark_ethdev_tx.c
drivers/net/bnx2x/bnx2x.c
drivers/net/e1000/em_rxtx.c
drivers/net/e1000/igb_rxtx.c
drivers/net/i40e/i40e_rxtx.c
drivers/net/ixgbe/ixgbe_rxtx.c
drivers/net/liquidio/lio_rxtx.c
drivers/net/liquidio/lio_rxtx.h
drivers/net/mrvl/mrvl_ethdev.c
drivers/net/nfp/nfp_net.c
drivers/net/octeontx/octeontx_rxtx.c
drivers/net/qede/qede_rxtx.c
drivers/net/sfc/sfc_ef10_rx.c
drivers/net/sfc/sfc_ef10_tx.c
drivers/net/sfc/sfc_rx.c
drivers/net/sfc/sfc_tso.c
drivers/net/sfc/sfc_tx.c
drivers/net/thunderx/nicvf_rxtx.h
drivers/net/vmxnet3/vmxnet3_rxtx.c
examples/ipsec-secgw/esp.c
examples/l2fwd-crypto/main.c
lib/librte_mbuf/rte_mbuf.h
test/test/test_cryptodev.c
test/test/test_cryptodev.h
test/test/test_cryptodev_blockcipher.c

diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index bc6b24f..23d30ca 100644
@@ -221,7 +221,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
                        sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
                                        uint8_t *, offset);
                        sym_op->auth.digest.phys_addr =
-                                       rte_pktmbuf_mtophys_offset(buf, offset);
+                                       rte_pktmbuf_iova_offset(buf, offset);
 
                }
 
@@ -318,7 +318,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
                        sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
                                        uint8_t *, offset);
                        sym_op->auth.digest.phys_addr =
-                                       rte_pktmbuf_mtophys_offset(buf, offset);
+                                       rte_pktmbuf_iova_offset(buf, offset);
                }
 
                if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
@@ -425,7 +425,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
                        sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
                                        uint8_t *, offset);
                        sym_op->aead.digest.phys_addr =
-                                       rte_pktmbuf_mtophys_offset(buf, offset);
+                                       rte_pktmbuf_iova_offset(buf, offset);
                }
        }
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 234c84f..1d9d03a 100644
@@ -577,7 +577,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        ctx->op = op;
        old_digest = ctx->digest;
 
-       start_addr = rte_pktmbuf_mtophys(mbuf);
+       start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
@@ -637,10 +637,10 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        cf = &ctx->job;
        ctx->op = op;
 
-       src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
+       src_start_addr = rte_pktmbuf_iova(sym->m_src);
 
        if (sym->m_dst)
-               dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+               dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        else
                dst_start_addr = src_start_addr;
 
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd.c b/drivers/crypto/mrvl/rte_mrvl_pmd.c
index 63895c5..f778a80 100644
@@ -480,7 +480,7 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
        request->num_bufs = 1;
        request->src = src_bd;
        src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
-       src_bd->paddr = rte_pktmbuf_mtophys(op->sym->m_src);
+       src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
        src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
 
        /* Empty source. */
@@ -502,7 +502,7 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
 
        request->dst = dst_bd;
        dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
-       dst_bd->paddr = rte_pktmbuf_mtophys(dst_mbuf);
+       dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
 
        /*
         * We can use all available space in dst_mbuf,
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index e49b71f..6014841 100644
@@ -1119,7 +1119,7 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
 {
        int nr = 1;
 
-       uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+       uint32_t buf_len = rte_pktmbuf_iova(buf) -
                        buff_start + rte_pktmbuf_data_len(buf);
 
        list->bufers[0].addr = buff_start;
@@ -1143,7 +1143,7 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
 
                list->bufers[nr].len = rte_pktmbuf_data_len(buf);
                list->bufers[nr].resrvd = 0;
-               list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+               list->bufers[nr].addr = rte_pktmbuf_iova(buf);
 
                buf_len += list->bufers[nr].len;
                buf = buf->next;
@@ -1499,26 +1499,26 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
                 * so as not to overwrite data in dest buffer
                 */
                src_buf_start =
-                       rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
+                       rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
                dst_buf_start =
-                       rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+                       rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
 
        } else {
                /* In-place operation
                 * Start DMA at nearest aligned address below min_ofs
                 */
                src_buf_start =
-                       rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
+                       rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
                                                & QAT_64_BTYE_ALIGN_MASK;
 
-               if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
+               if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
                                        rte_pktmbuf_headroom(op->sym->m_src))
                                                        > src_buf_start)) {
                        /* alignment has pushed addr ahead of start of mbuf
                         * so revert and take the performance hit
                         */
                        src_buf_start =
-                               rte_pktmbuf_mtophys_offset(op->sym->m_src,
+                               rte_pktmbuf_iova_offset(op->sym->m_src,
                                                                min_ofs);
                }
                dst_buf_start = src_buf_start;
@@ -1526,7 +1526,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 
        if (do_cipher || do_aead) {
                cipher_param->cipher_offset =
-                               (uint32_t)rte_pktmbuf_mtophys_offset(
+                               (uint32_t)rte_pktmbuf_iova_offset(
                                op->sym->m_src, cipher_ofs) - src_buf_start;
                cipher_param->cipher_length = cipher_len;
        } else {
@@ -1535,7 +1535,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
        }
 
        if (do_auth || do_aead) {
-               auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
+               auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
                                op->sym->m_src, auth_ofs) - src_buf_start;
                auth_param->auth_len = auth_len;
        } else {
diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
index 57841df..0d3c7dc 100644
@@ -93,7 +93,7 @@ eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
                          const struct rte_mbuf *mbuf,
                          uint8_t flags)
 {
-       meta->physaddr = rte_mbuf_data_dma_addr(mbuf);
+       meta->physaddr = rte_mbuf_data_iova(mbuf);
        meta->delta_ns = 0;
        meta->data_len = rte_pktmbuf_data_len(mbuf);
        meta->flags = flags;
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 6b4526b..99b532b 100644
@@ -2135,7 +2135,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
        tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
 
        tx_start_bd->addr =
-           rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
+           rte_cpu_to_le_64(rte_mbuf_data_iova(m0));
        tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
        tx_start_bd->general_data =
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 32ca920..1d8f079 100644
@@ -577,7 +577,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         * Set up Transmit Data Descriptor.
                         */
                        slen = m_seg->data_len;
-                       buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+                       buf_dma_addr = rte_mbuf_data_iova(m_seg);
 
                        txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
                        txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
@@ -799,7 +799,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
                rxdp->buffer_addr = dma_addr;
                rxdp->status = 0;
 
@@ -979,7 +979,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
-               dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+               dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
                rxdp->buffer_addr = dma;
                rxdp->status = 0;
 
@@ -1652,7 +1652,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
                }
 
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 
                /* Clear HW ring memory */
                rxq->rx_ring[i] = rxd_init;
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 4590179..4ee12e9 100644
@@ -597,7 +597,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         * Set up transmit descriptor.
                         */
                        slen = (uint16_t) m_seg->data_len;
-                       buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+                       buf_dma_addr = rte_mbuf_data_iova(m_seg);
                        txd->read.buffer_addr =
                                rte_cpu_to_le_64(buf_dma_addr);
                        txd->read.cmd_type_len =
@@ -925,7 +925,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
                rxdp->read.hdr_addr = 0;
                rxdp->read.pkt_addr = dma_addr;
 
@@ -1119,7 +1119,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
-               dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+               dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
                rxdp->read.pkt_addr = dma;
                rxdp->read.hdr_addr = 0;
 
@@ -2203,7 +2203,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
                        return -ENOMEM;
                }
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
                rxd = &rxq->rx_ring[i];
                rxd->read.hdr_addr = 0;
                rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 943e1c1..8b4f612 100644
@@ -589,7 +589,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
                mb->nb_segs = 1;
                mb->port = rxq->port_id;
                dma_addr = rte_cpu_to_le_64(\
-                       rte_mbuf_data_dma_addr_default(mb));
+                       rte_mbuf_data_iova_default(mb));
                rxdp[i].read.hdr_addr = 0;
                rxdp[i].read.pkt_addr = dma_addr;
        }
@@ -752,7 +752,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
                rxdp->read.hdr_addr = 0;
                rxdp->read.pkt_addr = dma_addr;
 
@@ -869,7 +869,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
 
                /* Set data buffer address and data length of the mbuf */
                rxdp->read.hdr_addr = 0;
@@ -1202,7 +1202,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                        /* Setup TX Descriptor */
                        slen = m_seg->data_len;
-                       buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+                       buf_dma_addr = rte_mbuf_data_iova(m_seg);
 
                        PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
                                "buf_dma_addr: %#"PRIx64";\n"
@@ -1301,7 +1301,7 @@ tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
        uint32_t i;
 
        for (i = 0; i < 4; i++, txdp++, pkts++) {
-               dma_addr = rte_mbuf_data_dma_addr(*pkts);
+               dma_addr = rte_mbuf_data_iova(*pkts);
                txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
                txdp->cmd_type_offset_bsz =
                        i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1315,7 +1315,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
 {
        uint64_t dma_addr;
 
-       dma_addr = rte_mbuf_data_dma_addr(*pkts);
+       dma_addr = rte_mbuf_data_iova(*pkts);
        txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
        txdp->cmd_type_offset_bsz =
                i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -2451,7 +2451,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
                mbuf->port = rxq->port_id;
 
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 
                rxd = &rxq->rx_ring[i];
                rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index daaf02d..012d9ee 100644
@@ -185,7 +185,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
        int i;
 
        for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
-               buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+               buf_dma_addr = rte_mbuf_data_iova(*pkts);
                pkt_len = (*pkts)->data_len;
 
                /* write data to descriptor */
@@ -208,7 +208,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
        uint64_t buf_dma_addr;
        uint32_t pkt_len;
 
-       buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+       buf_dma_addr = rte_mbuf_data_iova(*pkts);
        pkt_len = (*pkts)->data_len;
 
        /* write data to descriptor */
@@ -924,7 +924,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         * Set up Transmit Data Descriptor.
                         */
                        slen = m_seg->data_len;
-                       buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+                       buf_dma_addr = rte_mbuf_data_iova(m_seg);
                        txd->read.buffer_addr =
                                rte_cpu_to_le_64(buf_dma_addr);
                        txd->read.cmd_type_len =
@@ -1633,7 +1633,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
                mb->data_off = RTE_PKTMBUF_HEADROOM;
 
                /* populate the descriptors */
-               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
+               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
                rxdp[i].read.hdr_addr = 0;
                rxdp[i].read.pkt_addr = dma_addr;
        }
@@ -1865,7 +1865,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
                rxdp->read.hdr_addr = 0;
                rxdp->read.pkt_addr = dma_addr;
 
@@ -2159,7 +2159,7 @@ next_desc:
 
                if (!bulk_alloc) {
                        __le64 dma =
-                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
                        /*
                         * Update RX descriptor with the physical address of the
                         * new data buffer of the new allocated mbuf.
@@ -4188,7 +4188,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
                mbuf->port = rxq->port_id;
 
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
                rxd = &rxq->rx_ring[i];
                rxd->read.hdr_addr = 0;
                rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
index 71099e1..efad4e7 100644
@@ -1298,7 +1298,7 @@ lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
        sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
        memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
        sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
-       sc->dma_addr = rte_mbuf_data_dma_addr(m);
+       sc->dma_addr = rte_mbuf_data_iova(m);
        sc->mbuf = m;
 
        dma_addr = sc->dma_addr;
@@ -1739,7 +1739,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
                        cmdsetup.s.u.datasize = pkt_len;
                        lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
                                            &cmdsetup, tag);
-                       ndata.cmd.cmd3.dptr = rte_mbuf_data_dma_addr(m);
+                       ndata.cmd.cmd3.dptr = rte_mbuf_data_iova(m);
                        ndata.reqtype = LIO_REQTYPE_NORESP_NET;
                } else {
                        struct lio_buf_free_info *finfo;
@@ -1771,7 +1771,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
                                            &cmdsetup, tag);
 
                        memset(g->sg, 0, g->sg_size);
-                       g->sg[0].ptr[0] = rte_mbuf_data_dma_addr(m);
+                       g->sg[0].ptr[0] = rte_mbuf_data_iova(m);
                        lio_add_sg_size(&g->sg[0], m->data_len, 0);
                        pkt_len = m->data_len;
                        finfo->mbuf = m;
@@ -1782,7 +1782,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
                        m = m->next;
                        while (frags--) {
                                g->sg[(i >> 2)].ptr[(i & 3)] =
-                                               rte_mbuf_data_dma_addr(m);
+                                               rte_mbuf_data_iova(m);
                                lio_add_sg_size(&g->sg[(i >> 2)],
                                                m->data_len, (i & 3));
                                pkt_len += m->data_len;
diff --git a/drivers/net/liquidio/lio_rxtx.h b/drivers/net/liquidio/lio_rxtx.h
index 85685dc..47d84fb 100644
@@ -688,7 +688,7 @@ lio_map_ring(void *buf)
 {
        phys_addr_t dma_addr;
 
-       dma_addr = rte_mbuf_data_dma_addr_default(((struct rte_mbuf *)buf));
+       dma_addr = rte_mbuf_data_iova_default(((struct rte_mbuf *)buf));
 
        return (uint64_t)dma_addr;
 }
diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mrvl/mrvl_ethdev.c
index 03d9fec..a897ba0 100644
@@ -1153,7 +1153,7 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
                }
 
                entries[i].buff.addr =
-                       rte_mbuf_data_dma_addr_default(mbufs[i]);
+                       rte_mbuf_data_iova_default(mbufs[i]);
                entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
                entries[i].bpool = bpool;
        }
@@ -1598,7 +1598,7 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
                if (unlikely(status != PP2_DESC_ERR_OK)) {
                        struct pp2_buff_inf binf = {
-                               .addr = rte_mbuf_data_dma_addr_default(mbuf),
+                               .addr = rte_mbuf_data_iova_default(mbuf),
                                .cookie = (pp2_cookie_t)(uint64_t)mbuf,
                        };
 
@@ -1854,7 +1854,7 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
                sq->ent[sq->head].buff.addr =
-                       rte_mbuf_data_dma_addr_default(mbuf);
+                       rte_mbuf_data_iova_default(mbuf);
                sq->ent[sq->head].bpool =
                        (unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
                         NULL : mrvl_port_to_bpool_lookup[mbuf->port];
@@ -1863,7 +1863,7 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                pp2_ppio_outq_desc_reset(&descs[i]);
                pp2_ppio_outq_desc_set_phys_addr(&descs[i],
-                                                rte_pktmbuf_mtophys(mbuf));
+                                                rte_pktmbuf_iova(mbuf));
                pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
                pp2_ppio_outq_desc_set_pkt_len(&descs[i],
                                               rte_pktmbuf_pkt_len(mbuf));
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index e141627..e9001f4 100644
@@ -2266,7 +2266,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        *lmbuf = pkt;
 
                        dma_size = pkt->data_len;
-                       dma_addr = rte_mbuf_data_dma_addr(pkt);
+                       dma_addr = rte_mbuf_data_iova(pkt);
                        PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
                                   "%" PRIx64 "\n", dma_addr);
 
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index 2b58423..c97d5b3 100644
@@ -70,7 +70,7 @@ __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
                cmd_buf[0] |= (1ULL << 58); /* SET DF */
 
        /* Setup PKO_SEND_GATHER_S */
-       cmd_buf[(1 << 1) | 1] = rte_mbuf_data_dma_addr(tx_pkt);
+       cmd_buf[(1 << 1) | 1] = rte_mbuf_data_iova(tx_pkt);
        cmd_buf[(1 << 1) | 0] = PKO_SEND_GATHER_SUBDC |
                                PKO_SEND_GATHER_LDTYPE(0x1ull) |
                                PKO_SEND_GATHER_GAUAR((long)gaura_id) |
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 49de13b..8e8536f 100644
@@ -28,7 +28,7 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
        }
        rxq->sw_rx_ring[idx].mbuf = new_mb;
        rxq->sw_rx_ring[idx].page_offset = 0;
-       mapping = rte_mbuf_data_dma_addr_default(new_mb);
+       mapping = rte_mbuf_data_iova_default(new_mb);
        /* Advance PROD and get BD pointer */
        rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
        rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
@@ -1064,7 +1064,7 @@ qede_reuse_page(__rte_unused struct qede_dev *qdev,
        curr_prod = &rxq->sw_rx_ring[idx];
        *curr_prod = *curr_cons;
 
-       new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
+       new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
                      curr_prod->page_offset;
 
        rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
@@ -1565,7 +1565,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
                                memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
                                nb_segs++;
                        }
-                       mapping = rte_mbuf_data_dma_addr(m_seg);
+                       mapping = rte_mbuf_data_iova(m_seg);
                        QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
                        PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
                } else if (nb_segs == 1) {
@@ -1575,7 +1575,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
                                memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
                                nb_segs++;
                        }
-                       mapping = rte_mbuf_data_dma_addr(m_seg);
+                       mapping = rte_mbuf_data_iova(m_seg);
                        QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
                        PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
                } else {
@@ -1583,7 +1583,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
                                ecore_chain_produce(&txq->tx_pbl);
                        memset(tx_bd, 0, sizeof(*tx_bd));
                        nb_segs++;
-                       mapping = rte_mbuf_data_dma_addr(m_seg);
+                       mapping = rte_mbuf_data_iova(m_seg);
                        QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
                        PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
                }
@@ -1966,7 +1966,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                nbds++;
 
                /* Map MBUF linear data for DMA and set in the BD1 */
-               QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+               QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
                                     mbuf->data_len);
                bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
                bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
@@ -1979,11 +1979,11 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        nbds++;
 
                        /* BD1 */
-                       QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+                       QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
                                             hdr_size);
                        /* BD2 */
                        QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
-                                            rte_mbuf_data_dma_addr(mbuf)),
+                                            rte_mbuf_data_iova(mbuf)),
                                             mbuf->data_len - hdr_size);
                        bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
                        if (mplsoudp_flg) {
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 500d652..23e45b4 100644
@@ -189,7 +189,7 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
                         * structure members.
                         */
 
-                       phys_addr = rte_mbuf_data_dma_addr_default(m);
+                       phys_addr = rte_mbuf_data_iova_default(m);
                        EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
                            ESF_DZ_RX_KER_BYTE_CNT, buf_size,
                            ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index 167c91d..2e246f4 100644
@@ -341,7 +341,7 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                pkt_len = m_seg->pkt_len;
                do {
-                       phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
+                       phys_addr_t seg_addr = rte_mbuf_data_iova(m_seg);
                        unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
                        unsigned int id = added & ptr_mask;
 
@@ -464,7 +464,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
                           SFC_EF10_TX_DMA_DESC_LEN_MAX);
 
-               sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_dma_addr(pkt),
+               sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
                                             rte_pktmbuf_data_len(pkt),
                                             true, &txq->txq_hw_ring[id]);
 
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 79ed046..2ae095b 100644
@@ -128,7 +128,7 @@ sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
                        SFC_ASSERT(m->nb_segs == 1);
                        m->port = port_id;
 
-                       addr[i] = rte_pktmbuf_mtophys(m);
+                       addr[i] = rte_pktmbuf_iova(m);
                }
 
                efx_rx_qpost(rxq->common, addr, rxq->buf_size,
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index ad10067..2e7b595 100644
@@ -141,7 +141,7 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
        if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
                return EMSGSIZE;
 
-       header_paddr = rte_pktmbuf_mtophys(m);
+       header_paddr = rte_pktmbuf_iova(m);
 
        /*
         * Sometimes headers may be split across multiple mbufs. In such cases
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 4ea7bd7..127d59e 100644
@@ -765,7 +765,7 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        size_t                  seg_len;
 
                        seg_len = m_seg->data_len;
-                       next_frag = rte_mbuf_data_dma_addr(m_seg);
+                       next_frag = rte_mbuf_data_iova(m_seg);
 
                        /*
                         * If we've started TSO transaction few steps earlier,
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index cd1b754..a3ccce2 100644
@@ -60,7 +60,7 @@ fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
        sqe.gather.subdesc_type = SQ_DESC_TYPE_GATHER;
        sqe.gather.ld_type = NIC_SEND_LD_TYPE_E_LDT;
        sqe.gather.size = pkt->data_len;
-       sqe.gather.addr = rte_mbuf_data_dma_addr(pkt);
+       sqe.gather.addr = rte_mbuf_data_iova(pkt);
 
        entry->buff[0] = sqe.buff[0];
        entry->buff[1] = sqe.buff[1];
@@ -80,7 +80,7 @@ fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
        entry->buff[0] = (uint64_t)SQ_DESC_TYPE_GATHER << 60 |
                         (uint64_t)NIC_SEND_LD_TYPE_E_LDT << 58 |
                         pkt->data_len;
-       entry->buff[1] = rte_mbuf_data_dma_addr(pkt);
+       entry->buff[1] = rte_mbuf_data_iova(pkt);
 }
 #endif
 
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index aac23d8..437dcb1 100644
@@ -509,7 +509,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                        rte_cpu_to_le_64(txq->data_ring.basePA +
                                                         offset);
                        } else {
-                               gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
+                               gdesc->txd.addr = rte_mbuf_data_iova(m_seg);
                        }
 
                        gdesc->dword[2] = dw2 | m_seg->data_len;
@@ -617,7 +617,7 @@ vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
         */
        buf_info->m = mbuf;
        buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
-       buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+       buf_info->bufPA = rte_mbuf_data_iova_default(mbuf);
 
        /* Load Rx Descriptor with the buffer's GPA */
        rxd->addr = buf_info->bufPA;
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index f7afe13..c3efe52 100644
@@ -106,12 +106,12 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
                aad = get_aad(m);
                memcpy(aad, iv - sizeof(struct esp_hdr), 8);
                sym_cop->aead.aad.data = aad;
-               sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+               sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
                                aad - rte_pktmbuf_mtod(m, uint8_t *));
 
                sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
-               sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+               sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
        } else {
                sym_cop->cipher.data.offset =  ip_hdr_len + sizeof(struct esp_hdr) +
@@ -157,7 +157,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
                sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
-               sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+               sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
        }
 
@@ -405,12 +405,12 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
                aad = get_aad(m);
                memcpy(aad, esp, 8);
                sym_cop->aead.aad.data = aad;
-               sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+               sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
                                aad - rte_pktmbuf_mtod(m, uint8_t *));
 
                sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
                        rte_pktmbuf_pkt_len(m) - sa->digest_len);
-               sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+               sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
                        rte_pktmbuf_pkt_len(m) - sa->digest_len);
        } else {
                switch (sa->cipher_algo) {
@@ -458,7 +458,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
                sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
-               sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+               sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
        }
 
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 06f90ab..b973679 100644
@@ -497,7 +497,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
                                uint8_t *) + ipdata_offset + data_len;
                }
 
-               op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+               op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
                                rte_pktmbuf_pkt_len(m) - cparams->digest_length);
 
                /* For wireless algorithms, offset/length must be in bits */
@@ -558,7 +558,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
                                uint8_t *) + ipdata_offset + data_len;
                }
 
-               op->sym->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+               op->sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
                                rte_pktmbuf_pkt_len(m) - cparams->digest_length);
 
                if (cparams->aad.length) {
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 7a4634f..6d91f7d 100644
@@ -625,21 +625,28 @@ rte_mbuf_prefetch_part2(struct rte_mbuf *m)
 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
 
 /**
- * Return the DMA address of the beginning of the mbuf data
+ * Return the IO address of the beginning of the mbuf data
  *
  * @param mb
  *   The pointer to the mbuf.
  * @return
- *   The physical address of the beginning of the mbuf data
+ *   The IO address of the beginning of the mbuf data
  */
+static inline rte_iova_t
+rte_mbuf_data_iova(const struct rte_mbuf *mb)
+{
+       return mb->buf_iova + mb->data_off;
+}
+
+__rte_deprecated
 static inline phys_addr_t
 rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
 {
-       return mb->buf_iova + mb->data_off;
+       return rte_mbuf_data_iova(mb);
 }
 
 /**
- * Return the default DMA address of the beginning of the mbuf data
+ * Return the default IO address of the beginning of the mbuf data
  *
  * This function is used by drivers in their receive function, as it
  * returns the location where data should be written by the NIC, taking
@@ -648,12 +655,19 @@ rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
  * @param mb
  *   The pointer to the mbuf.
  * @return
- *   The physical address of the beginning of the mbuf data
+ *   The IO address of the beginning of the mbuf data
  */
+static inline rte_iova_t
+rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
+{
+       return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+}
+
+__rte_deprecated
 static inline phys_addr_t
 rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
 {
-       return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+       return rte_mbuf_data_iova_default(mb);
 }
 
 /**
@@ -1564,7 +1578,7 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
 #define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
 
 /**
- * A macro that returns the physical address that points to an offset of the
+ * A macro that returns the IO address that points to an offset of the
  * start of the data in the mbuf
  *
  * @param m
@@ -1572,17 +1586,24 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
  * @param o
  *   The offset into the data to calculate address from.
  */
-#define rte_pktmbuf_mtophys_offset(m, o) \
+#define rte_pktmbuf_iova_offset(m, o) \
        (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
 
+/* deprecated */
+#define rte_pktmbuf_mtophys_offset(m, o) \
+       rte_pktmbuf_iova_offset(m, o)
+
 /**
- * A macro that returns the physical address that points to the start of the
+ * A macro that returns the IO address that points to the start of the
  * data in the mbuf
  *
  * @param m
  *   The packet mbuf.
  */
-#define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0)
+#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
+
+/* deprecated */
+#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
 
 /**
  * A macro that returns the length of the packet.
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 72988c5..060b498 100644
@@ -1332,7 +1332,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
 
        /* Set crypto operation authentication parameters */
        sym_op->auth.digest.data = ut_params->digest;
-       sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+       sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
                        ut_params->ibuf, QUOTE_512_BYTES);
 
        sym_op->auth.data.offset = 0;
@@ -1484,7 +1484,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
        sym_op->m_src = ut_params->ibuf;
 
        sym_op->auth.digest.data = ut_params->digest;
-       sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+       sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
                        ut_params->ibuf, QUOTE_512_BYTES);
 
        sym_op->auth.data.offset = 0;
@@ -2385,7 +2385,7 @@ create_wireless_algo_hash_operation(const uint8_t *auth_tag,
        TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
                                "no room to append auth tag");
        ut_params->digest = sym_op->auth.digest.data;
-       sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+       sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
                        ut_params->ibuf, data_pad_len);
        if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
                memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -2441,7 +2441,7 @@ create_wireless_cipher_hash_operation(const struct wireless_test_data *tdata,
        TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
                        "no room to append auth tag");
        ut_params->digest = sym_op->auth.digest.data;
-       sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+       sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
                        ut_params->ibuf, data_pad_len);
        if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
                memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -2508,7 +2508,7 @@ create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag,
        TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
                        "no room to append auth tag");
        ut_params->digest = sym_op->auth.digest.data;
-       sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+       sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
                        ut_params->ibuf, data_pad_len);
        if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
                memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -2566,7 +2566,7 @@ create_wireless_algo_auth_cipher_operation(unsigned int auth_tag_len,
        TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
                        "no room to append auth tag");
 
-       sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+       sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
                        ut_params->ibuf, data_pad_len);
 
        memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -5153,7 +5153,7 @@ create_aead_operation(enum rte_crypto_aead_operation op,
                                "no room to append aad");
 
                sym_op->aead.aad.phys_addr =
-                               rte_pktmbuf_mtophys(ut_params->ibuf);
+                               rte_pktmbuf_iova(ut_params->ibuf);
                /* Copy AAD 18 bytes after the AAD pointer, according to the API */
                memcpy(sym_op->aead.aad.data + 18, tdata->aad.data, tdata->aad.len);
                TEST_HEXDUMP(stdout, "aad:", sym_op->aead.aad.data,
@@ -5175,7 +5175,7 @@ create_aead_operation(enum rte_crypto_aead_operation op,
                                "no room to append aad");
 
                sym_op->aead.aad.phys_addr =
-                               rte_pktmbuf_mtophys(ut_params->ibuf);
+                               rte_pktmbuf_iova(ut_params->ibuf);
                memcpy(sym_op->aead.aad.data, tdata->aad.data, tdata->aad.len);
                TEST_HEXDUMP(stdout, "aad:", sym_op->aead.aad.data,
                        tdata->aad.len);
@@ -5243,7 +5243,7 @@ create_aead_operation(enum rte_crypto_aead_operation op,
                TEST_ASSERT_NOT_NULL(sym_op->aead.digest.data,
                                "no room to append digest");
                memset(sym_op->aead.digest.data, 0, tdata->auth_tag.len);
-               sym_op->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+               sym_op->aead.digest.phys_addr = rte_pktmbuf_iova_offset(
                                ut_params->obuf ? ut_params->obuf :
                                                ut_params->ibuf,
                                                plaintext_pad_len +
@@ -5253,7 +5253,7 @@ create_aead_operation(enum rte_crypto_aead_operation op,
                                ut_params->ibuf, tdata->auth_tag.len);
                TEST_ASSERT_NOT_NULL(sym_op->aead.digest.data,
                                "no room to append digest");
-               sym_op->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+               sym_op->aead.digest.phys_addr = rte_pktmbuf_iova_offset(
                                ut_params->ibuf,
                                plaintext_pad_len + aad_pad_len);
 
@@ -6226,7 +6226,7 @@ static int MD5_HMAC_create_op(struct crypto_unittest_params *ut_params,
                        ut_params->ibuf, MD5_DIGEST_LEN);
        TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
                        "no room to append digest");
-       sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+       sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
                        ut_params->ibuf, plaintext_pad_len);
 
        if (ut_params->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
@@ -6962,7 +6962,7 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
        TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
                        "no room to append digest");
 
-       sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+       sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
                        ut_params->ibuf, plaintext_pad_len);
 
        if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
@@ -7484,7 +7484,7 @@ create_auth_operation(struct crypto_testsuite_params *ts_params,
        TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
                        "no room to append auth tag");
 
-       sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+       sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
                        ut_params->ibuf, reference->plaintext.len);
 
        if (auth_generate)
@@ -7531,7 +7531,7 @@ create_auth_GMAC_operation(struct crypto_testsuite_params *ts_params,
        TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
                        "no room to append auth tag");
 
-       sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+       sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
                        ut_params->ibuf, reference->ciphertext.len);
 
        if (auth_generate)
@@ -7584,7 +7584,7 @@ create_cipher_auth_operation(struct crypto_testsuite_params *ts_params,
        TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
                        "no room to append auth tag");
 
-       sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+       sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
                        ut_params->ibuf, reference->ciphertext.len);
 
        if (auth_generate)
@@ -7863,7 +7863,7 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op,
                                ut_params->ibuf, aad_len);
                TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
                                "no room to prepend aad");
-               sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(
+               sym_op->aead.aad.phys_addr = rte_pktmbuf_iova(
                                ut_params->ibuf);
 
                memset(sym_op->aead.aad.data, 0, aad_len);
@@ -7883,7 +7883,7 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op,
                                ut_params->ibuf, aad_len);
                TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
                                "no room to prepend aad");
-               sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(
+               sym_op->aead.aad.phys_addr = rte_pktmbuf_iova(
                                ut_params->ibuf);
 
                memset(sym_op->aead.aad.data, 0, aad_len);
@@ -8030,7 +8030,7 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
                        digest_mem = rte_pktmbuf_append(ut_params->obuf,
                                tdata->auth_tag.len);
 
-                       digest_phys = rte_pktmbuf_mtophys_offset(
+                       digest_phys = rte_pktmbuf_iova_offset(
                                        ut_params->obuf,
                                        tdata->plaintext.len + prepend_len);
                }
@@ -8068,14 +8068,14 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
         * Place digest at the end of the last buffer
         */
        if (!digest_phys)
-               digest_phys = rte_pktmbuf_mtophys(buf) + to_trn;
+               digest_phys = rte_pktmbuf_iova(buf) + to_trn;
        if (oop && buf_last_oop)
-               digest_phys = rte_pktmbuf_mtophys(buf_last_oop) + to_trn;
+               digest_phys = rte_pktmbuf_iova(buf_last_oop) + to_trn;
 
        if (!digest_mem && !oop) {
                digest_mem = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
                                + tdata->auth_tag.len);
-               digest_phys = rte_pktmbuf_mtophys_offset(ut_params->ibuf,
+               digest_phys = rte_pktmbuf_iova_offset(ut_params->ibuf,
                                tdata->plaintext.len);
        }
 
diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
index 2e9eb0b..26bfbe6 100644
@@ -153,18 +153,18 @@ pktmbuf_mtod_offset(struct rte_mbuf *mbuf, int offset) {
        return rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
 }
 
-static inline phys_addr_t
-pktmbuf_mtophys_offset(struct rte_mbuf *mbuf, int offset) {
+static inline rte_iova_t
+pktmbuf_iova_offset(struct rte_mbuf *mbuf, int offset) {
        struct rte_mbuf *m;
 
        for (m = mbuf; (m != NULL) && (offset > m->data_len); m = m->next)
                offset -= m->data_len;
 
        if (m == NULL) {
-               printf("pktmbuf_mtophys_offset: offset out of buffer\n");
+               printf("pktmbuf_iova_offset: offset out of buffer\n");
                return 0;
        }
-       return rte_pktmbuf_mtophys_offset(m, offset);
+       return rte_pktmbuf_iova_offset(m, offset);
 }
 
 static inline struct rte_mbuf *
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index 9a9fd8b..20f3296 100644
@@ -329,14 +329,14 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
                        sym_op->auth.digest.data = pktmbuf_mtod_offset
                                (iobuf, digest_offset);
                        sym_op->auth.digest.phys_addr =
-                               pktmbuf_mtophys_offset(iobuf,
+                               pktmbuf_iova_offset(iobuf,
                                        digest_offset);
                } else {
                        auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
                        sym_op->auth.digest.data = pktmbuf_mtod_offset
                                (sym_op->m_src, digest_offset);
                        sym_op->auth.digest.phys_addr =
-                               pktmbuf_mtophys_offset(sym_op->m_src,
+                               pktmbuf_iova_offset(sym_op->m_src,
                                        digest_offset);
                }