The flag name ``MEMPOOL_F_NO_PHYS_CONTIG`` is removed,
while the aliased flag ``MEMPOOL_F_NO_IOVA_CONTIG`` is kept.
+* mbuf: Removed the functions ``rte_mbuf_data_dma_addr*``
+ and the macros ``rte_pktmbuf_mtophys*``.
+ The same functionality is still available with the functions and macros
+ having ``iova`` in their names instead of ``dma_addr`` or ``mtophys``.
+
* mbuf: Removed the unioned field ``refcnt_atomic`` from
the structures ``rte_mbuf`` and ``rte_mbuf_ext_shared_info``.
  The field ``refcnt`` remains from the old unions.
desc->num_null = op->ldpc_enc.n_filler;
/* Set inbound data buffer address */
desc->in_addr_hi = (uint32_t)(
- rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
+ rte_pktmbuf_iova_offset(input, in_offset) >> 32);
desc->in_addr_lw = (uint32_t)(
- rte_pktmbuf_mtophys_offset(input, in_offset));
+ rte_pktmbuf_iova_offset(input, in_offset));
desc->out_addr_hi = (uint32_t)(
- rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
+ rte_pktmbuf_iova_offset(output, out_offset) >> 32);
desc->out_addr_lw = (uint32_t)(
- rte_pktmbuf_mtophys_offset(output, out_offset));
+ rte_pktmbuf_iova_offset(output, out_offset));
/* Save software context needed for dequeue */
desc->op_addr = op;
/* Set total number of CBs in an op */
desc->error = 0;
/* Set inbound data buffer address */
desc->in_addr_hi = (uint32_t)(
- rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
+ rte_pktmbuf_iova_offset(input, in_offset) >> 32);
desc->in_addr_lw = (uint32_t)(
- rte_pktmbuf_mtophys_offset(input, in_offset));
+ rte_pktmbuf_iova_offset(input, in_offset));
desc->rm_e = op->ldpc_dec.cb_params.e;
desc->harq_input_length = harq_in_length;
desc->et_dis = !check_bit(op->ldpc_dec.op_flags,
desc->max_iter = op->ldpc_dec.iter_max;
desc->qm_idx = op->ldpc_dec.q_m / 2;
desc->out_addr_hi = (uint32_t)(
- rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
+ rte_pktmbuf_iova_offset(output, out_offset) >> 32);
desc->out_addr_lw = (uint32_t)(
- rte_pktmbuf_mtophys_offset(output, out_offset));
+ rte_pktmbuf_iova_offset(output, out_offset));
/* Save software context needed for dequeue */
desc->op_addr = op;
/* Set total number of CBs in an op */
desc->offset = desc_offset;
/* Set inbound data buffer address */
desc->in_addr_hi = (uint32_t)(
- rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
+ rte_pktmbuf_iova_offset(input, in_offset) >> 32);
desc->in_addr_lw = (uint32_t)(
- rte_pktmbuf_mtophys_offset(input, in_offset));
+ rte_pktmbuf_iova_offset(input, in_offset));
desc->out_addr_hi = (uint32_t)(
- rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
+ rte_pktmbuf_iova_offset(output, out_offset) >> 32);
desc->out_addr_lw = (uint32_t)(
- rte_pktmbuf_mtophys_offset(output, out_offset));
+ rte_pktmbuf_iova_offset(output, out_offset));
/* Save software context needed for dequeue */
desc->op_addr = op;
desc->done = 0;
/* Set inbound data buffer address */
desc->in_addr_hi = (uint32_t)(
- rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
+ rte_pktmbuf_iova_offset(input, in_offset) >> 32);
desc->in_addr_lw = (uint32_t)(
- rte_pktmbuf_mtophys_offset(input, in_offset));
+ rte_pktmbuf_iova_offset(input, in_offset));
desc->in_len = in_length;
desc->k = k;
desc->crc_type = !check_bit(op->turbo_dec.op_flags,
desc->max_iter = op->turbo_dec.iter_max * 2;
desc->offset = desc_offset;
desc->out_addr_hi = (uint32_t)(
- rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
+ rte_pktmbuf_iova_offset(output, out_offset) >> 32);
desc->out_addr_lw = (uint32_t)(
- rte_pktmbuf_mtophys_offset(output, out_offset));
+ rte_pktmbuf_iova_offset(output, out_offset));
/* Save software context needed for dequeue */
desc->op_addr = op;
if (!start_offset) {
seg_data = rte_pktmbuf_mtod(pkt, void *);
- seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_phys = rte_pktmbuf_iova(pkt);
seg_size = pkt->data_len;
} else {
while (start_offset >= pkt->data_len) {
}
seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
- seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
+ seg_phys = rte_pktmbuf_iova_offset(pkt, start_offset);
seg_size = pkt->data_len - start_offset;
if (!seg_size)
return 1;
while (unlikely(pkt != NULL)) {
seg_data = rte_pktmbuf_mtod(pkt, void *);
- seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_phys = rte_pktmbuf_iova(pkt);
seg_size = pkt->data_len;
if (!seg_size)
break;
iov_ptr_t *iovec;
seg_data = rte_pktmbuf_mtod(pkt, void *);
- seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_phys = rte_pktmbuf_iova(pkt);
seg_size = pkt->data_len;
/* first seg */
while (unlikely(pkt != NULL)) {
seg_data = rte_pktmbuf_mtod(pkt, void *);
- seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_phys = rte_pktmbuf_iova(pkt);
seg_size = pkt->data_len;
if (!seg_size)
params.mac_buf.vaddr =
rte_pktmbuf_mtod_offset(m_dst, void *, off);
params.mac_buf.dma_addr =
- rte_pktmbuf_mtophys_offset(m_dst, off);
+ rte_pktmbuf_iova_offset(m_dst, off);
params.mac_buf.size = mac_len;
}
} else {
comp_req->comp_pars.out_buffer_sz;
comp_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
+ rte_pktmbuf_iova_offset(op->m_src, op->src.offset);
comp_req->comn_mid.dest_data_addr =
- rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
+ rte_pktmbuf_iova_offset(op->m_dst, op->dst.offset);
}
if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
ccp_cryptodev_driver_id);
addr = session->auth.pre_compute;
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
op->sym->auth.data.offset);
append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
session->auth.ctx_len);
op->sym->session,
ccp_cryptodev_driver_id);
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
op->sym->auth.data.offset);
append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
op->sym->session,
ccp_cryptodev_driver_id);
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
op->sym->auth.data.offset);
append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
session->auth.ctx_len);
op->sym->session,
ccp_cryptodev_driver_id);
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
op->sym->auth.data.offset);
append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
session->auth.ctx_len);
ccp_cryptodev_driver_id);
key_addr = rte_mem_virt2phy(session->auth.key_ccp);
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
op->sym->auth.data.offset);
append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
session->auth.ctx_len);
desc = &cmd_q->qbase_desc[cmd_q->qidx];
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
op->sym->cipher.data.offset);
if (likely(op->sym->m_dst != NULL))
- dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ dest_addr = rte_pktmbuf_iova_offset(op->sym->m_dst,
op->sym->cipher.data.offset);
else
dest_addr = src_addr;
return -ENOTSUP;
}
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
op->sym->cipher.data.offset);
if (unlikely(op->sym->m_dst != NULL))
dest_addr =
- rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ rte_pktmbuf_iova_offset(op->sym->m_dst,
op->sym->cipher.data.offset);
else
dest_addr = src_addr;
iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
key_addr = session->cipher.key_phys;
- src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
op->sym->aead.data.offset);
if (unlikely(op->sym->m_dst != NULL))
- dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ dest_addr = rte_pktmbuf_iova_offset(op->sym->m_dst,
op->sym->aead.data.offset);
else
dest_addr = src_addr;
sg++;
}
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->offset = data_offset;
if (data_len <= (mbuf->data_len - data_offset)) {
(mbuf = mbuf->next)) {
cpu_to_hw_sg(sg);
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
if (data_len > mbuf->data_len)
sg->length = mbuf->data_len;
else
sg++;
}
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->offset = data_offset;
sg->length = data_len;
/* 1st seg */
sg = &cf->sg[2];
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len - data_offset;
sg->offset = data_offset;
while (mbuf) {
cpu_to_hw_sg(sg);
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len;
mbuf = mbuf->next;
}
/* 1st seg */
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len - data_offset;
sg->offset = data_offset;
while (mbuf) {
cpu_to_hw_sg(sg);
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len;
mbuf = mbuf->next;
}
cpu_to_hw_sg(out_sg);
/* 1st seg */
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len - sym->aead.data.offset;
sg->offset = sym->aead.data.offset;
while (mbuf) {
cpu_to_hw_sg(sg);
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len;
mbuf = mbuf->next;
}
/* 3rd seg */
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len - sym->aead.data.offset;
sg->offset = sym->aead.data.offset;
while (mbuf) {
cpu_to_hw_sg(sg);
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len;
mbuf = mbuf->next;
}
cpu_to_hw_sg(out_sg);
/* 1st seg */
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len - sym->auth.data.offset;
sg->offset = sym->auth.data.offset;
while (mbuf) {
cpu_to_hw_sg(sg);
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len;
mbuf = mbuf->next;
}
/* 2nd seg */
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len - sym->auth.data.offset;
sg->offset = sym->auth.data.offset;
while (mbuf) {
cpu_to_hw_sg(sg);
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len;
mbuf = mbuf->next;
}
cf = &ctx->job;
ctx->op = op;
- src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
if (sym->m_dst)
- dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
else
dst_start_addr = src_start_addr;
/* 1st seg */
sg = &cf->sg[2];
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->offset = 0;
/* Successive segs */
mbuf = mbuf->next;
cpu_to_hw_sg(sg);
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->offset = 0;
}
sg->length = mbuf->buf_len - mbuf->data_off;
qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
/* 1st seg */
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len;
sg->offset = 0;
while (mbuf) {
cpu_to_hw_sg(sg);
sg++;
- qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
sg->length = mbuf->data_len;
sg->offset = 0;
in_len += sg->length;
op->sym->auth.data.length + digest->len))
return -EINVAL;
- digest->iova = rte_pktmbuf_mtophys_offset(mdst,
+ digest->iova = rte_pktmbuf_iova_offset(mdst,
op->sym->auth.data.offset +
op->sym->auth.data.length);
digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
if (datalen <= mlen)
mlen = datalen;
sglist[cnt].len = mlen;
- sglist[cnt].iova = rte_pktmbuf_mtophys_offset(m, off);
+ sglist[cnt].iova = rte_pktmbuf_iova_offset(m, off);
sglist[cnt].virt = rte_pktmbuf_mtod_offset(m, uint8_t *, off);
sgtbl->total_bytes += mlen;
cnt++;
mlen = rte_pktmbuf_data_len(m) < datalen ?
rte_pktmbuf_data_len(m) : datalen;
sglist[cnt].len = mlen;
- sglist[cnt].iova = rte_pktmbuf_mtophys(m);
+ sglist[cnt].iova = rte_pktmbuf_iova(m);
sglist[cnt].virt = rte_pktmbuf_mtod(m, uint8_t *);
sgtbl->total_bytes += mlen;
cnt++;
}
/* indirect vring: src data */
- desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
desc[idx].len = (sym_op->cipher.data.offset
+ sym_op->cipher.data.length);
desc[idx++].flags = VRING_DESC_F_NEXT;
/* indirect vring: dst data */
if (sym_op->m_dst) {
- desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0);
+ desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_dst, 0);
desc[idx].len = (sym_op->cipher.data.offset
+ sym_op->cipher.data.length);
} else {
- desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
desc[idx].len = (sym_op->cipher.data.offset
+ sym_op->cipher.data.length);
}
axgbe_rx_queue_release(rxq);
return -ENOMEM;
}
- rxq->ring_phys_addr = (uint64_t)dma->phys_addr;
+ rxq->ring_phys_addr = (uint64_t)dma->iova;
rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
memset((void *)rxq->desc, 0, size);
/* Allocate software ring */
return -ENOMEM;
}
memset(tz->addr, 0, tsize);
- txq->ring_phys_addr = (uint64_t)tz->phys_addr;
+ txq->ring_phys_addr = (uint64_t)tz->iova;
txq->desc = tz->addr;
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
*/
rte_pktmbuf_append(m, extend_tail);
data = rte_pktmbuf_prepend(m, extend_head);
- data_addr = rte_pktmbuf_mtophys(m);
+ data_addr = rte_pktmbuf_iova(m);
/*
* Move the Ethernet header, to insert otx2_ipsec_fp_out_hdr prior
sym->auth.data.length = vec.pt.len;
sym->auth.digest.data = pt + vec.pt.len;
- sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
env.mbuf, vec.pt.len);
memcpy(pt, vec.pt.val, vec.pt.len);
memcpy(pt, vec.pt.val, vec.pt.len);
sym->aead.data.length = vec.pt.len;
sym->aead.digest.data = pt + vec.pt.len;
- sym->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(
env.mbuf, vec.pt.len);
} else {
uint8_t *ct;
return mb->buf_iova + mb->data_off;
}
-__rte_deprecated
-static inline phys_addr_t
-rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
-{
- return rte_mbuf_data_iova(mb);
-}
-
/**
* Return the default IO address of the beginning of the mbuf data
*
return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}
-__rte_deprecated
-static inline phys_addr_t
-rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
-{
- return rte_mbuf_data_iova_default(mb);
-}
-
/**
* Return the mbuf owning the data buffer address of an indirect mbuf.
*
return m;
}
-/* deprecated */
-#define rte_pktmbuf_mtophys_offset(m, o) \
- rte_pktmbuf_iova_offset(m, o)
-
-/* deprecated */
-#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
-
/**
* A macro that returns the length of the packet.
*