mbuf: rename the rte_mbuf field buf_physaddr to buf_iova.
Keep the deprecated name in an anonymous union to avoid breaking
the API.
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Reviewed-by: Anatoly Burakov <anatoly.burakov@intel.com>
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
/* start of buffer is after mbuf structure and priv data */
m->priv_size = 0;
m->buf_addr = (char *)m + mbuf_hdr_size;
- m->buf_physaddr = rte_mempool_virt2iova(obj) +
+ m->buf_iova = rte_mempool_virt2iova(obj) +
mbuf_offset + mbuf_hdr_size;
m->buf_len = segment_sz;
m->data_len = segment_sz;
/* start of buffer is after mbuf structure and priv data */
m->priv_size = 0;
m->buf_addr = (char *)m + mbuf_hdr_size;
- m->buf_physaddr = next_seg_phys_addr;
+ m->buf_iova = next_seg_phys_addr;
next_seg_phys_addr += mbuf_hdr_size + segment_sz;
m->buf_len = segment_sz;
m->data_len = segment_sz;
sodipodi:role="line"
x="187.85715"
y="347.7193"
- id="tspan5240">(m->buf_physaddr is the</tspan><tspan
+ id="tspan5240">(m->buf_iova is the</tspan><tspan
sodipodi:role="line"
x="187.85715"
y="360.2193"
* These routines are called with help of below MACRO's
*/
-#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_physaddr)
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)
#define DPAA2_OP_VADDR_TO_IOVA(op) (op->phys_addr)
/**
DPAA2_SET_FD_FLC(fd, ((uint64_t)flc));
/* save physical address of mbuf */
- op->sym->aead.digest.phys_addr = mbuf->buf_physaddr;
- mbuf->buf_physaddr = (uint64_t)op;
+ op->sym->aead.digest.phys_addr = mbuf->buf_iova;
+ mbuf->buf_iova = (uint64_t)op;
return 0;
}
DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
- op = (struct rte_crypto_op *)mbuf->buf_physaddr;
- mbuf->buf_physaddr = op->sym->aead.digest.phys_addr;
+ op = (struct rte_crypto_op *)mbuf->buf_iova;
+ mbuf->buf_iova = op->sym->aead.digest.phys_addr;
op->sym->aead.digest.phys_addr = 0L;
sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
ses->iv.offset);
- src_start_addr = sym->m_src->buf_physaddr + sym->m_src->data_off;
+ src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
if (sym->m_dst)
- dst_start_addr = sym->m_dst->buf_physaddr + sym->m_dst->data_off;
+ dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
else
dst_start_addr = src_start_addr;
case 0:
while (count != nb) {
queue->paddress_q[seed_m++] =
- (*mbufs++)->buf_physaddr;
+ (*mbufs++)->buf_iova;
count++;
/* FALLTHROUGH */
case 3:
queue->paddress_q[seed_m++] =
- (*mbufs++)->buf_physaddr;
+ (*mbufs++)->buf_iova;
count++;
/* FALLTHROUGH */
case 2:
queue->paddress_q[seed_m++] =
- (*mbufs++)->buf_physaddr;
+ (*mbufs++)->buf_iova;
count++;
/* FALLTHROUGH */
case 1:
queue->paddress_q[seed_m++] =
- (*mbufs++)->buf_physaddr;
+ (*mbufs++)->buf_iova;
count++;
/* FALLTHROUGH */
return -ENOMEM;
}
rxq->sw_ring[idx] = mbuf;
- rxq->rx_ring[idx] = mbuf->buf_physaddr;
+ rxq->rx_ring[idx] = mbuf->buf_iova;
}
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
rx_mb = rxq->sw_ring[bd_cons];
rxq->sw_ring[bd_cons] = new_mb;
- rxq->rx_ring[bd_prod] = new_mb->buf_physaddr;
+ rxq->rx_ring[bd_prod] = new_mb->buf_iova;
rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
rte_prefetch0(rxq->sw_ring[rx_pref]);
#define RING_NEXT(ring, idx) (((idx) + 1) & (ring)->ring_mask)
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
+ ((uint64_t)((mb)->buf_iova + (mb)->data_off))
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
struct rte_mbuf *m = mbuf;
for (; m; m = m->next, addr++) {
- *addr = m->buf_physaddr + rte_pktmbuf_headroom(m);
+ *addr = m->buf_iova + rte_pktmbuf_headroom(m);
if (*addr == 0)
goto out_err;
}
mbuf->nb_segs = 1;
mbuf->port = rxq->rspq.port_id;
- mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_physaddr +
+ mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_iova +
mbuf->data_off,
adap->sge.fl_align);
mapping |= buf_size_idx;
(_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
(_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
(_fd)->opaque |= (_mbuf)->pkt_len; \
- (_fd)->addr = (_mbuf)->buf_physaddr; \
+ (_fd)->addr = (_mbuf)->buf_iova; \
(_fd)->bpid = _bpid; \
} while (0)
sgt = temp->buf_addr + temp->data_off;
fd->format = QM_FD_SG;
- fd->addr = temp->buf_physaddr;
+ fd->addr = temp->buf_iova;
fd->offset = temp->data_off;
fd->bpid = bpid;
fd->length20 = mbuf->pkt_len;
sg_temp = &sgt[i++];
sg_temp->opaque = 0;
sg_temp->val = 0;
- sg_temp->addr = cur_seg->buf_physaddr;
+ sg_temp->addr = cur_seg->buf_iova;
sg_temp->offset = cur_seg->data_off;
sg_temp->length = cur_seg->data_len;
if (RTE_MBUF_DIRECT(cur_seg)) {
rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
/* prepare physical address for DMA transaction */
- ebuf.paddr = mbuf->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
/* pass resource to device */
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
* consideration pushed header
*/
if (mbuf->data_len > ena_tx_ctx.header_len) {
- ebuf->paddr = mbuf->buf_physaddr +
+ ebuf->paddr = mbuf->buf_iova +
mbuf->data_off +
ena_tx_ctx.header_len;
ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
}
while ((mbuf = mbuf->next) != NULL) {
- ebuf->paddr = mbuf->buf_physaddr + mbuf->data_off;
+ ebuf->paddr = mbuf->buf_iova + mbuf->data_off;
ebuf->len = mbuf->data_len;
ebuf++;
tx_info->num_of_bufs++;
}
mb->data_off = RTE_PKTMBUF_HEADROOM;
- dma_addr = (dma_addr_t)(mb->buf_physaddr
+ dma_addr = (dma_addr_t)(mb->buf_iova
+ RTE_PKTMBUF_HEADROOM);
rq_enet_desc_enc(rqd, dma_addr,
(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
/* Push descriptor for newly allocated mbuf */
nmb->data_off = RTE_PKTMBUF_HEADROOM;
- dma_addr = (dma_addr_t)(nmb->buf_physaddr +
+ dma_addr = (dma_addr_t)(nmb->buf_iova +
RTE_PKTMBUF_HEADROOM);
rq_enet_desc_enc(rqd_ptr, dma_addr,
(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
vlan_id = 0;
vlan_tag_insert = 0;
bus_addr = (dma_addr_t)
- (tx_pkt->buf_physaddr + tx_pkt->data_off);
+ (tx_pkt->buf_iova + tx_pkt->data_off);
descs = (struct wq_enet_desc *)wq->ring.descs;
desc_p = descs + head_idx;
if (tx_pkt->next == NULL)
eop = 1;
desc_p = descs + head_idx;
- bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
+ bus_addr = (dma_addr_t)(tx_pkt->buf_iova
+ tx_pkt->data_off);
wq_enet_desc_enc((struct wq_enet_desc *)
&desc_tmp, bus_addr, data_len,
};
#define MBUF_DMA_ADDR(mb) \
- ((uint64_t) ((mb)->buf_physaddr + (mb)->data_off))
+ ((uint64_t) ((mb)->buf_iova + (mb)->data_off))
/* enforce 512B alignment on default Rx DMA addresses */
#define MBUF_DMA_ADDR_DEFAULT(mb) \
- ((uint64_t) RTE_ALIGN(((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM),\
+ ((uint64_t) RTE_ALIGN(((mb)->buf_iova + RTE_PKTMBUF_HEADROOM),\
FM10K_RX_DATABUF_ALIGN))
static inline void fifo_reset(struct fifo *fifo, uint32_t len)
p1 = (uintptr_t)&mb1->rearm_data;
*(uint64_t *)p1 = rxq->mbuf_initializer;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
p1 = (uintptr_t)&mb1->rearm_data;
*(uint64_t *)p1 = rxq->mbuf_initializer;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
vector unsigned long descriptor = (vector unsigned long){
- pkt->buf_physaddr + pkt->data_off, high_qw};
+ pkt->buf_iova + pkt->data_off, high_qw};
*(vector unsigned long *)txdp = descriptor;
}
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
dma_addr0 = vdupq_n_u64(paddr);
/* flush desc with pa dma_addr */
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
- paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
dma_addr1 = vdupq_n_u64(paddr);
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
}
((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
- uint64x2_t descriptor = {pkt->buf_physaddr + pkt->data_off, high_qw};
+ uint64x2_t descriptor = {pkt->buf_iova + pkt->data_off, high_qw};
vst1q_u64((uint64_t *)txdp, descriptor);
}
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
__m128i descriptor = _mm_set_epi64x(high_qw,
- pkt->buf_physaddr + pkt->data_off);
+ pkt->buf_iova + pkt->data_off);
_mm_store_si128((__m128i *)txdp, descriptor);
}
* Data to be rearmed is 6 bytes long.
*/
vst1_u8((uint8_t *)&mb0->rearm_data, p);
- paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
dma_addr0 = vsetq_lane_u64(paddr, zero, 0);
/* flush desc with pa dma_addr */
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
vst1_u8((uint8_t *)&mb1->rearm_data, p);
- paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
}
struct rte_mbuf *pkt, uint64_t flags)
{
uint64x2_t descriptor = {
- pkt->buf_physaddr + pkt->data_off,
+ pkt->buf_iova + pkt->data_off,
(uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len};
vst1q_u64((uint64_t *)&txdp->read, descriptor);
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
{
__m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
flags | pkt->data_len,
- pkt->buf_physaddr + pkt->data_off);
+ pkt->buf_iova + pkt->data_off);
_mm_store_si128((__m128i *)&txdp->read, descriptor);
}
#define NFP_QCP_MAX_ADD 0x7f
#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
- (uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+ (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
* VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
*/
if (!hw->virtio_user_dev)
- vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
+ vq->offset = offsetof(struct rte_mbuf, buf_iova);
else {
vq->vq_ring_mem = (uintptr_t)mz->addr;
vq->offset = offsetof(struct rte_mbuf, buf_addr);
#define VIRTIO_MBUF_ADDR(mb, vq) \
((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
#else
-#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_physaddr)
+#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
#endif
/**
{
return (void *)((unsigned long)m -
((unsigned long)m->buf_addr -
- (unsigned long)m->buf_physaddr));
+ (unsigned long)m->buf_iova));
}
static void
/* start of buffer is after mbuf structure and priv data */
m->priv_size = priv_size;
m->buf_addr = (char *)m + mbuf_size;
- m->buf_physaddr = rte_mempool_virt2iova(m) + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
m->buf_len = (uint16_t)buf_len;
/* keep some headroom between start of buffer and data */
/* generic checks */
if (m->pool == NULL)
rte_panic("bad mbuf pool\n");
- if (m->buf_physaddr == 0)
- rte_panic("bad phys addr\n");
+ if (m->buf_iova == 0)
+ rte_panic("bad IO addr\n");
if (m->buf_addr == NULL)
rte_panic("bad virt addr\n");
__rte_mbuf_sanity_check(m, 1);
- fprintf(f, "dump mbuf at %p, phys=%"PRIx64", buf_len=%u\n",
- m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
+ fprintf(f, "dump mbuf at %p, iova=%"PRIx64", buf_len=%u\n",
+ m, (uint64_t)m->buf_iova, (unsigned)m->buf_len);
fprintf(f, " pkt_len=%"PRIu32", ol_flags=%"PRIx64", nb_segs=%u, "
"in_port=%u\n", m->pkt_len, m->ol_flags,
(unsigned)m->nb_segs, (unsigned)m->port);
* same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
* working on vector drivers easier.
*/
- phys_addr_t buf_physaddr __rte_aligned(sizeof(phys_addr_t));
+ RTE_STD_C11
+ union {
+ rte_iova_t buf_iova;
+ rte_iova_t buf_physaddr; /**< deprecated */
+ } __rte_aligned(sizeof(rte_iova_t));
/* next 8 bytes are initialised on RX descriptor rearm */
MARKER64 rearm_data;
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
- return mb->buf_physaddr + mb->data_off;
+ return mb->buf_iova + mb->data_off;
}
/**
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
- return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}
/**
* For standard needs, prefer rte_pktmbuf_alloc().
*
* The caller can expect that the following fields of the mbuf structure
- * are initialized: buf_addr, buf_physaddr, buf_len, refcnt=1, nb_segs=1,
+ * are initialized: buf_addr, buf_iova, buf_len, refcnt=1, nb_segs=1,
* next=NULL, pool, priv_size. The other fields must be initialized
* by the caller.
*
rte_mbuf_refcnt_update(md, 1);
mi->priv_size = m->priv_size;
- mi->buf_physaddr = m->buf_physaddr;
+ mi->buf_iova = m->buf_iova;
mi->buf_addr = m->buf_addr;
mi->buf_len = m->buf_len;
m->priv_size = priv_size;
m->buf_addr = (char *)m + mbuf_size;
- m->buf_physaddr = rte_mempool_virt2iova(m) + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
m->buf_len = (uint16_t)buf_len;
rte_pktmbuf_reset_headroom(m);
m->data_len = 0;
* The offset into the data to calculate address from.
*/
#define rte_pktmbuf_mtophys_offset(m, o) \
- (phys_addr_t)((m)->buf_physaddr + (m)->data_off + (o))
+ (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
/**
* A macro that returns the physical address that points to the start of the
cur->data_len = cpy_len;
cur->data_off = 0;
cur->buf_addr = (void *)(uintptr_t)desc_addr;
- cur->buf_physaddr = hpa;
+ cur->buf_iova = hpa;
/*
* In zero copy mode, one mbuf can only reference data
}
badbuf = *buf;
- badbuf.buf_physaddr = 0;
+ badbuf.buf_iova = 0;
if (verify_mbuf_check_panics(&badbuf)) {
printf("Error with bad-physaddr mbuf test\n");
return -1;