Remove the deprecated buf_physaddr union field from rte_mbuf.
It is replaced with buf_iova, which sits at the same offset.
The single field buf_physaddr in rte_kni_mbuf is renamed to buf_iova as well.
This concludes a three-year process of moving from physical-address to
IOVA semantics.
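
For out-of-tree drivers and applications that still read the removed field,
the update is a one-for-one substitution. A minimal sketch, assuming only the
public mbuf API (the helper name mbuf_data_bus_addr is illustrative and not
part of DPDK):

    #include <rte_mbuf.h>

    /* Illustrative only: IO address of the packet data.
     * The removed pattern was m->buf_physaddr + m->data_off.
     */
    static inline rte_iova_t
    mbuf_data_bus_addr(const struct rte_mbuf *m)
    {
            /* buf_iova keeps the offset the old union occupied,
             * so the substitution does not change the layout.
             */
            return m->buf_iova + m->data_off;
            /* equivalent: rte_mbuf_data_iova(m) */
    }

Preferring the rte_mbuf_data_iova() accessor keeps such code independent of
the field layout.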
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
avoiding impact on vectorized implementation of the driver datapaths,
while evaluating performance gains of a better use of the first cache line.
- The deprecated unioned field ``buf_physaddr`` will be removed in DPDK 20.11.
-
* ethdev: Split the ``struct eth_dev_ops`` struct to hide it as much as possible
will be done in 20.11.
Currently the ``struct eth_dev_ops`` struct is accessible by the application
The same functionality is still available with the functions and macros
having ``iova`` in their names instead of ``dma_addr`` or ``mtophys``.
+* mbuf: Removed the unioned field ``buf_physaddr`` from ``rte_mbuf``.
+  The field ``buf_iova`` remains from the old union.
+
* mbuf: Removed the unioned field ``refcnt_atomic`` from
the structures ``rte_mbuf`` and ``rte_mbuf_ext_shared_info``.
The field ``refcnt`` is remaining from the old unions.
tailroom = rte_pktmbuf_tailroom(m_src);
if (likely(tailroom > len + 8)) {
mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
- mphys = m_src->buf_physaddr + m_src->buf_len;
+ mphys = m_src->buf_iova + m_src->buf_len;
mdata -= len;
mphys -= len;
buf->vaddr = mdata;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
mb2 = rxep[2].mbuf;
mb3 = rxep[3].mbuf;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
__m128i descriptor = _mm_set_epi64x(high_qw,
- pkt->buf_physaddr + pkt->data_off);
+ pkt->buf_iova + pkt->data_off);
_mm_store_si128((__m128i *)txdp, descriptor);
}
((uint64_t)pkt[0]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
__m256i desc2_3 = _mm256_set_epi64x(
- hi_qw3, pkt[3]->buf_physaddr + pkt[3]->data_off,
- hi_qw2, pkt[2]->buf_physaddr + pkt[2]->data_off);
+ hi_qw3, pkt[3]->buf_iova + pkt[3]->data_off,
+ hi_qw2, pkt[2]->buf_iova + pkt[2]->data_off);
__m256i desc0_1 = _mm256_set_epi64x(
- hi_qw1, pkt[1]->buf_physaddr + pkt[1]->data_off,
- hi_qw0, pkt[0]->buf_physaddr + pkt[0]->data_off);
+ hi_qw1, pkt[1]->buf_iova + pkt[1]->data_off,
+ hi_qw0, pkt[0]->buf_iova + pkt[0]->data_off);
_mm256_store_si256((void *)(txdp + 2), desc2_3);
_mm256_store_si256((void *)txdp, desc0_1);
}
mb0 = rxp[0];
mb1 = rxp[1];
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
mb2 = rxp[2];
mb3 = rxp[3];
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
((uint64_t)pkt->data_len << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT));
__m128i descriptor = _mm_set_epi64x(high_qw,
- pkt->buf_physaddr + pkt->data_off);
+ pkt->buf_iova + pkt->data_off);
_mm_store_si128((__m128i *)txdp, descriptor);
}
__m256i desc2_3 =
_mm256_set_epi64x
(hi_qw3,
- pkt[3]->buf_physaddr + pkt[3]->data_off,
+ pkt[3]->buf_iova + pkt[3]->data_off,
hi_qw2,
- pkt[2]->buf_physaddr + pkt[2]->data_off);
+ pkt[2]->buf_iova + pkt[2]->data_off);
__m256i desc0_1 =
_mm256_set_epi64x
(hi_qw1,
- pkt[1]->buf_physaddr + pkt[1]->data_off,
+ pkt[1]->buf_iova + pkt[1]->data_off,
hi_qw0,
- pkt[0]->buf_physaddr + pkt[0]->data_off);
+ pkt[0]->buf_iova + pkt[0]->data_off);
_mm256_store_si256((void *)(txdp + 2), desc2_3);
_mm256_store_si256((void *)txdp, desc0_1);
}
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
mb2 = rxep[2].mbuf;
mb3 = rxep[3].mbuf;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
__m128i descriptor = _mm_set_epi64x(high_qw,
- pkt->buf_physaddr + pkt->data_off);
+ pkt->buf_iova + pkt->data_off);
_mm_store_si128((__m128i *)txdp, descriptor);
}
__m256i desc2_3 =
_mm256_set_epi64x
(hi_qw3,
- pkt[3]->buf_physaddr + pkt[3]->data_off,
+ pkt[3]->buf_iova + pkt[3]->data_off,
hi_qw2,
- pkt[2]->buf_physaddr + pkt[2]->data_off);
+ pkt[2]->buf_iova + pkt[2]->data_off);
__m256i desc0_1 =
_mm256_set_epi64x
(hi_qw1,
- pkt[1]->buf_physaddr + pkt[1]->data_off,
+ pkt[1]->buf_iova + pkt[1]->data_off,
hi_qw0,
- pkt[0]->buf_physaddr + pkt[0]->data_off);
+ pkt[0]->buf_iova + pkt[0]->data_off);
_mm256_store_si256((void *)(txdp + 2), desc2_3);
_mm256_store_si256((void *)txdp, desc0_1);
}
static inline void *
iova2data_kva(struct kni_dev *kni, struct rte_kni_mbuf *m)
{
- return phys_to_virt(iova_to_phys(kni->usr_tsk, m->buf_physaddr) +
+ return phys_to_virt(iova_to_phys(kni->usr_tsk, m->buf_iova) +
m->data_off);
}
#endif
va = (void *)((unsigned long)pa +
(unsigned long)m->buf_addr -
- (unsigned long)m->buf_physaddr);
+ (unsigned long)m->buf_iova);
return va;
}
static void *
kva2data_kva(struct rte_kni_mbuf *m)
{
- return phys_to_virt(m->buf_physaddr + m->data_off);
+ return phys_to_virt(m->buf_iova + m->data_off);
}
static inline void *
*/
struct rte_kni_mbuf {
void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
- uint64_t buf_physaddr;
+ uint64_t buf_iova;
uint16_t data_off; /**< Start address of data in segment buffer. */
char pad1[2];
uint16_t nb_segs; /**< Number of segments. */
* same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
* working on vector drivers easier.
*/
- RTE_STD_C11
- union {
- rte_iova_t buf_iova;
- rte_iova_t buf_physaddr; /**< deprecated */
- } __rte_aligned(sizeof(rte_iova_t));
+ rte_iova_t buf_iova __rte_aligned(sizeof(rte_iova_t));
/* next 8 bytes are initialised on RX descriptor rearm */
RTE_MARKER64 rearm_data;
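
The vector Rx paths patched above rely on buf_addr and buf_iova forming a
contiguous 16-byte pair in the first cache line. A standalone sketch of that
invariant, mirroring the RTE_BUILD_BUG_ON checks already present in the
drivers (illustrative only, not an additional check added by this patch):

    #include <stddef.h>
    #include <rte_common.h>
    #include <rte_mbuf_core.h>

    /* With the union removed, buf_iova must still start 8 bytes after
     * buf_addr so that one 16-byte load (_mm_loadu_si128 on &mb->buf_addr)
     * fetches both the virtual address and the IOVA at once.
     */
    static inline void
    mbuf_layout_check(void)
    {
            RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
                             offsetof(struct rte_mbuf, buf_addr) + 8);
    }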