From d6b324c00fc933f757e68c54c0e50c92826d83f0 Mon Sep 17 00:00:00 2001
From: Ravi Kerur
Date: Fri, 4 Mar 2016 10:09:40 +0100
Subject: [PATCH] mbuf: get DMA address

Macros RTE_MBUF_DATA_DMA_ADDR and RTE_MBUF_DATA_DMA_ADDR_DEFAULT are
defined in each PMD driver file. Convert these macros to inline
functions and move them to the common lib/librte_mbuf/rte_mbuf.h file.
PMD drivers already include rte_mbuf.h directly or indirectly, hence no
additional header file inclusion is necessary.

Signed-off-by: Ravi Kerur
Signed-off-by: Olivier Matz
---
 drivers/net/bnx2x/bnx2x.c               |  2 +-
 drivers/net/bnx2x/bnx2x.h               |  3 ---
 drivers/net/cxgbe/sge.c                 |  3 ---
 drivers/net/e1000/em_rxtx.c             | 15 ++++--------
 drivers/net/e1000/igb_rxtx.c            | 14 ++++-------
 drivers/net/i40e/i40e_rxtx.c            | 20 ++++++----------
 drivers/net/ixgbe/ixgbe_rxtx.c          | 14 +++++------
 drivers/net/ixgbe/ixgbe_rxtx.h          |  6 -----
 drivers/net/nfp/nfp_net.c               |  2 +-
 drivers/net/nfp/nfp_net_pmd.h           |  3 ---
 drivers/net/virtio/virtio_rxtx.c        |  2 +-
 drivers/net/virtio/virtio_rxtx_simple.c |  4 ++--
 drivers/net/virtio/virtqueue.h          |  3 ---
 drivers/net/vmxnet3/vmxnet3_rxtx.c      | 11 +++------
 drivers/net/xenvirt/virtqueue.h         |  9 +------
 lib/librte_mbuf/rte_mbuf.h              | 32 +++++++++++++++++++++++++
 16 files changed, 64 insertions(+), 79 deletions(-)

diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 5a38c32198..9d640dace3 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -2147,7 +2147,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_p
 
     tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
     tx_start_bd->addr =
-        rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(m0));
+        rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
     tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
     tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
     tx_start_bd->general_data =
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 1b5e26e388..5f5245072a 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -128,9 +128,6 @@ struct bnx2x_device_type {
     char *bnx2x_name;
 };
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-    ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-
 #define BNX2X_PAGE_SHIFT 12
 #define BNX2X_PAGE_SIZE (1 << BNX2X_PAGE_SHIFT)
 #define BNX2X_PAGE_MASK (~(BNX2X_PAGE_SIZE - 1))
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 3c62d03e17..ab5a842acd 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1332,9 +1332,6 @@ static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
     return t4_pktgl_to_mbuf_usembufs(gl);
 }
 
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-    ((dma_addr_t) ((mb)->buf_physaddr + (mb)->data_off))
-
 /**
  * t4_ethrx_handler - process an ingress ethernet packet
  * @q: the response queue that received the packet
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 7ef965c801..441ccad8c1 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -88,12 +88,6 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
     return m;
 }
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-    (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-    (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -585,7 +579,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
              * Set up Transmit Data Descriptor.
             */
            slen = m_seg->data_len;
-           buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+           buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
            txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
            txd->lower.data =
                rte_cpu_to_le_32(cmd_type_len | slen);
@@ -769,7 +763,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
        dma_addr =
-           rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+           rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
        rxdp->buffer_addr = dma_addr;
        rxdp->status = 0;
 
@@ -949,7 +943,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
         */
        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
-       dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+       dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
        rxdp->buffer_addr = dma;
        rxdp->status = 0;
 
@@ -1575,7 +1569,8 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
            return -ENOMEM;
        }
 
-       dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+       dma_addr =
+           rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
 
        /* Clear HW ring memory */
        rxq->rx_ring[i] = rxd_init;
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index e7c30b7fd8..74587b524a 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -89,12 +89,6 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
     return m;
 }
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-    (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-    (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -596,7 +590,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
             * Set up transmit descriptor.
             */
            slen = (uint16_t) m_seg->data_len;
-           buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+           buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
            txd->read.buffer_addr =
                rte_cpu_to_le_64(buf_dma_addr);
            txd->read.cmd_type_len =
@@ -875,7 +869,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
        dma_addr =
-           rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+           rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
        rxdp->read.hdr_addr = 0;
        rxdp->read.pkt_addr = dma_addr;
 
@@ -1061,7 +1055,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
         */
        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
-       dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+       dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
        rxdp->read.pkt_addr = dma;
        rxdp->read.hdr_addr = 0;
 
@@ -1970,7 +1964,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
            return -ENOMEM;
        }
        dma_addr =
-           rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+           rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
        rxd = &rxq->rx_ring[i];
        rxd->read.hdr_addr = 0;
        rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 40cffc103d..8931b8ea1a 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -78,12 +78,6 @@
        PKT_TX_L4_MASK | \
        PKT_TX_OUTER_IP_CKSUM)
 
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-    (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-    ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-
 static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
                       struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts);
@@ -1098,7 +1092,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
        mb->nb_segs = 1;
        mb->port = rxq->port_id;
        dma_addr = rte_cpu_to_le_64(\
-           RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+           rte_mbuf_data_dma_addr_default(mb));
        rxdp[i].read.hdr_addr = 0;
        rxdp[i].read.pkt_addr = dma_addr;
    }
@@ -1245,7 +1239,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
        dma_addr =
-           rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+           rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
        rxdp->read.hdr_addr = 0;
        rxdp->read.pkt_addr = dma_addr;
 
@@ -1356,7 +1350,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
        dma_addr =
-           rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+           rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 
        /* Set data buffer address and data length of the mbuf */
        rxdp->read.hdr_addr = 0;
@@ -1691,7 +1685,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
            /* Setup TX Descriptor */
            slen = m_seg->data_len;
-           buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+           buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
 
            PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
                "buf_dma_addr: %#"PRIx64";\n"
@@ -1790,7 +1784,7 @@ tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
    uint32_t i;
 
    for (i = 0; i < 4; i++, txdp++, pkts++) {
-       dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+       dma_addr = rte_mbuf_data_dma_addr(*pkts);
        txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
        txdp->cmd_type_offset_bsz =
            i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1804,7 +1798,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
 {
    uint64_t dma_addr;
 
-   dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+   dma_addr = rte_mbuf_data_dma_addr(*pkts);
    txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
    txdp->cmd_type_offset_bsz =
        i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -2741,7 +2735,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
        mbuf->port = rxq->port_id;
 
        dma_addr =
-           rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+           rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
 
        rxd = &rxq->rx_ring[i];
        rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index e95e6b7e7d..54278ce9ff 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -171,7 +171,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
    int i;
 
    for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
-       buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+       buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
        pkt_len = (*pkts)->data_len;
 
        /* write data to descriptor */
@@ -194,7 +194,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
    uint64_t buf_dma_addr;
    uint32_t pkt_len;
 
-   buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+   buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
    pkt_len = (*pkts)->data_len;
 
    /* write data to descriptor */
@@ -816,7 +816,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
             * Set up Transmit Data Descriptor.
             */
            slen = m_seg->data_len;
-           buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+           buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
            txd->read.buffer_addr =
                rte_cpu_to_le_64(buf_dma_addr);
            txd->read.cmd_type_len =
@@ -1152,7 +1152,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
        mb->data_off = RTE_PKTMBUF_HEADROOM;
 
        /* populate the descriptors */
-       dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+       dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
        rxdp[i].read.hdr_addr = 0;
        rxdp[i].read.pkt_addr = dma_addr;
    }
@@ -1379,7 +1379,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
        dma_addr =
-           rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+           rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
        rxdp->read.hdr_addr = 0;
        rxdp->read.pkt_addr = dma_addr;
 
@@ -1672,7 +1672,7 @@ next_desc:
 
        if (!bulk_alloc) {
            __le64 dma =
-               rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+               rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
            /*
             * Update RX descriptor with the physical address of the
             * new data buffer of the new allocated mbuf.
@@ -3594,7 +3594,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
        mbuf->port = rxq->port_id;
 
        dma_addr =
-           rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+           rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
        rxd = &rxq->rx_ring[i];
        rxd->read.hdr_addr = 0;
        rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 475a800553..42eb449849 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -61,12 +61,6 @@
 
 #define RTE_IXGBE_DESCS_PER_LOOP 4
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-    (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-    (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
 #ifdef RTE_IXGBE_INC_VECTOR
 #define RTE_IXGBE_RXQ_REARM_THRESH 32
 #define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index fd4dd39305..9c4f218f87 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -1985,7 +1985,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
            rte_pktmbuf_free_seg(*lmbuf);
 
        dma_size = pkt->data_len;
-       dma_addr = RTE_MBUF_DATA_DMA_ADDR(pkt);
+       dma_addr = rte_mbuf_data_dma_addr(pkt);
        PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
               "%" PRIx64 "\n", dma_addr);
 
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index a7f938629c..232ce5caee 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -75,9 +75,6 @@ struct nfp_net_adapter;
 /* Interrupt definitions */
 #define NFP_NET_IRQ_LSC_IDX 0
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-    ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-
 /* Default values for RX/TX configuration */
 #define DEFAULT_RX_FREE_THRESH 32
 #define DEFAULT_RX_PTHRESH 8
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index ec0b8de692..e96352cedb 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -239,7 +239,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 
    for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
        idx = start_dp[idx].next;
-       start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
+       start_dp[idx].addr = rte_mbuf_data_dma_addr(cookie);
        start_dp[idx].len = cookie->data_len;
        start_dp[idx].flags = VRING_DESC_F_NEXT;
        cookie = cookie->next;
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index 3a1de9d282..8f5293ddce 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -366,7 +366,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
            txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
        for (i = 0; i < nb_tail; i++) {
            start_dp[desc_idx].addr =
-               RTE_MBUF_DATA_DMA_ADDR(*tx_pkts);
+               rte_mbuf_data_dma_addr(*tx_pkts);
            start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
            tx_pkts++;
            desc_idx++;
@@ -377,7 +377,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
    for (i = 0; i < nb_commit; i++)
        txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
    for (i = 0; i < nb_commit; i++) {
-       start_dp[desc_idx].addr = RTE_MBUF_DATA_DMA_ADDR(*tx_pkts);
+       start_dp[desc_idx].addr = rte_mbuf_data_dma_addr(*tx_pkts);
        start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
        tx_pkts++;
        desc_idx++;
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 99d4fa91e8..68e0b4b8bf 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -66,9 +66,6 @@ struct rte_mbuf;
 
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-    (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
 #define VTNET_SQ_RQ_QUEUE_IDX 0
 #define VTNET_SQ_TQ_QUEUE_IDX 1
 #define VTNET_SQ_CQ_QUEUE_IDX 2
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 4de5d8968c..8385478fcf 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -77,12 +77,6 @@
 #include "vmxnet3_logs.h"
 #include "vmxnet3_ethdev.h"
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-    (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-    (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
 static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
 
 static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
@@ -377,7 +371,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
           transmit buffer size (16K) is greater than
           maximum sizeof mbuf segment size. */
        gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
-       gdesc->txd.addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+       gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
        gdesc->dword[2] = dw2 | m_seg->data_len;
        gdesc->dword[3] = 0;
 
@@ -475,7 +469,8 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
        buf_info->m = mbuf;
        buf_info->len = (uint16_t)(mbuf->buf_len -
                       RTE_PKTMBUF_HEADROOM);
-       buf_info->bufPA = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf);
+       buf_info->bufPA =
+           rte_mbuf_data_dma_addr_default(mbuf);
 
        /* Load Rx Descriptor with the buffer's GPA */
        rxd->addr = buf_info->bufPA;
diff --git a/drivers/net/xenvirt/virtqueue.h b/drivers/net/xenvirt/virtqueue.h
index 1d1bbcc423..e425a04e5e 100644
--- a/drivers/net/xenvirt/virtqueue.h
+++ b/drivers/net/xenvirt/virtqueue.h
@@ -50,13 +50,6 @@ struct rte_mbuf;
 /* The alignment to use between consumer and producer parts of vring. */
 #define VIRTIO_PCI_VRING_ALIGN 4096
 
-/*
- * Address translatio is between gva<->hva,
- * rather than gpa<->hva in virito spec.
- */
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-    ((uint64_t)(uintptr_t)rte_pktmbuf_mtod(mb, void *))
-
 enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
 
 /**
@@ -238,7 +231,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
    start_dp[idx].flags = VRING_DESC_F_NEXT;
    start_dp[idx].addr = (uintptr_t)NULL;
    idx = start_dp[idx].next;
-   start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
+   start_dp[idx].addr = rte_pktmbuf_mtod(cookie, uint64_t);
    start_dp[idx].len = cookie->data_len;
    start_dp[idx].flags = 0;
    idx = start_dp[idx].next;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index c1f6bc4ffd..e39ad28da8 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -854,6 +854,38 @@ struct rte_mbuf {
 
 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
 
+/**
+ * Return the DMA address of the beginning of the mbuf data
+ *
+ * @param mb
+ *   The pointer to the mbuf.
+ * @return
+ *   The physical address of the beginning of the mbuf data
+ */
+static inline phys_addr_t
+rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
+{
+   return mb->buf_physaddr + mb->data_off;
+}
+
+/**
+ * Return the default DMA address of the beginning of the mbuf data
+ *
+ * This function is used by drivers in their receive function, as it
+ * returns the location where data should be written by the NIC, taking
+ * the default headroom into account.
+ *
+ * @param mb
+ *   The pointer to the mbuf.
+ * @return
+ *   The physical address of the beginning of the mbuf data
+ */
+static inline phys_addr_t
+rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
+{
+   return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+}
+
 /**
  * Return the mbuf owning the data buffer address of an indirect mbuf.
  *
-- 
2.20.1
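
Usage sketch (not part of the patch): the snippet below shows one way a PMD might call the two new helpers in place of the removed per-driver macros. The descriptor layout (struct demo_desc) and the two demo_* functions are hypothetical, made up for illustration only; rte_pktmbuf_alloc(), rte_cpu_to_le_64() and the new rte_mbuf_data_dma_addr*() inline functions are the only real DPDK APIs used.

#include <errno.h>
#include <stdint.h>

#include <rte_byteorder.h>
#include <rte_mbuf.h>

/* Hypothetical NIC descriptor, for illustration only. */
struct demo_desc {
	uint64_t addr;	/* little-endian buffer address */
	uint16_t len;
};

/* RX refill: the NIC should write at the default data offset (the
 * headroom of a freshly allocated mbuf), so the _default variant is used. */
static int
demo_refill_rx_desc(struct rte_mempool *mp, volatile struct demo_desc *rxd,
		    struct rte_mbuf **sw_entry)
{
	struct rte_mbuf *mb = rte_pktmbuf_alloc(mp);

	if (mb == NULL)
		return -ENOMEM;
	*sw_entry = mb;	/* remember the mbuf so it can be freed or replaced later */
	rxd->addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
	rxd->len = 0;
	return 0;
}

/* TX: each segment carries its own data_off, so the non-default
 * variant is used on the segment being transmitted. */
static void
demo_fill_tx_desc(struct rte_mbuf *m_seg, volatile struct demo_desc *txd)
{
	txd->addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m_seg));
	txd->len = m_seg->data_len;
}

The asymmetry mirrors the two helpers: rte_mbuf_data_dma_addr_default() is only meaningful for a just-allocated mbuf (data_off == RTE_PKTMBUF_HEADROOM), while rte_mbuf_data_dma_addr() follows the current data_off, which may have been moved by rte_pktmbuf_prepend() or rte_pktmbuf_adj() before transmission.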