tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
tx_start_bd->addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(m0));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_start_bd->general_data =
char *bnx2x_name;
};
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-
#define BNX2X_PAGE_SHIFT 12
#define BNX2X_PAGE_SIZE (1 << BNX2X_PAGE_SHIFT)
#define BNX2X_PAGE_MASK (~(BNX2X_PAGE_SIZE - 1))
return t4_pktgl_to_mbuf_usembufs(gl);
}
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- ((dma_addr_t) ((mb)->buf_physaddr + (mb)->data_off))
-
/**
* t4_ethrx_handler - process an ingress ethernet packet
* @q: the response queue that received the packet
return m;
}
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
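The struct body that this comment documents is cut off in the excerpt above; in the Intel-style drivers it is essentially one software entry per hardware RX descriptor, holding a pointer back to the mbuf that owns the buffer. A hypothetical minimal version for illustration only (the name rx_entry is invented, not taken from the patch):

/* Illustrative sketch, not from the patch: software bookkeeping entry that
 * remembers which mbuf currently provides the buffer a descriptor points at. */
struct rx_entry {
	struct rte_mbuf *mbuf;	/* mbuf associated with the RX descriptor */
};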
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
- buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->buffer_addr = dma_addr;
rxdp->status = 0;
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->buffer_addr = dma;
rxdp->status = 0;
return -ENOMEM;
}
- dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
/* Clear HW ring memory */
rxq->rx_ring[i] = rxd_init;
return m;
}
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
* Set up transmit descriptor.
*/
slen = (uint16_t) m_seg->data_len;
- buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txd->read.cmd_type_len =
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->read.pkt_addr = dma;
rxdp->read.hdr_addr = 0;
return -ENOMEM;
}
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
PKT_TX_L4_MASK | \
PKT_TX_OUTER_IP_CKSUM)
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
mb->nb_segs = 1;
mb->port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
- RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+ rte_mbuf_data_dma_addr_default(mb));
rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
/* Set data buffer address and data length of the mbuf */
rxdp->read.hdr_addr = 0;
/* Setup TX Descriptor */
slen = m_seg->data_len;
- buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
"buf_dma_addr: %#"PRIx64";\n"
uint32_t i;
for (i = 0; i < 4; i++, txdp++, pkts++) {
- dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ dma_addr = rte_mbuf_data_dma_addr(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
{
uint64_t dma_addr;
- dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ dma_addr = rte_mbuf_data_dma_addr(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
mbuf->port = rxq->port_id;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.pkt_addr = dma_addr;
int i;
for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
- buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
uint64_t buf_dma_addr;
uint32_t pkt_len;
- buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
- buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txd->read.cmd_type_len =
mb->data_off = RTE_PKTMBUF_HEADROOM;
/* populate the descriptors */
- dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
if (!bulk_alloc) {
__le64 dma =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
/*
* Update RX descriptor with the physical address of the
* new data buffer of the new allocated mbuf.
mbuf->port = rxq->port_id;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
#define RTE_IXGBE_DESCS_PER_LOOP 4
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
#ifdef RTE_IXGBE_INC_VECTOR
#define RTE_IXGBE_RXQ_REARM_THRESH 32
#define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH
rte_pktmbuf_free_seg(*lmbuf);
dma_size = pkt->data_len;
- dma_addr = RTE_MBUF_DATA_DMA_ADDR(pkt);
+ dma_addr = rte_mbuf_data_dma_addr(pkt);
PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
"%" PRIx64 "\n", dma_addr);
/* Interrupt definitions */
#define NFP_NET_IRQ_LSC_IDX 0
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-
/* Default values for RX/TX configuration */
#define DEFAULT_RX_FREE_THRESH 32
#define DEFAULT_RX_PTHRESH 8
for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
idx = start_dp[idx].next;
- start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
+ start_dp[idx].addr = rte_mbuf_data_dma_addr(cookie);
start_dp[idx].len = cookie->data_len;
start_dp[idx].flags = VRING_DESC_F_NEXT;
cookie = cookie->next;
txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
for (i = 0; i < nb_tail; i++) {
start_dp[desc_idx].addr =
- RTE_MBUF_DATA_DMA_ADDR(*tx_pkts);
+ rte_mbuf_data_dma_addr(*tx_pkts);
start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
tx_pkts++;
desc_idx++;
for (i = 0; i < nb_commit; i++)
txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
for (i = 0; i < nb_commit; i++) {
- start_dp[desc_idx].addr = RTE_MBUF_DATA_DMA_ADDR(*tx_pkts);
+ start_dp[desc_idx].addr = rte_mbuf_data_dma_addr(*tx_pkts);
start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
tx_pkts++;
desc_idx++;
#define VIRTQUEUE_MAX_NAME_SZ 32
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
#define VTNET_SQ_RQ_QUEUE_IDX 0
#define VTNET_SQ_TQ_QUEUE_IDX 1
#define VTNET_SQ_CQ_QUEUE_IDX 2
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
transmit buffer size (16K) is greater than
maximum sizeof mbuf segment size. */
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
- gdesc->txd.addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
gdesc->dword[2] = dw2 | m_seg->data_len;
gdesc->dword[3] = 0;
buf_info->m = mbuf;
buf_info->len = (uint16_t)(mbuf->buf_len -
RTE_PKTMBUF_HEADROOM);
- buf_info->bufPA = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf);
+ buf_info->bufPA =
+ rte_mbuf_data_dma_addr_default(mbuf);
/* Load Rx Descriptor with the buffer's GPA */
rxd->addr = buf_info->bufPA;
/* The alignment to use between consumer and producer parts of vring. */
#define VIRTIO_PCI_VRING_ALIGN 4096
-/*
- * Address translatio is between gva<->hva,
- * rather than gpa<->hva in virito spec.
- */
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)(uintptr_t)rte_pktmbuf_mtod(mb, void *))
-
enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
/**
start_dp[idx].flags = VRING_DESC_F_NEXT;
start_dp[idx].addr = (uintptr_t)NULL;
idx = start_dp[idx].next;
- start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
+ start_dp[idx].addr = rte_pktmbuf_mtod(cookie, uint64_t);
start_dp[idx].len = cookie->data_len;
start_dp[idx].flags = 0;
idx = start_dp[idx].next;
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
+/**
+ * Return the DMA address of the beginning of the mbuf data
+ *
+ * @param mb
+ * The pointer to the mbuf.
+ * @return
+ * The physical address of the beginning of the mbuf data
+ */
+static inline phys_addr_t
+rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
+{
+ return mb->buf_physaddr + mb->data_off;
+}
+
+/**
+ * Return the default DMA address of the beginning of the mbuf data
+ *
+ * This function is used by drivers in their receive functions, as it
+ * returns the location where the NIC should write incoming data, taking
+ * the default headroom into account.
+ *
+ * @param mb
+ * The pointer to the mbuf.
+ * @return
+ * The physical address of the beginning of the mbuf data
+ */
+static inline phys_addr_t
+rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
+{
+ return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+}
+
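Taken together, the two helpers replace every per-driver macro removed above: rte_mbuf_data_dma_addr() gives the TX-side address (wherever the data currently starts, buf_physaddr + data_off), while rte_mbuf_data_dma_addr_default() gives the RX-refill address (buf_physaddr + RTE_PKTMBUF_HEADROOM, where the NIC should write a new frame). Below is a minimal, hypothetical sketch of how a driver would use them; the descriptor structs and function names are invented for illustration and are not part of the patch, while the helper and byte-order calls are the ones shown in the hunks above.

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_byteorder.h>

/* Illustrative little-endian hardware descriptors (not from the patch). */
struct demo_tx_desc {
	uint64_t buffer_addr;	/* DMA address of the data to transmit */
	uint16_t length;	/* number of bytes in this segment */
};

struct demo_rx_desc {
	uint64_t pkt_addr;	/* where the NIC writes the received frame */
};

/* TX: the descriptor must point at the data the mbuf currently holds,
 * i.e. buf_physaddr + data_off, which is what rte_mbuf_data_dma_addr()
 * returns. */
static inline void
demo_tx_fill(struct demo_tx_desc *txd, const struct rte_mbuf *m)
{
	txd->buffer_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m));
	txd->length = rte_cpu_to_le_16(m->data_len);
}

/* RX refill: a freshly allocated mbuf holds no data yet, so the descriptor
 * points at the default data location, buf_physaddr + RTE_PKTMBUF_HEADROOM,
 * which is what rte_mbuf_data_dma_addr_default() returns. */
static inline void
demo_rx_refill(struct demo_rx_desc *rxd, const struct rte_mbuf *m)
{
	rxd->pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(m));
}

As in the driver hunks above, the address is converted to little-endian with rte_cpu_to_le_64() before being written to the descriptor.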
/**
* Return the mbuf owning the data buffer address of an indirect mbuf.
*