pkt_ol_flags = mb->ol_flags;
ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK));
- eth_hdr = (struct ether_hdr *) mb->data;
+ eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
if (eth_type == ETHER_TYPE_VLAN) {
/* Only allow single VLAN label here */
pkt->next = NULL;
/* Initialize Ethernet header. */
- eth_hdr = (struct ether_hdr *)pkt->data;
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
nb_replies = 0;
for (i = 0; i < nb_rx; i++) {
pkt = pkts_burst[i];
- eth_h = (struct ether_hdr *) pkt->data;
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
eth_type = RTE_BE_TO_CPU_16(eth_h->ether_type);
l2_len = sizeof(struct ether_hdr);
if (verbose_level > 0) {
* Check that the received packet is a PTP packet that was detected
* by the hardware.
*/
- eth_hdr = (struct ether_hdr *)mb->data;
+ eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) {
if (eth_type == ETHER_TYPE_1588) {
* Check that the received PTP packet is a PTP V2 packet of type
* PTP_SYNC_MESSAGE.
*/
- ptp_hdr = (struct ptpv2_msg *) ((char *) mb->data +
+ ptp_hdr = (struct ptpv2_msg *) (rte_pktmbuf_mtod(mb, char *) +
sizeof(struct ether_hdr));
if (ptp_hdr->version != 0x02) {
printf("Port %u Received PTP V2 Ethernet frame with wrong PTP"
fs->rx_packets += nb_rx;
for (i = 0; i < nb_rx; i++) {
mb = pkts_burst[i];
- eth_hdr = (struct ether_hdr *) mb->data;
+ eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
&eth_hdr->d_addr);
ether_addr_copy(&ports[fs->tx_port].eth_addr,
txp = &ports[fs->tx_port];
for (i = 0; i < nb_rx; i++) {
mb = pkts_burst[i];
- eth_hdr = (struct ether_hdr *) mb->data;
+ eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
&eth_hdr->d_addr);
ether_addr_copy(&ports[fs->tx_port].eth_addr,
txp = &ports[fs->tx_port];
for (i = 0; i < nb_rx; i++) {
mb = pkts_burst[i];
- eth_hdr = (struct ether_hdr *) mb->data;
+ eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
/* Swap dest and src mac addresses. */
ether_addr_copy(&eth_hdr->d_addr, &addr);
rte_pktmbuf_free(mb);
continue;
}
- eth_hdr = (struct ether_hdr *) mb->data;
+ eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
ol_flags = mb->ol_flags;
print_ether_addr(" src=", ð_hdr->s_addr);
mb_ctor_arg->seg_buf_offset);
mb->buf_len = mb_ctor_arg->seg_buf_size;
mb->ol_flags = 0;
- mb->data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
mb->nb_segs = 1;
mb->l2_l3_len = 0;
mb->vlan_tci = 0;
seg = seg->next;
}
copy_len = seg->data_len - offset;
- seg_buf = ((char *) seg->data + offset);
+ seg_buf = (rte_pktmbuf_mtod(seg, char *) + offset);
while (len > copy_len) {
rte_memcpy(seg_buf, buf, (size_t) copy_len);
len -= copy_len;
buf = ((char*) buf + copy_len);
seg = seg->next;
- seg_buf = seg->data;
+ seg_buf = rte_pktmbuf_mtod(seg, char *);
}
rte_memcpy(seg_buf, buf, (size_t) len);
}
copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
if (offset + len <= pkt->data_len) {
- rte_memcpy(((char *) pkt->data + offset), buf, (size_t) len);
+ rte_memcpy((rte_pktmbuf_mtod(pkt, char *) + offset),
+ buf, (size_t) len);
return;
}
copy_buf_to_pkt_segs(buf, len, pkt, offset);
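
For reference, copy_buf_to_pkt() is the small helper that callers use to write header structures into a possibly segmented mbuf; only its internal pointer arithmetic changes in this patch. A minimal usage sketch, assuming the header structs have already been filled in by the caller:

static void
setup_pkt_headers(struct rte_mbuf *pkt, struct ether_hdr *eth_hdr,
        struct ipv4_hdr *ip_hdr, struct udp_hdr *udp_hdr)
{
        /* write each header at its offset; copy_buf_to_pkt_segs()
         * takes over when a write crosses a segment boundary */
        copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, 0);
        copy_buf_to_pkt(ip_hdr, sizeof(*ip_hdr), pkt,
                        sizeof(struct ether_hdr));
        copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt,
                        sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr));
}
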
seg = seg->next;
}
copy_len = seg->data_len - offset;
- seg_buf = ((char *) seg->data + offset);
+ seg_buf = rte_pktmbuf_mtod(seg, char *) + offset;
while (len > copy_len) {
rte_memcpy(seg_buf, buf, (size_t) copy_len);
len -= copy_len;
buf = ((char *) buf + copy_len);
seg = seg->next;
- seg_buf = seg->data;
+ seg_buf = rte_pktmbuf_mtod(seg, void *);
}
rte_memcpy(seg_buf, buf, (size_t) len);
}
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
if (offset + len <= pkt->data_len) {
- rte_memcpy(((char *) pkt->data + offset), buf, (size_t) len);
+ rte_memcpy(rte_pktmbuf_mtod(pkt, char *) + offset,
+ buf, (size_t) len);
return;
}
copy_buf_to_pkt_segs(buf, len, pkt, offset);
printf("rte_pktmbuf_alloc() failed (%u)\n", i);
ret = -1;
}
- m[i]->data = RTE_PTR_ADD(m[i]->data, 64);
+ m[i]->data_off += 64;
}
/* free them */
printf("rte_pktmbuf_alloc() failed (%u)\n", i);
ret = -1;
}
- if (m[i]->data != RTE_PTR_ADD(m[i]->buf_addr, RTE_PKTMBUF_HEADROOM)) {
- printf ("data pointer not set properly\n");
+ if (m[i]->data_off != RTE_PKTMBUF_HEADROOM) {
+ printf("invalid data_off\n");
ret = -1;
}
}
struct rte_mbuf *mbuf;
mbuf = rte_pktmbuf_alloc(pool);
- memset(mbuf->data, 0x00,
+ memset(rte_pktmbuf_mtod(mbuf, char *), 0x00,
sizeof(struct ipv4_5tuple));
five_tuple.proto = j;
five_tuple.port_src = rte_bswap16(100 + j);
five_tuple.port_dst = rte_bswap16(200 + j);
- memcpy(mbuf->data, &five_tuple,
+ memcpy(rte_pktmbuf_mtod(mbuf, char *), &five_tuple,
sizeof(struct ipv4_5tuple));
RTE_LOG(INFO, PIPELINE, "%s: Enqueue onto ring %d\n",
__func__, i);
printf("Got %d object(s) from ring %d!\n", ret, i);
for (j = 0; j < ret; j++) {
mbuf = (struct rte_mbuf *)objs[j];
- rte_hexdump(stdout, "mbuf", mbuf->data, 64);
+ rte_hexdump(stdout, "mbuf",
+ rte_pktmbuf_mtod(mbuf, char *), 64);
rte_pktmbuf_free(mbuf);
}
tx_count += ret;
#include "test_table.h"
#include "test_table_pipeline.h"
-#define RTE_CBUF_UINT8_PTR(cbuf, offset) \
- (&cbuf->data[offset])
-#define RTE_CBUF_UINT32_PTR(cbuf, offset) \
- (&cbuf->data32[offset/sizeof(uint32_t)])
-
#if 0
static rte_pipeline_port_out_action_handler port_action_0x00
printf("Got %d object(s) from ring %d!\n", ret, i);
for (j = 0; j < ret; j++) {
mbuf = (struct rte_mbuf *)objs[j];
- rte_hexdump(stdout, "Object:", mbuf->data,
+ rte_hexdump(stdout, "Object:",
+ rte_pktmbuf_mtod(mbuf, char *),
mbuf->data_len);
rte_pktmbuf_free(mbuf);
}
CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC=y
CONFIG_RTE_LIBRTE_IXGBE_ALLOW_UNSUPPORTED_SFP=n
-CONFIG_RTE_IXGBE_INC_VECTOR=y
+CONFIG_RTE_IXGBE_INC_VECTOR=n
CONFIG_RTE_IXGBE_RX_OLFLAGS_ENABLE=y
#
if (m == NULL)
continue;
- ret = read(tap_fd, m->data, MAX_PACKET_SZ);
+ ret = read(tap_fd, rte_pktmbuf_mtod(m, void *),
+ MAX_PACKET_SZ);
lcore_stats[lcore_id].rx++;
if (unlikely(ret < 0)) {
FATAL_ERROR("Reading from %s interface failed",
/* Copy mbuf data to buffer */
rte_memcpy((void *)(uintptr_t)buff_addr,
- (const void *)buff->data,
+ rte_pktmbuf_mtod(buff, const void *),
rte_pktmbuf_data_len(buff));
PRINT_PACKET(dev, (uintptr_t)buff_addr,
rte_pktmbuf_data_len(buff), 0);
int i, ret;
/* Learn MAC address of guest device from packet */
- pkt_hdr = (struct ether_hdr *)m->data;
+ pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
dev_ll = ll_root_used;
struct ether_hdr *pkt_hdr;
uint64_t ret = 0;
- pkt_hdr = (struct ether_hdr *)m->data;
+ pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
/*get the used devices list*/
dev_ll = ll_root_used;
unsigned len, ret, offset = 0;
const uint16_t lcore_id = rte_lcore_id();
struct virtio_net_data_ll *dev_ll = ll_root_used;
- struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->data;
+ struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
/*check if destination is local VM*/
if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(dev, m) == 0))
mbuf->nb_segs = m->nb_segs;
/* Copy ethernet header to mbuf. */
- rte_memcpy((void*)mbuf->data, (const void*)m->data, ETH_HLEN);
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
+ rte_pktmbuf_mtod(m, const void *),
+ ETH_HLEN);
/* Set up the VLAN header; convert fields to network byte order with htons(). */
- vlan_hdr = (struct vlan_ethhdr *) mbuf->data;
+ vlan_hdr = rte_pktmbuf_mtod(mbuf, struct vlan_ethhdr *);
vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_TCI = htons(vlan_tag);
/* Copy the remaining packet contents to the mbuf. */
- rte_memcpy((void*) ((uint8_t*)mbuf->data + VLAN_ETH_HLEN),
- (const void*) ((uint8_t*)m->data + ETH_HLEN), (m->data_len - ETH_HLEN));
+ rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf, uint8_t *) + VLAN_ETH_HLEN),
+ (const void *)(rte_pktmbuf_mtod(m, uint8_t *) + ETH_HLEN),
+ (m->data_len - ETH_HLEN));
/* Copy the remaining segments for the whole packet. */
prev = mbuf;
/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
m.data_len = desc->len;
m.pkt_len = desc->len;
- m.data = (void*)(uintptr_t)buff_addr;
+ m.data_off = 0;
+ m.buf_addr = (void *)(uintptr_t)buff_addr;
PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);
}
mbuf->buf_addr = (void *)(uintptr_t)(buff_addr - RTE_PKTMBUF_HEADROOM);
- mbuf->data = (void *)(uintptr_t)(buff_addr);
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->buf_physaddr = phys_addr - RTE_PKTMBUF_HEADROOM;
mbuf->data_len = desc->len;
MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
RTE_PKTMBUF_HEADROOM : m->buf_len;
- m->data = (char *) m->buf_addr + buf_ofs;
+ m->data_off = buf_ofs;
m->data_len = 0;
}
unsigned len, ret, offset = 0;
struct vpool *vpool;
struct virtio_net_data_ll *dev_ll = ll_root_used;
- struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->data;
+ struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];
/*Add packet to the port tx queue*/
mbuf->pkt_len = mbuf->data_len;
if (unlikely(need_copy)) {
/* Copy the packet contents to the mbuf. */
- rte_memcpy((void *)((uint8_t *)mbuf->data),
- (const void *) ((uint8_t *)m->data),
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
+ rte_pktmbuf_mtod(m, void *),
m->data_len);
} else {
- mbuf->data = m->data;
+ mbuf->data_off = m->data_off;
mbuf->buf_physaddr = m->buf_physaddr;
mbuf->buf_addr = m->buf_addr;
}
m.data_len = desc->len;
m.nb_segs = 1;
m.next = NULL;
- m.data = (void *)(uintptr_t)buff_addr;
- m.buf_addr = m.data;
+ m.data_off = 0;
+ m.buf_addr = (void *)(uintptr_t)buff_addr;
m.buf_physaddr = phys_addr;
/*
struct ether_hdr *pkt_hdr;
uint64_t ret = 0;
- pkt_hdr = (struct ether_hdr *)m->data;
+ pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
/*get the used devices list*/
dev_ll = ll_root_used;
mbuf->pkt_len = mbuf->data_len;
/* Copy ethernet header to mbuf. */
- rte_memcpy((void*)mbuf->data, (const void*)m->data, ETH_HLEN);
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
+ rte_pktmbuf_mtod(m, const void *), ETH_HLEN);
/* Set up the VLAN header; convert fields to network byte order with htons(). */
- vlan_hdr = (struct vlan_ethhdr *) mbuf->data;
+ vlan_hdr = rte_pktmbuf_mtod(mbuf, struct vlan_ethhdr *);
vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_TCI = htons(vlan_tag);
/* Copy the remaining packet contents to the mbuf. */
- rte_memcpy((void*) ((uint8_t*)mbuf->data + VLAN_ETH_HLEN),
- (const void*) ((uint8_t*)m->data + ETH_HLEN), (m->data_len - ETH_HLEN));
+ rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf, uint8_t *) + VLAN_ETH_HLEN),
+ (const void *)(rte_pktmbuf_mtod(m, uint8_t *) + ETH_HLEN),
+ (m->data_len - ETH_HLEN));
tx_q->m_table[len] = mbuf;
len++;
if (enable_stats) {
/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
m.data_len = desc->len;
- m.data = (void*)(uintptr_t)buff_addr;
+ m.data_off = 0;
+ m.buf_addr = (void *)(uintptr_t)buff_addr;
m.nb_segs = 1;
virtio_tx_route(dev, &m, mbuf_pool, 0);
struct rte_kni_mbuf {
void *pool;
void *buf_addr;
- char pad0[14];
- uint16_t ol_flags; /**< Offload features. */
+ char pad0[16];
void *next;
- void *data; /**< Start address of data in segment buffer. */
+ uint16_t data_off; /**< Offset of data from start of segment buffer. */
uint16_t data_len; /**< Amount of data in segment buffer. */
- char pad2[2];
- uint16_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
+ uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
+ char pad2[4];
+ uint16_t ol_flags; /**< Offload features. */
} __attribute__((__aligned__(64)));
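
With the data pointer gone from the shared descriptor, the KNI kernel module has to rebuild the data address from buf_addr and data_off and rebase it into its own mapping, which is what the hunks below repeat at each use site. A condensed sketch of that translation (the helper is illustrative, not part of the patch; mbuf_va/mbuf_kva are the userspace and kernel mappings of the mbuf region, as used below):

static void *
kni_mbuf_data_kva(struct kni_dev *kni, struct rte_kni_mbuf *kva)
{
        /* the userspace data address is now buf_addr + data_off */
        uintptr_t data_va = (uintptr_t)kva->buf_addr + kva->data_off;

        /* rebase from the userspace mapping into the kernel one */
        return (void *)(data_va - (uintptr_t)kni->mbuf_va +
                        (uintptr_t)kni->mbuf_kva);
}
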
/*
for (i = 0; i < num; i++) {
kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
len = kva->data_len;
- data_kva = kva->data - kni->mbuf_va + kni->mbuf_kva;
+ data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
+ + kni->mbuf_kva;
skb = dev_alloc_skb(len + 2);
if (!skb) {
for (i = 0; i < num; i++) {
kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
len = kva->pkt_len;
- data_kva = kva->data - kni->mbuf_va +
- kni->mbuf_kva;
+ data_kva = kva->buf_addr + kva->data_off -
+ kni->mbuf_va + kni->mbuf_kva;
alloc_kva = (void *)alloc_va[i] - kni->mbuf_va +
kni->mbuf_kva;
- alloc_data_kva = alloc_kva->data - kni->mbuf_va +
- kni->mbuf_kva;
+ alloc_data_kva = alloc_kva->buf_addr +
+ alloc_kva->data_off - kni->mbuf_va +
+ kni->mbuf_kva;
memcpy(alloc_data_kva, data_kva, len);
alloc_kva->pkt_len = len;
alloc_kva->data_len = len;
for (i = 0; i < num; i++) {
kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
len = kva->data_len;
- data_kva = kva->data - kni->mbuf_va + kni->mbuf_kva;
+ data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
+ kni->mbuf_kva;
skb = dev_alloc_skb(len + 2);
if (skb == NULL)
void *data_kva;
pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
- data_kva = pkt_kva->data - kni->mbuf_va + kni->mbuf_kva;
+ data_kva = pkt_kva->buf_addr + pkt_kva->data_off - kni->mbuf_va
+ + kni->mbuf_kva;
len = skb->len;
memcpy(data_kva, skb->data, len);
/* Fragment size should be a multiple of 8. */
IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
- in_hdr = (struct ipv4_hdr *) pkt_in->data;
+ in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
/* If Don't Fragment flag is set */
if (len > (in_seg->data_len - in_seg_data_pos)) {
len = in_seg->data_len - in_seg_data_pos;
}
- out_seg->data = (char*) in_seg->data + (uint16_t)in_seg_data_pos;
+ out_seg->data_off = in_seg->data_off + in_seg_data_pos;
out_seg->data_len = (uint16_t)len;
out_pkt->pkt_len = (uint16_t)(len +
out_pkt->pkt_len);
/* Build the IP header */
- out_hdr = (struct ipv4_hdr*) out_pkt->data;
+ out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv4_hdr *);
__fill_ipv4hdr_frag(out_hdr, in_hdr,
(uint16_t)out_pkt->pkt_len,
(uint16_t)(pkt_in->pkt_len - sizeof (struct ipv6_hdr))))
return (-EINVAL);
- in_hdr = (struct ipv6_hdr *) pkt_in->data;
+ in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv6_hdr *);
in_seg = pkt_in;
in_seg_data_pos = sizeof(struct ipv6_hdr);
if (len > (in_seg->data_len - in_seg_data_pos)) {
len = in_seg->data_len - in_seg_data_pos;
}
- out_seg->data = (char *) in_seg->data + (uint16_t) in_seg_data_pos;
+ out_seg->data_off = in_seg->data_off + in_seg_data_pos;
out_seg->data_len = (uint16_t)len;
out_pkt->pkt_len = (uint16_t)(len +
out_pkt->pkt_len);
/* Build the IP header */
- out_hdr = (struct ipv6_hdr *) out_pkt->data;
+ out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv6_hdr *);
__fill_ipv6hdr_frag(out_hdr, in_hdr,
(uint16_t) out_pkt->pkt_len - sizeof(struct ipv6_hdr),
m->buf_len = (uint16_t)buf_len;
/* keep some headroom between start of buffer and data */
- m->data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
+ m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
/* init some constant fields */
m->pool = mp;
__rte_mbuf_sanity_check(m, 0);
fprintf(f, " segment at 0x%p, data=0x%p, data_len=%u\n",
- m, m->data, (unsigned)m->data_len);
+ m, rte_pktmbuf_mtod(m, void *), (unsigned)m->data_len);
len = dump_len;
if (len > m->data_len)
len = m->data_len;
if (len != 0)
- rte_hexdump(f, NULL, m->data, len);
+ rte_hexdump(f, NULL, rte_pktmbuf_mtod(m, void *), len);
dump_len -= len;
m = m->next;
nb_segs--;
void *buf_addr; /**< Virtual address of segment buffer. */
phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */
uint16_t buf_len; /**< Length of segment buffer. */
+
+ /* valid for any segment */
+ struct rte_mbuf *next; /**< Next segment of scattered packet. */
+ uint16_t data_off; /**< Offset of data from start of segment buffer. */
+ uint16_t data_len; /**< Amount of data in segment buffer. */
+ uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
+
#ifdef RTE_MBUF_REFCNT
/**
* 16-bit Reference counter.
uint16_t reserved; /**< Unused field. Required for padding */
uint16_t ol_flags; /**< Offload features. */
- /* valid for any segment */
- struct rte_mbuf *next; /**< Next segment of scattered packet. */
- void* data; /**< Start address of data in segment buffer. */
- uint16_t data_len; /**< Amount of data in segment buffer. */
-
/* these fields are valid for first segment only */
uint8_t nb_segs; /**< Number of segments. */
uint8_t port; /**< Input port. */
- uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
/* offload features, valid for first segment only */
union {
uint16_t metadata16[0];
uint32_t metadata32[0];
uint64_t metadata64[0];
- };
+ } __rte_cache_aligned;
} __rte_cache_aligned;
#define RTE_MBUF_METADATA_UINT8(mbuf, offset) \
* @param m
* The control mbuf.
*/
-#define rte_ctrlmbuf_data(m) ((m)->data)
+#define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)
/**
* A macro that returns the length of the carried data.
*/
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
- uint32_t buf_ofs;
-
m->next = NULL;
m->pkt_len = 0;
m->l2_l3_len = 0;
m->port = 0xff;
m->ol_flags = 0;
- buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
+ m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
RTE_PKTMBUF_HEADROOM : m->buf_len;
- m->data = (char*) m->buf_addr + buf_ofs;
m->data_len = 0;
__rte_mbuf_sanity_check(m, 1);
mi->buf_len = md->buf_len;
mi->next = md->next;
- mi->data = md->data;
+ mi->data_off = md->data_off;
mi->data_len = md->data_len;
mi->port = md->port;
mi->vlan_tci = md->vlan_tci;
{
const struct rte_mempool *mp = m->pool;
void *buf = RTE_MBUF_TO_BADDR(m);
- uint32_t buf_ofs;
uint32_t buf_len = mp->elt_size - sizeof(*m);
m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof (*m);
m->buf_addr = buf;
m->buf_len = (uint16_t)buf_len;
- buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
+ m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
RTE_PKTMBUF_HEADROOM : m->buf_len;
- m->data = (char*) m->buf_addr + buf_ofs;
m->data_len = 0;
}
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
__rte_mbuf_sanity_check(m, 1);
- return (uint16_t) ((char*) m->data - (char*) m->buf_addr);
+ return m->data_off;
}
/**
* @param t
* The type to cast the result into.
*/
-#define rte_pktmbuf_mtod(m, t) ((t)((m)->data))
+#define rte_pktmbuf_mtod(m, t) ((t)((char *)(m)->buf_addr + (m)->data_off))
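
Because rte_pktmbuf_mtod() now computes buf_addr + data_off instead of loading a stored pointer, call sites that already went through the macro need no change; only direct mb->data accesses had to be converted above. A before/after sketch (illustrative):

static struct ether_hdr *
pkt_eth_hdr(struct rte_mbuf *mb)
{
        /* pre-patch form: (struct ether_hdr *)mb->data */
        /* post-patch, the macro expands to buf_addr + data_off: */
        return rte_pktmbuf_mtod(mb, struct ether_hdr *);
}
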
/**
* A macro that returns the length of the packet.
if (unlikely(len > rte_pktmbuf_headroom(m)))
return NULL;
- m->data = (char*) m->data - len;
+ m->data_off -= len;
m->data_len = (uint16_t)(m->data_len + len);
m->pkt_len = (m->pkt_len + len);
- return (char*) m->data;
+ return (char *)m->buf_addr + m->data_off;
}
/**
if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
return NULL;
- tail = (char*) m_last->data + m_last->data_len;
+ tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
m_last->data_len = (uint16_t)(m_last->data_len + len);
m->pkt_len = (m->pkt_len + len);
return (char*) tail;
return NULL;
m->data_len = (uint16_t)(m->data_len - len);
- m->data = ((char*) m->data + len);
+ m->data_off += len;
m->pkt_len = (m->pkt_len - len);
- return (char*) m->data;
+ return (char *)m->buf_addr + m->data_off;
}
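
After this change, prepend and adj are pure 16-bit arithmetic on data_off, and headroom is simply the current offset. A round-trip sketch of the resulting invariants (illustrative, not from the patch):

static void
headroom_roundtrip(struct rte_mbuf *m)
{
        uint16_t room = rte_pktmbuf_headroom(m); /* now just m->data_off */

        if (rte_pktmbuf_prepend(m, 8) != NULL) {
                /* prepend moved data_off back by 8; adj moves it
                 * forward again, restoring the original headroom */
                rte_pktmbuf_adj(m, 8);
        }
        (void)room;
}
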
/**
switch (policy) {
case BALANCE_XMIT_POLICY_LAYER2:
- eth_hdr = (struct ether_hdr *)buf->data;
+ eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
hash = ether_hash(eth_hdr);
hash ^= hash >> 8;
return hash % slave_count;
case BALANCE_XMIT_POLICY_LAYER23:
- eth_hdr = (struct ether_hdr *)buf->data;
+ eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
if (buf->ol_flags & PKT_RX_VLAN_PKT)
eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
}
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + \
- (uint64_t) ((char *)((mb)->data) - (char *)(mb)->buf_addr))
+ (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
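
Each PMD carries its own copy of these macros, so the same one-line simplification recurs below in the other drivers: the DMA address becomes buf_physaddr plus the very data_off the CPU side uses. A sketch of the typical RX-refill consumer (the descriptor field is illustrative):

static void
rx_desc_refill(volatile uint64_t *desc_buf_addr, struct rte_mbuf *mb)
{
        /* a freshly reset mbuf has data_off == RTE_PKTMBUF_HEADROOM,
         * so the _DEFAULT variant gives the same address without
         * reading the mbuf */
        *desc_buf_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
}
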
*/
pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
rxq->crc_len);
- rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_packet_prefetch(rxm->data);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
rxm->nb_segs = 1;
rxm->next = NULL;
rxm->pkt_len = pkt_len;
*/
data_len = rte_le_to_cpu_16(rxd.length);
rxm->data_len = data_len;
- rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
/*
* If this is the first buffer of the received packet,
rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/* Prefetch data of first segment, if configured to do so. */
- rte_packet_prefetch(first_seg->data);
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
/*
* Store the mbuf address into the next entry of the array
}
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + \
- (uint64_t) ((char *)((mb)->data) - \
- (char *)(mb)->buf_addr))
+ (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
*/
pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
rxq->crc_len);
- rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_packet_prefetch(rxm->data);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
rxm->nb_segs = 1;
rxm->next = NULL;
rxm->pkt_len = pkt_len;
*/
data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
rxm->data_len = data_len;
- rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
/*
* If this is the first buffer of the received packet,
first_seg->ol_flags = pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
- rte_packet_prefetch(first_seg->data);
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
/*
* Store the mbuf address into the next entry of the array
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->buf_physaddr + \
- (uint64_t)((char *)((mb)->data) - \
- (char *)(mb)->buf_addr)))
+ ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
static const struct rte_memzone *
i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
mb = rxep[i].mbuf;
rte_mbuf_refcnt_set(mb, 1);
mb->next = NULL;
- mb->data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
mb->nb_segs = 1;
mb->port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
- rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_prefetch0(rxm->data);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
rxm->nb_segs = 1;
rxm->next = NULL;
rxm->pkt_len = rx_packet_len;
rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
rxm->data_len = rx_packet_len;
- rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
/**
* If this is the first buffer of the received packet, set the
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
/* Prefetch data of first segment, if configured to do so. */
- rte_prefetch0(first_seg->data);
+ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+ first_seg->data_off));
rx_pkts[nb_rx++] = first_seg;
first_seg = NULL;
}
rte_mbuf_refcnt_set(mbuf, 1);
mbuf->next = NULL;
- mbuf->data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->nb_segs = 1;
mbuf->port = rxq->port_id;
mb = rxep[i].mbuf;
rte_mbuf_refcnt_set(mb, 1);
mb->next = NULL;
- mb->data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
mb->nb_segs = 1;
mb->port = rxq->port_id;
*/
pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
rxq->crc_len);
- rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_packet_prefetch(rxm->data);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
rxm->nb_segs = 1;
rxm->next = NULL;
rxm->pkt_len = pkt_len;
*/
data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
rxm->data_len = data_len;
- rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
/*
* If this is the first buffer of the received packet,
}
/* Prefetch data of first segment, if configured to do so. */
- rte_packet_prefetch(first_seg->data);
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
/*
* Store the mbuf address into the next entry of the array
rte_mbuf_refcnt_set(mbuf, 1);
mbuf->next = NULL;
- mbuf->data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->nb_segs = 1;
mbuf->port = rxq->port_id;
#endif
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
- (char *)(mb)->buf_addr))
+ (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
if (header.len <= buf_size) {
/* pcap packet will fit in the mbuf, go ahead and copy */
- rte_memcpy(mbuf->data, packet, header.len);
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
+ header.len);
mbuf->data_len = (uint16_t)header.len;
mbuf->pkt_len = mbuf->data_len;
bufs[num_rx] = mbuf;
calculate_timestamp(&header.ts);
header.len = mbuf->data_len;
header.caplen = header.len;
- pcap_dump((u_char*) dumper_q->dumper, &header, mbuf->data);
+ pcap_dump((u_char *)dumper_q->dumper, &header,
+ rte_pktmbuf_mtod(mbuf, void *));
rte_pktmbuf_free(mbuf);
num_tx++;
}
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
- ret = pcap_sendpacket(tx_queue->pcap, (u_char*) mbuf->data,
+ ret = pcap_sendpacket(tx_queue->pcap,
+ rte_pktmbuf_mtod(mbuf, u_char *),
mbuf->data_len);
if (unlikely(ret != 0))
break;
}
rte_prefetch0(cookie);
- rte_packet_prefetch(cookie->data);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
rx_pkts[i] = cookie;
vq->vq_used_cons_idx++;
vq_ring_free_chain(vq, desc_idx);
}
rxm->port = rxvq->port_id;
- rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->nb_segs = 1;
rxm->next = NULL;
if (seg_num == 0)
seg_num = 1;
- rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->nb_segs = seg_num;
rxm->next = NULL;
rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
while (extra_idx < rcv_cnt) {
rxm = rcv_pkts[extra_idx];
- rxm->data =
- (char *)rxm->buf_addr +
- RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
rxm->next = NULL;
rxm->pkt_len = (uint32_t)(len[extra_idx]);
rxm->data_len = (uint16_t)(len[extra_idx]);
#define VIRTQUEUE_MAX_NAME_SZ 32
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
- (char *)(mb)->buf_addr))
+ (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
#define VTNET_SQ_RQ_QUEUE_IDX 0
#define VTNET_SQ_TQ_QUEUE_IDX 1
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
- (char *)(mb)->buf_addr))
+ (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
rxm->data_len = (uint16_t)rcd->len;
rxm->port = rxq->port_id;
rxm->vlan_tci = 0;
- rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
rx_pkts[nb_rx++] = rxm;
rxm = rx_pkts[i];
PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
rxm->next = NULL;
- rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
rxm->nb_segs = 1;
rxm->port = pi->port_id;