{
#define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t));
DUMP_SIZE(struct rte_mbuf);
- DUMP_SIZE(struct rte_pktmbuf);
DUMP_SIZE(struct rte_mempool);
DUMP_SIZE(struct rte_ring);
#undef DUMP_SIZE
pkt_ol_flags = mb->ol_flags;
ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK));
- eth_hdr = (struct ether_hdr *) mb->pkt.data;
+ eth_hdr = (struct ether_hdr *) mb->data;
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
if (eth_type == ETHER_TYPE_VLAN) {
/* Only allow single VLAN label here */
}
/* Combine the packet header write. VLAN is not considered here */
- mb->pkt.vlan_macip.f.l2_len = l2_len;
- mb->pkt.vlan_macip.f.l3_len = l3_len;
+ mb->vlan_macip.f.l2_len = l2_len;
+ mb->vlan_macip.f.l3_len = l3_len;
mb->ol_flags = ol_flags;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
if (!pkt)
break;
- pkt->pkt.data_len = pkt_size;
- pkt->pkt.next = NULL;
+ pkt->data_len = pkt_size;
+ pkt->next = NULL;
/* Initialize Ethernet header. */
- eth_hdr = (struct ether_hdr *)pkt->pkt.data;
+ eth_hdr = (struct ether_hdr *)pkt->data;
ether_addr_copy(&cfg_ether_dst, ð_hdr->d_addr);
ether_addr_copy(&cfg_ether_src, ð_hdr->s_addr);
eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_size -
sizeof(*eth_hdr) -
sizeof(*ip_hdr));
- pkt->pkt.nb_segs = 1;
- pkt->pkt.pkt_len = pkt_size;
+ pkt->nb_segs = 1;
+ pkt->pkt_len = pkt_size;
pkt->ol_flags = ol_flags;
- pkt->pkt.vlan_macip.f.vlan_tci = vlan_tci;
- pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_macip.f.vlan_tci = vlan_tci;
+ pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
next_flow = (next_flow + 1) % cfg_n_flows;
nb_replies = 0;
for (i = 0; i < nb_rx; i++) {
pkt = pkts_burst[i];
- eth_h = (struct ether_hdr *) pkt->pkt.data;
+ eth_h = (struct ether_hdr *) pkt->data;
eth_type = RTE_BE_TO_CPU_16(eth_h->ether_type);
l2_len = sizeof(struct ether_hdr);
if (verbose_level > 0) {
printf("\nPort %d pkt-len=%u nb-segs=%u\n",
- fs->rx_port, pkt->pkt.pkt_len, pkt->pkt.nb_segs);
+ fs->rx_port, pkt->pkt_len, pkt->nb_segs);
ether_addr_dump(" ETH: src=", ð_h->s_addr);
ether_addr_dump(" dst=", ð_h->d_addr);
}
* Check that the received packet is a PTP packet that was detected
* by the hardware.
*/
- eth_hdr = (struct ether_hdr *)mb->pkt.data;
+ eth_hdr = (struct ether_hdr *)mb->data;
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) {
if (eth_type == ETHER_TYPE_1588) {
printf("Port %u Received non PTP packet type=0x%4x "
"len=%u\n",
(unsigned) fs->rx_port, eth_type,
- (unsigned) mb->pkt.pkt_len);
+ (unsigned) mb->pkt_len);
}
rte_pktmbuf_free(mb);
return;
* Check that the received PTP packet is a PTP V2 packet of type
* PTP_SYNC_MESSAGE.
*/
- ptp_hdr = (struct ptpv2_msg *) ((char *) mb->pkt.data +
+ ptp_hdr = (struct ptpv2_msg *) ((char *) mb->data +
sizeof(struct ether_hdr));
if (ptp_hdr->version != 0x02) {
printf("Port %u Received PTP V2 Ethernet frame with wrong PTP"
fs->rx_packets += nb_rx;
for (i = 0; i < nb_rx; i++) {
mb = pkts_burst[i];
- eth_hdr = (struct ether_hdr *) mb->pkt.data;
+ eth_hdr = (struct ether_hdr *) mb->data;
ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
ð_hdr->d_addr);
ether_addr_copy(&ports[fs->tx_port].eth_addr,
txp = &ports[fs->tx_port];
for (i = 0; i < nb_rx; i++) {
mb = pkts_burst[i];
- eth_hdr = (struct ether_hdr *) mb->pkt.data;
+ eth_hdr = (struct ether_hdr *) mb->data;
ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
ð_hdr->d_addr);
ether_addr_copy(&ports[fs->tx_port].eth_addr,
ð_hdr->s_addr);
mb->ol_flags = txp->tx_ol_flags;
- mb->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mb->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- mb->pkt.vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+ mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;
txp = &ports[fs->tx_port];
for (i = 0; i < nb_rx; i++) {
mb = pkts_burst[i];
- eth_hdr = (struct ether_hdr *) mb->pkt.data;
+ eth_hdr = (struct ether_hdr *) mb->data;
/* Swap dest and src mac addresses. */
ether_addr_copy(ð_hdr->d_addr, &addr);
ether_addr_copy(&addr, ð_hdr->s_addr);
mb->ol_flags = txp->tx_ol_flags;
- mb->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mb->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- mb->pkt.vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+ mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;
rte_pktmbuf_free(mb);
continue;
}
- eth_hdr = (struct ether_hdr *) mb->pkt.data;
+ eth_hdr = (struct ether_hdr *) mb->data;
eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
ol_flags = mb->ol_flags;
print_ether_addr(" src=", ð_hdr->s_addr);
print_ether_addr(" - dst=", ð_hdr->d_addr);
printf(" - type=0x%04x - length=%u - nb_segs=%d",
- eth_type, (unsigned) mb->pkt.pkt_len,
- (int)mb->pkt.nb_segs);
+ eth_type, (unsigned) mb->pkt_len,
+ (int)mb->nb_segs);
if (ol_flags & PKT_RX_RSS_HASH) {
- printf(" - RSS hash=0x%x", (unsigned) mb->pkt.hash.rss);
+ printf(" - RSS hash=0x%x", (unsigned) mb->hash.rss);
printf(" - RSS queue=0x%x",(unsigned) fs->rx_queue);
}
else if (ol_flags & PKT_RX_FDIR)
printf(" - FDIR hash=0x%x - FDIR id=0x%x ",
- mb->pkt.hash.fdir.hash, mb->pkt.hash.fdir.id);
+ mb->hash.fdir.hash, mb->hash.fdir.id);
if (ol_flags & PKT_RX_VLAN_PKT)
printf(" - VLAN tci=0x%x",
- mb->pkt.vlan_macip.f.vlan_tci);
+ mb->vlan_macip.f.vlan_tci);
printf("\n");
if (ol_flags != 0) {
int rxf;
mb_ctor_arg->seg_buf_offset);
mb->buf_len = mb_ctor_arg->seg_buf_size;
mb->ol_flags = 0;
- mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
- mb->pkt.nb_segs = 1;
- mb->pkt.vlan_macip.data = 0;
- mb->pkt.hash.rss = 0;
+ mb->data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->vlan_macip.data = 0;
+ mb->hash.rss = 0;
}
static void
* The maximum number of segments per packet is used when creating
* scattered transmit packets composed of a list of mbufs.
*/
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
+#define RTE_MAX_SEGS_PER_PKT 255 /**< nb_segs is an 8-bit unsigned char. */
#define MAX_PKT_BURST 512
#define DEF_PKT_BURST 32
unsigned copy_len;
seg = pkt;
- while (offset >= seg->pkt.data_len) {
- offset -= seg->pkt.data_len;
- seg = seg->pkt.next;
+ while (offset >= seg->data_len) {
+ offset -= seg->data_len;
+ seg = seg->next;
}
- copy_len = seg->pkt.data_len - offset;
- seg_buf = ((char *) seg->pkt.data + offset);
+ copy_len = seg->data_len - offset;
+ seg_buf = ((char *) seg->data + offset);
while (len > copy_len) {
rte_memcpy(seg_buf, buf, (size_t) copy_len);
len -= copy_len;
buf = ((char*) buf + copy_len);
- seg = seg->pkt.next;
- seg_buf = seg->pkt.data;
+ seg = seg->next;
+ seg_buf = seg->data;
}
rte_memcpy(seg_buf, buf, (size_t) len);
}
static inline void
copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
- if (offset + len <= pkt->pkt.data_len) {
- rte_memcpy(((char *) pkt->pkt.data + offset), buf, (size_t) len);
+ if (offset + len <= pkt->data_len) {
+ rte_memcpy(((char *) pkt->data + offset), buf, (size_t) len);
return;
}
copy_buf_to_pkt_segs(buf, len, pkt, offset);
return;
break;
}
- pkt->pkt.data_len = tx_pkt_seg_lengths[0];
+ pkt->data_len = tx_pkt_seg_lengths[0];
pkt_seg = pkt;
for (i = 1; i < tx_pkt_nb_segs; i++) {
- pkt_seg->pkt.next = tx_mbuf_alloc(mbp);
- if (pkt_seg->pkt.next == NULL) {
- pkt->pkt.nb_segs = i;
+ pkt_seg->next = tx_mbuf_alloc(mbp);
+ if (pkt_seg->next == NULL) {
+ pkt->nb_segs = i;
rte_pktmbuf_free(pkt);
goto nomore_mbuf;
}
- pkt_seg = pkt_seg->pkt.next;
- pkt_seg->pkt.data_len = tx_pkt_seg_lengths[i];
+ pkt_seg = pkt_seg->next;
+ pkt_seg->data_len = tx_pkt_seg_lengths[i];
}
- pkt_seg->pkt.next = NULL; /* Last segment of packet. */
+ pkt_seg->next = NULL; /* Last segment of packet. */
/*
* Initialize Ethernet header.
* Complete first mbuf of packet and append it to the
* burst of packets to be transmitted.
*/
- pkt->pkt.nb_segs = tx_pkt_nb_segs;
- pkt->pkt.pkt_len = tx_pkt_length;
+ pkt->nb_segs = tx_pkt_nb_segs;
+ pkt->pkt_len = tx_pkt_length;
pkt->ol_flags = ol_flags;
- pkt->pkt.vlan_macip.f.vlan_tci = vlan_tci;
- pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_macip.f.vlan_tci = vlan_tci;
+ pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
{
#define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t));
DUMP_SIZE(struct rte_mbuf);
- DUMP_SIZE(struct rte_pktmbuf);
DUMP_SIZE(struct rte_mempool);
DUMP_SIZE(struct rte_ring);
#undef DUMP_SIZE
unsigned copy_len;
seg = pkt;
- while (offset >= seg->pkt.data_len) {
- offset -= seg->pkt.data_len;
- seg = seg->pkt.next;
+ while (offset >= seg->data_len) {
+ offset -= seg->data_len;
+ seg = seg->next;
}
- copy_len = seg->pkt.data_len - offset;
- seg_buf = ((char *) seg->pkt.data + offset);
+ copy_len = seg->data_len - offset;
+ seg_buf = ((char *) seg->data + offset);
while (len > copy_len) {
rte_memcpy(seg_buf, buf, (size_t) copy_len);
len -= copy_len;
buf = ((char *) buf + copy_len);
- seg = seg->pkt.next;
- seg_buf = seg->pkt.data;
+ seg = seg->next;
+ seg_buf = seg->data;
}
rte_memcpy(seg_buf, buf, (size_t) len);
}
static inline void
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
- if (offset + len <= pkt->pkt.data_len) {
- rte_memcpy(((char *) pkt->pkt.data + offset), buf, (size_t) len);
+ if (offset + len <= pkt->data_len) {
+ rte_memcpy(((char *) pkt->data + offset), buf, (size_t) len);
return;
}
copy_buf_to_pkt_segs(buf, len, pkt, offset);
break;
}
- pkt->pkt.data_len = tx_pkt_seg_lengths[0];
+ pkt->data_len = tx_pkt_seg_lengths[0];
pkt_seg = pkt;
for (i = 1; i < tx_pkt_nb_segs; i++) {
- pkt_seg->pkt.next = rte_pktmbuf_alloc(mp);
- if (pkt_seg->pkt.next == NULL) {
- pkt->pkt.nb_segs = i;
+ pkt_seg->next = rte_pktmbuf_alloc(mp);
+ if (pkt_seg->next == NULL) {
+ pkt->nb_segs = i;
rte_pktmbuf_free(pkt);
goto nomore_mbuf;
}
- pkt_seg = pkt_seg->pkt.next;
- pkt_seg->pkt.data_len = tx_pkt_seg_lengths[i];
+ pkt_seg = pkt_seg->next;
+ pkt_seg->data_len = tx_pkt_seg_lengths[i];
}
- pkt_seg->pkt.next = NULL; /* Last segment of packet. */
+ pkt_seg->next = NULL; /* Last segment of packet. */
/*
* Copy headers in first packet segment(s).
* Complete first mbuf of packet and append it to the
* burst of packets to be transmitted.
*/
- pkt->pkt.nb_segs = tx_pkt_nb_segs;
- pkt->pkt.pkt_len = tx_pkt_length;
- pkt->pkt.vlan_macip.f.l2_len = eth_hdr_size;
+ pkt->nb_segs = tx_pkt_nb_segs;
+ pkt->pkt_len = tx_pkt_length;
+ pkt->vlan_macip.f.l2_len = eth_hdr_size;
if (ipv4) {
- pkt->pkt.vlan_macip.f.vlan_tci = ETHER_TYPE_IPv4;
- pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_macip.f.vlan_tci = ETHER_TYPE_IPv4;
+ pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV4_HDR | PKT_RX_VLAN_PKT;
else
pkt->ol_flags = PKT_RX_IPV4_HDR;
} else {
- pkt->pkt.vlan_macip.f.vlan_tci = ETHER_TYPE_IPv6;
- pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv6_hdr);
+ pkt->vlan_macip.f.vlan_tci = ETHER_TYPE_IPv6;
+ pkt->vlan_macip.f.l3_len = sizeof(struct ipv6_hdr);
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV6_HDR | PKT_RX_VLAN_PKT;
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = 0;
+ bufs[i]->hash.rss = 0;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
if (rte_lcore_count() >= 3) {
clear_packet_count();
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = (i & 1) << 8;
+ bufs[i]->hash.rss = (i & 1) << 8;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
* so load gets distributed */
clear_packet_count();
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = i;
+ bufs[i]->hash.rss = i;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
return -1;
}
for (i = 0; i < BIG_BATCH; i++)
- many_bufs[i]->pkt.hash.rss = i << 2;
+ many_bufs[i]->hash.rss = i << 2;
for (i = 0; i < BIG_BATCH/BURST; i++) {
rte_distributor_process(d, &many_bufs[i*BURST], BURST);
while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
rte_distributor_process(d, NULL, 0);
for (j = 0; j < BURST; j++) {
- bufs[j]->pkt.hash.rss = (i+j) << 1;
+ bufs[j]->hash.rss = (i+j) << 1;
rte_mbuf_refcnt_set(bufs[j], 1);
}
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = 0;
+ bufs[i]->hash.rss = 0;
rte_distributor_process(d, bufs, BURST);
/* at this point, we will have processed some packets and have a full
return -1;
}
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = 0;
+ bufs[i]->hash.rss = 0;
/* get worker zero to quit */
zero_quit = 1;
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = 0;
+ bufs[i]->hash.rss = 0;
rte_distributor_process(d, bufs, BURST);
/* at this point, we will have processed some packets and have a full
zero_quit = 0;
quit = 1;
for (i = 0; i < num_workers; i++)
- bufs[i]->pkt.hash.rss = i << 1;
+ bufs[i]->hash.rss = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);
}
/* ensure we have different hash value for each pkt */
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = i;
+ bufs[i]->hash.rss = i;
start = rte_rdtsc();
for (i = 0; i < (1<<ITER_POWER); i++)
quit = 1;
for (i = 0; i < num_workers; i++)
- bufs[i]->pkt.hash.rss = i << 1;
+ bufs[i]->hash.rss = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);
GOTO_FAIL("cannot clone data\n");
rte_pktmbuf_free(clone);
- mc->pkt.next = rte_pktmbuf_alloc(pktmbuf_pool);
- if(mc->pkt.next == NULL)
+ mc->next = rte_pktmbuf_alloc(pktmbuf_pool);
+ if(mc->next == NULL)
GOTO_FAIL("Next Pkt Null\n");
clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
printf("rte_pktmbuf_alloc() failed (%u)\n", i);
ret = -1;
}
- m[i]->pkt.data = RTE_PTR_ADD(m[i]->pkt.data, 64);
+ m[i]->data = RTE_PTR_ADD(m[i]->data, 64);
}
/* free them */
printf("rte_pktmbuf_alloc() failed (%u)\n", i);
ret = -1;
}
- if (m[i]->pkt.data != RTE_PTR_ADD(m[i]->buf_addr, RTE_PKTMBUF_HEADROOM)) {
- printf ("pkt.data pointer not set properly\n");
+ if (m[i]->data != RTE_PTR_ADD(m[i]->buf_addr, RTE_PKTMBUF_HEADROOM)) {
+ printf ("data pointer not set properly\n");
ret = -1;
}
}
mb = m[i];
while(mb != NULL) {
mt = mb;
- mb = mb->pkt.next;
+ mb = mb->next;
rte_pktmbuf_free_seg(mt);
}
}
rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
/* 64 byte packet */
- mbuf->pkt.pkt_len = 60;
- mbuf->pkt.data_len = 60;
+ mbuf->pkt_len = 60;
+ mbuf->data_len = 60;
}
struct rte_mbuf *mbuf;
mbuf = rte_pktmbuf_alloc(pool);
- memset(mbuf->pkt.data, 0x00,
+ memset(mbuf->data, 0x00,
sizeof(struct ipv4_5tuple));
five_tuple.proto = j;
five_tuple.port_src = rte_bswap16(100 + j);
five_tuple.port_dst = rte_bswap16(200 + j);
- memcpy(mbuf->pkt.data, &five_tuple,
+ memcpy(mbuf->data, &five_tuple,
sizeof(struct ipv4_5tuple));
RTE_LOG(INFO, PIPELINE, "%s: Enqueue onto ring %d\n",
__func__, i);
printf("Got %d object(s) from ring %d!\n", ret, i);
for (j = 0; j < ret; j++) {
mbuf = (struct rte_mbuf *)objs[j];
- rte_hexdump(stdout, "mbuf", mbuf->pkt.data, 64);
+ rte_hexdump(stdout, "mbuf", mbuf->data, 64);
rte_pktmbuf_free(mbuf);
}
tx_count += ret;
printf("Got %d object(s) from ring %d!\n", ret, i);
for (j = 0; j < ret; j++) {
mbuf = (struct rte_mbuf *)objs[j];
- rte_hexdump(stdout, "Object:", mbuf->pkt.data,
- mbuf->pkt.data_len);
+ rte_hexdump(stdout, "Object:", mbuf->data,
+ mbuf->data_len);
rte_pktmbuf_free(mbuf);
}
tx_count += ret;
*
*/
#define PACKET_DATA_START_PHYS(p) \
- ((p)->buf_physaddr + ((char *)p->pkt.data - (char *)p->buf_addr))
+ ((p)->buf_physaddr + ((char *)p->data - (char *)p->buf_addr))
/*
* A fixed offset to where the crypto is to be performed, which is the first
crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
{
CpaCySymDpOpData *opData =
- (CpaCySymDpOpData *) ((char *) (rte_buff->pkt.data)
+ (CpaCySymDpOpData *) ((char *) (rte_buff->data)
+ CRYPTO_OFFSET_TO_OPDATA);
uint32_t lcore_id;
bzero(opData, sizeof(CpaCySymDpOpData));
opData->srcBuffer = opData->dstBuffer = PACKET_DATA_START_PHYS(rte_buff);
- opData->srcBufferLen = opData->dstBufferLen = rte_buff->pkt.data_len;
+ opData->srcBufferLen = opData->dstBufferLen = rte_buff->data_len;
opData->sessionCtx = qaCoreConf[lcore_id].encryptSessionHandleTbl[c][h];
opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
+ CRYPTO_OFFSET_TO_OPDATA;
opData->ivLenInBytes = IV_LENGTH_8_BYTES;
opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
- opData->messageLenToCipherInBytes = rte_buff->pkt.data_len
+ opData->messageLenToCipherInBytes = rte_buff->data_len
- CRYPTO_START_OFFSET;
/*
* Work around for padding, message length has to be a multiple of
if (NO_HASH != h) {
opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
- opData->messageLenToHashInBytes = rte_buff->pkt.data_len
+ opData->messageLenToHashInBytes = rte_buff->data_len
- HASH_START_OFFSET;
/*
* Work around for padding, message length has to be a multiple of block
 * Assumption: it is OK to ignore the passed digest pointer and place the HMAC
 * at the end of the packet.
*/
- opData->digestResult = rte_buff->buf_physaddr + rte_buff->pkt.data_len;
+ opData->digestResult = rte_buff->buf_physaddr + rte_buff->data_len;
}
if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
{
- CpaCySymDpOpData *opData = (void*) (((char *) rte_buff->pkt.data)
+ CpaCySymDpOpData *opData = (void*) (((char *) rte_buff->data)
+ CRYPTO_OFFSET_TO_OPDATA);
uint32_t lcore_id;
bzero(opData, sizeof(CpaCySymDpOpData));
opData->dstBuffer = opData->srcBuffer = PACKET_DATA_START_PHYS(rte_buff);
- opData->dstBufferLen = opData->srcBufferLen = rte_buff->pkt.data_len;
+ opData->dstBufferLen = opData->srcBufferLen = rte_buff->data_len;
opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
+ CRYPTO_OFFSET_TO_OPDATA;
opData->sessionCtx = qaCoreConf[lcore_id].decryptSessionHandleTbl[c][h];
opData->ivLenInBytes = IV_LENGTH_8_BYTES;
opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
- opData->messageLenToCipherInBytes = rte_buff->pkt.data_len
+ opData->messageLenToCipherInBytes = rte_buff->data_len
- CRYPTO_START_OFFSET;
/*
}
if (NO_HASH != h) {
opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
- opData->messageLenToHashInBytes = rte_buff->pkt.data_len
+ opData->messageLenToHashInBytes = rte_buff->data_len
- HASH_START_OFFSET;
/*
* Work around for padding, message length has to be a multiple of block
*/
opData->messageLenToHashInBytes -= opData->messageLenToHashInBytes
% HASH_BLOCK_DEFAULT_SIZE;
- opData->digestResult = rte_buff->buf_physaddr + rte_buff->pkt.data_len;
+ opData->digestResult = rte_buff->buf_physaddr + rte_buff->data_len;
}
if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
}
}
- port = dst_ports[pkt->pkt.in_port];
+ port = dst_ports[pkt->in_port];
/* Transmit the packet */
nic_tx_send_packet(pkt, (uint8_t)port);
if (m == NULL)
continue;
- ret = read(tap_fd, m->pkt.data, MAX_PACKET_SZ);
+ ret = read(tap_fd, m->data, MAX_PACKET_SZ);
lcore_stats[lcore_id].rx++;
if (unlikely(ret < 0)) {
FATAL_ERROR("Reading from %s interface failed",
tap_name);
}
- m->pkt.nb_segs = 1;
- m->pkt.next = NULL;
- m->pkt.pkt_len = (uint16_t)ret;
- m->pkt.data_len = (uint16_t)ret;
+ m->nb_segs = 1;
+ m->next = NULL;
+ m->pkt_len = (uint16_t)ret;
+ m->data_len = (uint16_t)ret;
ret = rte_eth_tx_burst(port_ids[lcore_id], 0, &m, 1);
if (unlikely(ret < 1)) {
rte_pktmbuf_free(m);
}
/* if we don't need to do any fragmentation */
- if (likely (IPV4_MTU_DEFAULT >= m->pkt.pkt_len)) {
+ if (likely (IPV4_MTU_DEFAULT >= m->pkt_len)) {
qconf->tx_mbufs[port_out].m_table[len] = m;
len2 = 1;
} else {
}
/* if we don't need to do any fragmentation */
- if (likely (IPV6_MTU_DEFAULT >= m->pkt.pkt_len)) {
+ if (likely (IPV6_MTU_DEFAULT >= m->pkt_len)) {
qconf->tx_mbufs[port_out].m_table[len] = m;
len2 = 1;
} else {
rte_panic("No headroom in mbuf.\n");
}
- m->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ m->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
/* 02:00:00:00:00:xx */
d_addr_bytes = ð_hdr->d_addr.addr_bytes[0];
/* Pop Ethernet header */
if (app.ether_hdr_pop_push) {
rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
- m->pkt.vlan_macip.f.l2_len = 0;
- m->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ m->vlan_macip.f.l2_len = 0;
+ m->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
}
}
ether_addr_copy(&pkt_meta->nh_arp, ðer_hdr->d_addr);
ether_addr_copy(&local_ether_addr, ðer_hdr->s_addr);
ether_hdr->ether_type = rte_bswap16(ETHER_TYPE_IPv4);
- pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
}
static int
dr = &qconf->death_row;
/* prepare mbuf: setup l2_len/l3_len. */
- m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->pkt.vlan_macip.f.l3_len = sizeof(*ip_hdr);
+ m->vlan_macip.f.l2_len = sizeof(*eth_hdr);
+ m->vlan_macip.f.l3_len = sizeof(*ip_hdr);
/* process this fragment. */
mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);
dr = &qconf->death_row;
/* prepare mbuf: setup l2_len/l3_len. */
- m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->pkt.vlan_macip.f.l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
+ m->vlan_macip.f.l2_len = sizeof(*eth_hdr);
+ m->vlan_macip.f.l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr, frag_hdr);
if (mo == NULL)
}
/* prepend new header */
- hdr->pkt.next = pkt;
+ hdr->next = pkt;
/* update header's fields */
- hdr->pkt.pkt_len = (uint16_t)(hdr->pkt.data_len + pkt->pkt.pkt_len);
- hdr->pkt.nb_segs = (uint8_t)(pkt->pkt.nb_segs + 1);
+ hdr->pkt_len = (uint16_t)(hdr->data_len + pkt->pkt_len);
+ hdr->nb_segs = (uint8_t)(pkt->nb_segs + 1);
/* copy metadata from source packet*/
- hdr->pkt.in_port = pkt->pkt.in_port;
- hdr->pkt.vlan_macip = pkt->pkt.vlan_macip;
- hdr->pkt.hash = pkt->pkt.hash;
+ hdr->in_port = pkt->in_port;
+ hdr->vlan_macip = pkt->vlan_macip;
+ hdr->hash = pkt->hash;
hdr->ol_flags = pkt->ol_flags;
/* Should we use rte_pktmbuf_clone() or not. */
use_clone = (port_num <= MCAST_CLONE_PORTS &&
- m->pkt.nb_segs <= MCAST_CLONE_SEGS);
+ m->nb_segs <= MCAST_CLONE_SEGS);
/* Mark all packet's segments as referenced port_num times */
if (use_clone == 0)
unsigned char *) + sizeof(struct ether_hdr));
/* Check to make sure the packet is valid (RFC1812) */
- if (is_valid_ipv4_pkt(ipv4_hdr, pkt->pkt.pkt_len) >= 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr, pkt->pkt_len) >= 0) {
/* Update time to live and header checksum */
--(ipv4_hdr->time_to_live);
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
- if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
rte_pktmbuf_free(m);
return;
}
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
- if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
rte_pktmbuf_free(m);
return;
}
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
uint8_t valid_mask = MASK_ALL_PKTS;
- if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt_len) < 0) {
rte_pktmbuf_free(m[0]);
valid_mask &= EXECLUDE_1ST_PKT;
}
- if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt_len) < 0) {
rte_pktmbuf_free(m[1]);
valid_mask &= EXECLUDE_2ND_PKT;
}
- if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt_len) < 0) {
rte_pktmbuf_free(m[2]);
valid_mask &= EXECLUDE_3RD_PKT;
}
- if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt_len) < 0) {
rte_pktmbuf_free(m[3]);
valid_mask &= EXECLUDE_4TH_PKT;
}
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
- if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
rte_pktmbuf_free(m);
return;
}
ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, &port) != 0)) {
- port = pkt->pkt.in_port;
+ port = pkt->in_port;
}
pos = lp->mbuf_out[port].n_mbufs;
static void
handle_packet(struct rte_mbuf *buf)
{
- const uint8_t in_port = buf->pkt.in_port;
+ const uint8_t in_port = buf->in_port;
const uint8_t out_port = output_ports[in_port];
enqueue_packet(buf, out_port);
pause_frame->opcode = rte_cpu_to_be_16(0x0001);
pause_frame->param = rte_cpu_to_be_16(duration);
- mbuf->pkt.pkt_len = 60;
- mbuf->pkt.data_len = 60;
+ mbuf->pkt_len = 60;
+ mbuf->data_len = 60;
rte_eth_tx_burst(port_id, 0, &mbuf, 1);
}
/* Copy mbuf data to buffer */
rte_memcpy((void *)(uintptr_t)buff_addr,
- (const void *)buff->pkt.data,
+ (const void *)buff->data,
rte_pktmbuf_data_len(buff));
PRINT_PACKET(dev, (uintptr_t)buff_addr,
rte_pktmbuf_data_len(buff), 0);
 * This current segment is complete; continue to
 * check whether the whole packet is complete or not.
*/
- pkt = pkt->pkt.next;
+ pkt = pkt->next;
if (pkt != NULL) {
/*
* There are more segments.
uint32_t secure_len = 0;
uint16_t need_cnt;
uint32_t vec_idx = 0;
- uint32_t pkt_len = pkts[pkt_idx]->pkt.pkt_len + vq->vhost_hlen;
+ uint32_t pkt_len = pkts[pkt_idx]->pkt_len + vq->vhost_hlen;
uint16_t i, id;
do {
int i, ret;
/* Learn MAC address of guest device from packet */
- pkt_hdr = (struct ether_hdr *)m->pkt.data;
+ pkt_hdr = (struct ether_hdr *)m->data;
dev_ll = ll_root_used;
struct ether_hdr *pkt_hdr;
uint64_t ret = 0;
- pkt_hdr = (struct ether_hdr *)m->pkt.data;
+ pkt_hdr = (struct ether_hdr *)m->data;
/*get the used devices list*/
dev_ll = ll_root_used;
unsigned len, ret, offset = 0;
const uint16_t lcore_id = rte_lcore_id();
struct virtio_net_data_ll *dev_ll = ll_root_used;
- struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->pkt.data;
+ struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->data;
/*check if destination is local VM*/
if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(dev, m) == 0))
return;
}
- mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN + offset;
- mbuf->pkt.pkt_len = m->pkt.pkt_len + VLAN_HLEN + offset;
- mbuf->pkt.nb_segs = m->pkt.nb_segs;
+ mbuf->data_len = m->data_len + VLAN_HLEN + offset;
+ mbuf->pkt_len = m->pkt_len + VLAN_HLEN + offset;
+ mbuf->nb_segs = m->nb_segs;
/* Copy ethernet header to mbuf. */
- rte_memcpy((void*)mbuf->pkt.data, (const void*)m->pkt.data, ETH_HLEN);
+ rte_memcpy((void*)mbuf->data, (const void*)m->data, ETH_HLEN);
/* Setup vlan header. Bytes need to be re-ordered for network with htons()*/
- vlan_hdr = (struct vlan_ethhdr *) mbuf->pkt.data;
+ vlan_hdr = (struct vlan_ethhdr *) mbuf->data;
vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_TCI = htons(vlan_tag);
/* Copy the remaining packet contents to the mbuf. */
- rte_memcpy((void*) ((uint8_t*)mbuf->pkt.data + VLAN_ETH_HLEN),
- (const void*) ((uint8_t*)m->pkt.data + ETH_HLEN), (m->pkt.data_len - ETH_HLEN));
+ rte_memcpy((void*) ((uint8_t*)mbuf->data + VLAN_ETH_HLEN),
+ (const void*) ((uint8_t*)m->data + ETH_HLEN), (m->data_len - ETH_HLEN));
/* Copy the remaining segments for the whole packet. */
prev = mbuf;
- while (m->pkt.next) {
+ while (m->next) {
/* Allocate an mbuf and populate the structure. */
struct rte_mbuf *next_mbuf = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(next_mbuf == NULL)) {
return;
}
- m = m->pkt.next;
- prev->pkt.next = next_mbuf;
+ m = m->next;
+ prev->next = next_mbuf;
prev = next_mbuf;
- next_mbuf->pkt.data_len = m->pkt.data_len;
+ next_mbuf->data_len = m->data_len;
/* Copy data to next mbuf. */
rte_memcpy(rte_pktmbuf_mtod(next_mbuf, void *),
- rte_pktmbuf_mtod(m, const void *), m->pkt.data_len);
+ rte_pktmbuf_mtod(m, const void *), m->data_len);
}
tx_q->m_table[len] = mbuf;
vq->used->ring[used_idx].len = 0;
/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
- m.pkt.data_len = desc->len;
- m.pkt.pkt_len = desc->len;
- m.pkt.data = (void*)(uintptr_t)buff_addr;
+ m.data_len = desc->len;
+ m.pkt_len = desc->len;
+ m.data = (void*)(uintptr_t)buff_addr;
PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);
* while the virtio buffer in TX vring has
* more data to be copied.
*/
- cur->pkt.data_len = seg_offset;
- m->pkt.pkt_len += seg_offset;
+ cur->data_len = seg_offset;
+ m->pkt_len += seg_offset;
/* Allocate mbuf and populate the structure. */
cur = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(cur == NULL)) {
}
seg_num++;
- prev->pkt.next = cur;
+ prev->next = cur;
prev = cur;
seg_offset = 0;
seg_avail = buf_size;
 * room to accommodate more
* data.
*/
- cur->pkt.data_len = seg_offset;
- m->pkt.pkt_len += seg_offset;
+ cur->data_len = seg_offset;
+ m->pkt_len += seg_offset;
/*
* Allocate an mbuf and
* populate the structure.
break;
}
seg_num++;
- prev->pkt.next = cur;
+ prev->next = cur;
prev = cur;
seg_offset = 0;
seg_avail = buf_size;
desc->len, 0);
} else {
/* The whole packet completes. */
- cur->pkt.data_len = seg_offset;
- m->pkt.pkt_len += seg_offset;
+ cur->data_len = seg_offset;
+ m->pkt_len += seg_offset;
vb_avail = 0;
}
}
if (unlikely(alloc_err == 1))
break;
- m->pkt.nb_segs = seg_num;
+ m->nb_segs = seg_num;
/*
* If this is the first received packet we need to learn
}
mbuf->buf_addr = (void *)(uintptr_t)(buff_addr - RTE_PKTMBUF_HEADROOM);
- mbuf->pkt.data = (void *)(uintptr_t)(buff_addr);
+ mbuf->data = (void *)(uintptr_t)(buff_addr);
mbuf->buf_physaddr = phys_addr - RTE_PKTMBUF_HEADROOM;
- mbuf->pkt.data_len = desc->len;
+ mbuf->data_len = desc->len;
MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
LOG_DEBUG(VHOST_DATA,
buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
RTE_PKTMBUF_HEADROOM : m->buf_len;
- m->pkt.data = (char *) m->buf_addr + buf_ofs;
+ m->data = (char *) m->buf_addr + buf_ofs;
- m->pkt.data_len = 0;
+ m->data_len = 0;
}
/*
unsigned len, ret, offset = 0;
struct vpool *vpool;
struct virtio_net_data_ll *dev_ll = ll_root_used;
- struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->pkt.data;
+ struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->data;
uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];
/*Add packet to the port tx queue*/
}
}
- mbuf->pkt.nb_segs = m->pkt.nb_segs;
- mbuf->pkt.next = m->pkt.next;
- mbuf->pkt.data_len = m->pkt.data_len + offset;
- mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+ mbuf->nb_segs = m->nb_segs;
+ mbuf->next = m->next;
+ mbuf->data_len = m->data_len + offset;
+ mbuf->pkt_len = mbuf->data_len;
if (unlikely(need_copy)) {
/* Copy the packet contents to the mbuf. */
- rte_memcpy((void *)((uint8_t *)mbuf->pkt.data),
- (const void *) ((uint8_t *)m->pkt.data),
- m->pkt.data_len);
+ rte_memcpy((void *)((uint8_t *)mbuf->data),
+ (const void *) ((uint8_t *)m->data),
+ m->data_len);
} else {
- mbuf->pkt.data = m->pkt.data;
+ mbuf->data = m->data;
mbuf->buf_physaddr = m->buf_physaddr;
mbuf->buf_addr = m->buf_addr;
}
mbuf->ol_flags = PKT_TX_VLAN_PKT;
- mbuf->pkt.vlan_macip.f.vlan_tci = vlan_tag;
- mbuf->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mbuf->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ mbuf->vlan_macip.f.vlan_tci = vlan_tag;
+ mbuf->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ mbuf->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
tx_q->m_table[len] = mbuf;
LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in tx_route_zcp: pkt: nb_seg: %d, next:%s\n",
dev->device_fh,
- mbuf->pkt.nb_segs,
- (mbuf->pkt.next == NULL) ? "null" : "non-null");
+ mbuf->nb_segs,
+ (mbuf->next == NULL) ? "null" : "non-null");
if (enable_stats) {
dev_statistics[dev->device_fh].tx_total++;
* Setup dummy mbuf. This is copied to a real mbuf if
* transmitted out the physical port.
*/
- m.pkt.data_len = desc->len;
- m.pkt.nb_segs = 1;
- m.pkt.next = NULL;
- m.pkt.data = (void *)(uintptr_t)buff_addr;
- m.buf_addr = m.pkt.data;
+ m.data_len = desc->len;
+ m.nb_segs = 1;
+ m.next = NULL;
+ m.data = (void *)(uintptr_t)buff_addr;
+ m.buf_addr = m.data;
m.buf_physaddr = phys_addr;
/*
vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
/* Copy mbuf data to buffer */
- rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->pkt.data, rte_pktmbuf_data_len(buff));
+ rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->data, rte_pktmbuf_data_len(buff));
res_cur_idx++;
packet_success++;
struct ether_hdr *pkt_hdr;
uint64_t ret = 0;
- pkt_hdr = (struct ether_hdr *)m->pkt.data;
+ pkt_hdr = (struct ether_hdr *)m->data;
/*get the used devices list*/
dev_ll = ll_root_used;
if(!mbuf)
return;
- mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN;
- mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+ mbuf->data_len = m->data_len + VLAN_HLEN;
+ mbuf->pkt_len = mbuf->data_len;
/* Copy ethernet header to mbuf. */
- rte_memcpy((void*)mbuf->pkt.data, (const void*)m->pkt.data, ETH_HLEN);
+ rte_memcpy((void*)mbuf->data, (const void*)m->data, ETH_HLEN);
/* Setup vlan header. Bytes need to be re-ordered for network with htons()*/
- vlan_hdr = (struct vlan_ethhdr *) mbuf->pkt.data;
+ vlan_hdr = (struct vlan_ethhdr *) mbuf->data;
vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_TCI = htons(vlan_tag);
/* Copy the remaining packet contents to the mbuf. */
- rte_memcpy((void*) ((uint8_t*)mbuf->pkt.data + VLAN_ETH_HLEN),
- (const void*) ((uint8_t*)m->pkt.data + ETH_HLEN), (m->pkt.data_len - ETH_HLEN));
+ rte_memcpy((void*) ((uint8_t*)mbuf->data + VLAN_ETH_HLEN),
+ (const void*) ((uint8_t*)m->data + ETH_HLEN), (m->data_len - ETH_HLEN));
tx_q->m_table[len] = mbuf;
len++;
if (enable_stats) {
rte_prefetch0((void*)(uintptr_t)buff_addr);
/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
- m.pkt.data_len = desc->len;
- m.pkt.data = (void*)(uintptr_t)buff_addr;
- m.pkt.nb_segs = 1;
+ m.data_len = desc->len;
+ m.data = (void*)(uintptr_t)buff_addr;
+ m.nb_segs = 1;
virtio_tx_route(dev, &m, mbuf_pool, 0);
next_mb = mbufs[next_idx++];
next_value = (((int64_t)(uintptr_t)next_mb)
<< RTE_DISTRIB_FLAG_BITS);
- new_tag = (next_mb->pkt.hash.rss | 1);
+ new_tag = (next_mb->hash.rss | 1);
uint32_t match = 0;
unsigned i;
struct rte_mbuf *ms;
/* adjust start of the last fragment data. */
- rte_pktmbuf_adj(mp, (uint16_t)(mp->pkt.vlan_macip.f.l2_len +
- mp->pkt.vlan_macip.f.l3_len));
+ rte_pktmbuf_adj(mp, (uint16_t)(mp->vlan_macip.f.l2_len +
+ mp->vlan_macip.f.l3_len));
/* chain two fragments. */
ms = rte_pktmbuf_lastseg(mn);
- ms->pkt.next = mp;
+ ms->next = mp;
/* accumulate number of segments and total length. */
- mn->pkt.nb_segs = (uint8_t)(mn->pkt.nb_segs + mp->pkt.nb_segs);
- mn->pkt.pkt_len += mp->pkt.pkt_len;
+ mn->nb_segs = (uint8_t)(mn->nb_segs + mp->nb_segs);
+ mn->pkt_len += mp->pkt_len;
/* reset pkt_len and nb_segs for chained fragment. */
- mp->pkt.pkt_len = mp->pkt.data_len;
- mp->pkt.nb_segs = 1;
+ mp->pkt_len = mp->data_len;
+ mp->nb_segs = 1;
}
/* Fragment size should be a multiple of 8. */
IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
- in_hdr = (struct ipv4_hdr *) pkt_in->pkt.data;
+ in_hdr = (struct ipv4_hdr *) pkt_in->data;
flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
/* If Don't Fragment flag is set */
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely(frag_size * nb_pkts_out <
- (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv4_hdr))))
+ (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv4_hdr))))
return -EINVAL;
in_seg = pkt_in;
}
/* Reserve space for the IP header that will be built later */
- out_pkt->pkt.data_len = sizeof(struct ipv4_hdr);
- out_pkt->pkt.pkt_len = sizeof(struct ipv4_hdr);
+ out_pkt->data_len = sizeof(struct ipv4_hdr);
+ out_pkt->pkt_len = sizeof(struct ipv4_hdr);
out_seg_prev = out_pkt;
more_out_segs = 1;
__free_fragments(pkts_out, out_pkt_pos);
return -ENOMEM;
}
- out_seg_prev->pkt.next = out_seg;
+ out_seg_prev->next = out_seg;
out_seg_prev = out_seg;
/* Prepare indirect buffer */
rte_pktmbuf_attach(out_seg, in_seg);
- len = mtu_size - out_pkt->pkt.pkt_len;
- if (len > (in_seg->pkt.data_len - in_seg_data_pos)) {
- len = in_seg->pkt.data_len - in_seg_data_pos;
+ len = mtu_size - out_pkt->pkt_len;
+ if (len > (in_seg->data_len - in_seg_data_pos)) {
+ len = in_seg->data_len - in_seg_data_pos;
}
- out_seg->pkt.data = (char*) in_seg->pkt.data + (uint16_t)in_seg_data_pos;
- out_seg->pkt.data_len = (uint16_t)len;
- out_pkt->pkt.pkt_len = (uint16_t)(len +
- out_pkt->pkt.pkt_len);
- out_pkt->pkt.nb_segs += 1;
+ out_seg->data = (char*) in_seg->data + (uint16_t)in_seg_data_pos;
+ out_seg->data_len = (uint16_t)len;
+ out_pkt->pkt_len = (uint16_t)(len +
+ out_pkt->pkt_len);
+ out_pkt->nb_segs += 1;
in_seg_data_pos += len;
/* Current output packet (i.e. fragment) done ? */
- if (unlikely(out_pkt->pkt.pkt_len >= mtu_size))
+ if (unlikely(out_pkt->pkt_len >= mtu_size))
more_out_segs = 0;
/* Current input segment done ? */
- if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) {
- in_seg = in_seg->pkt.next;
+ if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+ in_seg = in_seg->next;
in_seg_data_pos = 0;
if (unlikely(in_seg == NULL))
/* Build the IP header */
- out_hdr = (struct ipv4_hdr*) out_pkt->pkt.data;
+ out_hdr = (struct ipv4_hdr*) out_pkt->data;
__fill_ipv4hdr_frag(out_hdr, in_hdr,
- (uint16_t)out_pkt->pkt.pkt_len,
+ (uint16_t)out_pkt->pkt_len,
flag_offset, fragment_offset, more_in_segs);
fragment_offset = (uint16_t)(fragment_offset +
- out_pkt->pkt.pkt_len - sizeof(struct ipv4_hdr));
+ out_pkt->pkt_len - sizeof(struct ipv4_hdr));
out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
- out_pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ out_pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
/* Write the fragment to the output list */
pkts_out[out_pkt_pos] = out_pkt;
/* update ipv4 header for the reassembled packet */
ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
- m->pkt.vlan_macip.f.l2_len);
+ m->vlan_macip.f.l2_len);
ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
- m->pkt.vlan_macip.f.l3_len));
+ m->vlan_macip.f.l3_len));
ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
ip_hdr->hdr_checksum = 0;
ip_ofs *= IPV4_HDR_OFFSET_UNITS;
ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
- mb->pkt.vlan_macip.f.l3_len);
+ mb->vlan_macip.f.l3_len);
IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"mbuf: %p, tms: %" PRIu64
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely (frag_size * nb_pkts_out <
- (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv6_hdr))))
+ (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv6_hdr))))
return (-EINVAL);
- in_hdr = (struct ipv6_hdr *) pkt_in->pkt.data;
+ in_hdr = (struct ipv6_hdr *) pkt_in->data;
in_seg = pkt_in;
in_seg_data_pos = sizeof(struct ipv6_hdr);
}
/* Reserve space for the IP header that will be built later */
- out_pkt->pkt.data_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
- out_pkt->pkt.pkt_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
+ out_pkt->data_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
+ out_pkt->pkt_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
out_seg_prev = out_pkt;
more_out_segs = 1;
__free_fragments(pkts_out, out_pkt_pos);
return (-ENOMEM);
}
- out_seg_prev->pkt.next = out_seg;
+ out_seg_prev->next = out_seg;
out_seg_prev = out_seg;
/* Prepare indirect buffer */
rte_pktmbuf_attach(out_seg, in_seg);
- len = mtu_size - out_pkt->pkt.pkt_len;
- if (len > (in_seg->pkt.data_len - in_seg_data_pos)) {
- len = in_seg->pkt.data_len - in_seg_data_pos;
+ len = mtu_size - out_pkt->pkt_len;
+ if (len > (in_seg->data_len - in_seg_data_pos)) {
+ len = in_seg->data_len - in_seg_data_pos;
}
- out_seg->pkt.data = (char *) in_seg->pkt.data + (uint16_t) in_seg_data_pos;
- out_seg->pkt.data_len = (uint16_t)len;
- out_pkt->pkt.pkt_len = (uint16_t)(len +
- out_pkt->pkt.pkt_len);
- out_pkt->pkt.nb_segs += 1;
+ out_seg->data = (char *) in_seg->data + (uint16_t) in_seg_data_pos;
+ out_seg->data_len = (uint16_t)len;
+ out_pkt->pkt_len = (uint16_t)(len +
+ out_pkt->pkt_len);
+ out_pkt->nb_segs += 1;
in_seg_data_pos += len;
/* Current output packet (i.e. fragment) done ? */
- if (unlikely(out_pkt->pkt.pkt_len >= mtu_size)) {
+ if (unlikely(out_pkt->pkt_len >= mtu_size)) {
more_out_segs = 0;
}
/* Current input segment done ? */
- if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) {
- in_seg = in_seg->pkt.next;
+ if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+ in_seg = in_seg->next;
in_seg_data_pos = 0;
if (unlikely(in_seg == NULL)) {
/* Build the IP header */
- out_hdr = (struct ipv6_hdr *) out_pkt->pkt.data;
+ out_hdr = (struct ipv6_hdr *) out_pkt->data;
__fill_ipv6hdr_frag(out_hdr, in_hdr,
- (uint16_t) out_pkt->pkt.pkt_len - sizeof(struct ipv6_hdr),
+ (uint16_t) out_pkt->pkt_len - sizeof(struct ipv6_hdr),
fragment_offset, more_in_segs);
fragment_offset = (uint16_t)(fragment_offset +
- out_pkt->pkt.pkt_len - sizeof(struct ipv6_hdr)
+ out_pkt->pkt_len - sizeof(struct ipv6_hdr)
- sizeof(struct ipv6_extension_fragment));
/* Write the fragment to the output list */
/* update ipv6 header for the reassembled datagram */
ip_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(m, uint8_t *) +
- m->pkt.vlan_macip.f.l2_len);
+ m->vlan_macip.f.l2_len);
ip_hdr->payload_len = rte_cpu_to_be_16(payload_len);
* other headers, so we assume there are no other headers and thus update
* the main IPv6 header instead.
*/
- move_len = m->pkt.vlan_macip.f.l2_len + m->pkt.vlan_macip.f.l3_len -
+ move_len = m->vlan_macip.f.l2_len + m->vlan_macip.f.l3_len -
sizeof(*frag_hdr);
frag_hdr = (struct ipv6_extension_fragment *) (ip_hdr + 1);
ip_hdr->proto = frag_hdr->next_header;
m->buf_len = (uint16_t)buf_len;
/* keep some headroom between start of buffer and data */
- m->pkt.data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
+ m->data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
/* init some constant fields */
m->pool = mp;
- m->pkt.nb_segs = 1;
- m->pkt.in_port = 0xff;
+ m->nb_segs = 1;
+ m->in_port = 0xff;
}
/* do some sanity checks on a mbuf: panic if it fails */
if (is_header == 0)
return;
- nb_segs = m->pkt.nb_segs;
+ nb_segs = m->nb_segs;
m_seg = m;
while (m_seg && nb_segs != 0) {
- m_seg = m_seg->pkt.next;
+ m_seg = m_seg->next;
nb_segs--;
}
if (nb_segs != 0)
fprintf(f, "dump mbuf at 0x%p, phys=%"PRIx64", buf_len=%u\n",
m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
fprintf(f, " pkt_len=%"PRIu32", ol_flags=%"PRIx16", nb_segs=%u, "
- "in_port=%u\n", m->pkt.pkt_len, m->ol_flags,
- (unsigned)m->pkt.nb_segs, (unsigned)m->pkt.in_port);
- nb_segs = m->pkt.nb_segs;
+ "in_port=%u\n", m->pkt_len, m->ol_flags,
+ (unsigned)m->nb_segs, (unsigned)m->in_port);
+ nb_segs = m->nb_segs;
while (m && nb_segs != 0) {
__rte_mbuf_sanity_check(m, 0);
fprintf(f, " segment at 0x%p, data=0x%p, data_len=%u\n",
- m, m->pkt.data, (unsigned)m->pkt.data_len);
+ m, m->data, (unsigned)m->data_len);
len = dump_len;
- if (len > m->pkt.data_len)
- len = m->pkt.data_len;
+ if (len > m->data_len)
+ len = m->data_len;
if (len != 0)
- rte_hexdump(f, NULL, m->pkt.data, len);
+ rte_hexdump(f, NULL, m->data, len);
dump_len -= len;
- m = m->pkt.next;
+ m = m->next;
nb_segs --;
}
}
/**< MAC+IP length. */
#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
-/**
- * A packet message buffer.
- */
-struct rte_pktmbuf {
- /* valid for any segment */
- struct rte_mbuf *next; /**< Next segment of scattered packet. */
- void* data; /**< Start address of data in segment buffer. */
- uint16_t data_len; /**< Amount of data in segment buffer. */
-
- /* these fields are valid for first segment only */
- uint8_t nb_segs; /**< Number of segments. */
- uint8_t in_port; /**< Input port. */
- uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
-
- /* offload features */
- union rte_vlan_macip vlan_macip;
- union {
- uint32_t rss; /**< RSS hash result if RSS enabled */
- struct {
- uint16_t hash;
- uint16_t id;
- } fdir; /**< Filter identifier if FDIR enabled */
- uint32_t sched; /**< Hierarchical scheduler */
- } hash; /**< hash information */
-};
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
uint16_t reserved; /**< Unused field. Required for padding */
uint16_t ol_flags; /**< Offload features. */
- struct rte_pktmbuf pkt;
+ /* valid for any segment */
+ struct rte_mbuf *next; /**< Next segment of scattered packet. */
+ void* data; /**< Start address of data in segment buffer. */
+ uint16_t data_len; /**< Amount of data in segment buffer. */
+
+ /* these fields are valid for first segment only */
+ uint8_t nb_segs; /**< Number of segments. */
+ uint8_t in_port; /**< Input port. */
+ uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
+
+ /* offload features, valid for first segment only */
+ union rte_vlan_macip vlan_macip;
+ union {
+ uint32_t rss; /**< RSS hash result if RSS enabled */
+ struct {
+ uint16_t hash;
+ uint16_t id;
+ } fdir; /**< Filter identifier if FDIR enabled */
+ uint32_t sched; /**< Hierarchical scheduler */
+ } hash; /**< hash information */
union {
uint8_t metadata[0];
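/*
 * Illustrative sketch, not part of the patch above: with the rte_pktmbuf
 * fields folded directly into struct rte_mbuf, callers drop the intermediate
 * "pkt" member when setting per-packet metadata. The helper name below is
 * hypothetical and assumes "mp" is an initialized pktmbuf mempool.
 */
static struct rte_mbuf *
example_build_frame(struct rte_mempool *mp, uint16_t frame_len)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);

	if (m == NULL)
		return NULL;
	m->data_len = frame_len;   /* was m->pkt.data_len */
	m->pkt_len  = frame_len;   /* was m->pkt.pkt_len */
	m->nb_segs  = 1;           /* was m->pkt.nb_segs */
	m->next     = NULL;        /* was m->pkt.next */
	return m;
}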
* @param m
* The control mbuf.
*/
-#define rte_ctrlmbuf_data(m) ((m)->pkt.data)
+#define rte_ctrlmbuf_data(m) ((m)->data)
/**
* A macro that returns the length of the carried data.
{
uint32_t buf_ofs;
- m->pkt.next = NULL;
- m->pkt.pkt_len = 0;
- m->pkt.vlan_macip.data = 0;
- m->pkt.nb_segs = 1;
- m->pkt.in_port = 0xff;
+ m->next = NULL;
+ m->pkt_len = 0;
+ m->vlan_macip.data = 0;
+ m->nb_segs = 1;
+ m->in_port = 0xff;
m->ol_flags = 0;
buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
RTE_PKTMBUF_HEADROOM : m->buf_len;
- m->pkt.data = (char*) m->buf_addr + buf_ofs;
+ m->data = (char*) m->buf_addr + buf_ofs;
- m->pkt.data_len = 0;
+ m->data_len = 0;
__rte_mbuf_sanity_check(m, 1);
}
mi->buf_addr = md->buf_addr;
mi->buf_len = md->buf_len;
- mi->pkt = md->pkt;
+ mi->next = md->next;
+ mi->data = md->data;
+ mi->data_len = md->data_len;
+ mi->in_port = md->in_port;
+ mi->vlan_macip = md->vlan_macip;
+ mi->hash = md->hash;
- mi->pkt.next = NULL;
- mi->pkt.pkt_len = mi->pkt.data_len;
- mi->pkt.nb_segs = 1;
+ mi->next = NULL;
+ mi->pkt_len = mi->data_len;
+ mi->nb_segs = 1;
mi->ol_flags = md->ol_flags;
__rte_mbuf_sanity_check(mi, 1);
buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
RTE_PKTMBUF_HEADROOM : m->buf_len;
- m->pkt.data = (char*) m->buf_addr + buf_ofs;
+ m->data = (char*) m->buf_addr + buf_ofs;
- m->pkt.data_len = 0;
+ m->data_len = 0;
}
#endif /* RTE_MBUF_REFCNT */
__rte_mbuf_sanity_check(m, 1);
while (m != NULL) {
- m_next = m->pkt.next;
+ m_next = m->next;
rte_pktmbuf_free_seg(m);
m = m_next;
}
return (NULL);
mi = mc;
- prev = &mi->pkt.next;
- pktlen = md->pkt.pkt_len;
+ prev = &mi->next;
+ pktlen = md->pkt_len;
nseg = 0;
do {
nseg++;
rte_pktmbuf_attach(mi, md);
*prev = mi;
- prev = &mi->pkt.next;
- } while ((md = md->pkt.next) != NULL &&
+ prev = &mi->next;
+ } while ((md = md->next) != NULL &&
(mi = rte_pktmbuf_alloc(mp)) != NULL);
*prev = NULL;
- mc->pkt.nb_segs = nseg;
- mc->pkt.pkt_len = pktlen;
+ mc->nb_segs = nseg;
+ mc->pkt_len = pktlen;
/* Allocation of new indirect segment failed */
if (unlikely (mi == NULL)) {
do {
rte_mbuf_refcnt_update(m, v);
- } while ((m = m->pkt.next) != NULL);
+ } while ((m = m->next) != NULL);
}
#endif /* RTE_MBUF_REFCNT */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
__rte_mbuf_sanity_check(m, 1);
- return (uint16_t) ((char*) m->pkt.data - (char*) m->buf_addr);
+ return (uint16_t) ((char*) m->data - (char*) m->buf_addr);
}
/**
{
__rte_mbuf_sanity_check(m, 1);
return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
- m->pkt.data_len);
+ m->data_len);
}
/**
struct rte_mbuf *m2 = (struct rte_mbuf *)m;
__rte_mbuf_sanity_check(m, 1);
- while (m2->pkt.next != NULL)
- m2 = m2->pkt.next;
+ while (m2->next != NULL)
+ m2 = m2->next;
return m2;
}
* @param t
* The type to cast the result into.
*/
-#define rte_pktmbuf_mtod(m, t) ((t)((m)->pkt.data))
+#define rte_pktmbuf_mtod(m, t) ((t)((m)->data))
/**
* A macro that returns the length of the packet.
* @param m
* The packet mbuf.
*/
-#define rte_pktmbuf_pkt_len(m) ((m)->pkt.pkt_len)
+#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
/**
* A macro that returns the length of the segment.
* @param m
* The packet mbuf.
*/
-#define rte_pktmbuf_data_len(m) ((m)->pkt.data_len)
+#define rte_pktmbuf_data_len(m) ((m)->data_len)
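/*
 * Illustrative sketch, not part of the patch above: code that only goes
 * through the accessor macros stays source-compatible across this change,
 * since only the macro bodies move from (m)->pkt.* to (m)->*.
 */
static inline uint32_t
example_frame_bytes(const struct rte_mbuf *m)
{
	return rte_pktmbuf_pkt_len(m); /* expands to (m)->pkt_len after this patch */
}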
/**
* Prepend len bytes to an mbuf data area.
if (unlikely(len > rte_pktmbuf_headroom(m)))
return NULL;
- m->pkt.data = (char*) m->pkt.data - len;
- m->pkt.data_len = (uint16_t)(m->pkt.data_len + len);
- m->pkt.pkt_len = (m->pkt.pkt_len + len);
+ m->data = (char*) m->data - len;
+ m->data_len = (uint16_t)(m->data_len + len);
+ m->pkt_len = (m->pkt_len + len);
- return (char*) m->pkt.data;
+ return (char*) m->data;
}
/**
if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
return NULL;
- tail = (char*) m_last->pkt.data + m_last->pkt.data_len;
- m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len + len);
- m->pkt.pkt_len = (m->pkt.pkt_len + len);
+ tail = (char*) m_last->data + m_last->data_len;
+ m_last->data_len = (uint16_t)(m_last->data_len + len);
+ m->pkt_len = (m->pkt_len + len);
return (char*) tail;
}
{
__rte_mbuf_sanity_check(m, 1);
- if (unlikely(len > m->pkt.data_len))
+ if (unlikely(len > m->data_len))
return NULL;
- m->pkt.data_len = (uint16_t)(m->pkt.data_len - len);
- m->pkt.data = ((char*) m->pkt.data + len);
- m->pkt.pkt_len = (m->pkt.pkt_len - len);
- return (char*) m->pkt.data;
+ m->data_len = (uint16_t)(m->data_len - len);
+ m->data = ((char*) m->data + len);
+ m->pkt_len = (m->pkt_len - len);
+ return (char*) m->data;
}
/**
__rte_mbuf_sanity_check(m, 1);
m_last = rte_pktmbuf_lastseg(m);
- if (unlikely(len > m_last->pkt.data_len))
+ if (unlikely(len > m_last->data_len))
return -1;
- m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len - len);
- m->pkt.pkt_len = (m->pkt.pkt_len - len);
+ m_last->data_len = (uint16_t)(m_last->data_len - len);
+ m->pkt_len = (m->pkt_len - len);
return 0;
}
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
__rte_mbuf_sanity_check(m, 1);
- return !!(m->pkt.nb_segs == 1);
+ return !!(m->nb_segs == 1);
}
/**
switch (policy) {
case BALANCE_XMIT_POLICY_LAYER2:
- eth_hdr = (struct ether_hdr *)buf->pkt.data;
+ eth_hdr = (struct ether_hdr *)buf->data;
hash = ether_hash(eth_hdr);
hash ^= hash >> 8;
return hash % slave_count;
case BALANCE_XMIT_POLICY_LAYER23:
- eth_hdr = (struct ether_hdr *)buf->pkt.data;
+ eth_hdr = (struct ether_hdr *)buf->data;
if (buf->ol_flags & PKT_RX_VLAN_PKT)
eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
(uint64_t) ((mb)->buf_physaddr + \
- (uint64_t) ((char *)((mb)->pkt.data) - (char *)(mb)->buf_addr))
+ (uint64_t) ((char *)((mb)->data) - (char *)(mb)->buf_addr))
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
PKT_TX_L4_MASK));
if (tx_ol_req) {
- hdrlen = tx_pkt->pkt.vlan_macip;
+ hdrlen = tx_pkt->vlan_macip;
/* If a new context is to be built or the existing ctx reused. */
ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
* This will always be the number of segments + the number of
* Context descriptors required to transmit the packet
*/
- nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
/*
* The number of descriptors that must be allocated for a
" tx_first=%u tx_last=%u\n",
(unsigned) txq->port_id,
(unsigned) txq->queue_id,
- (unsigned) tx_pkt->pkt.pkt_len,
+ (unsigned) tx_pkt->pkt_len,
(unsigned) tx_id,
(unsigned) tx_last);
/* Set VLAN Tag offload fields. */
if (ol_flags & PKT_TX_VLAN_PKT) {
cmd_type_len |= E1000_TXD_CMD_VLE;
- popts_spec = tx_pkt->pkt.vlan_macip.f.vlan_tci <<
+ popts_spec = tx_pkt->vlan_macip.f.vlan_tci <<
E1000_TXD_VLAN_SHIFT;
}
/*
* Set up Transmit Data Descriptor.
*/
- slen = m_seg->pkt.data_len;
+ slen = m_seg->data_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
- m_seg = m_seg->pkt.next;
+ m_seg = m_seg->next;
} while (m_seg != NULL);
/*
*/
pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
rxq->crc_len);
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_packet_prefetch(rxm->pkt.data);
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = pkt_len;
- rxm->pkt.data_len = pkt_len;
- rxm->pkt.in_port = rxq->port_id;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch(rxm->data);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->in_port = rxq->port_id;
rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
rxm->ol_flags = (uint16_t)(rxm->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors));
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+ rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
/*
* Store the mbuf address into the next entry of the array
* Set data length & data buffer address of mbuf.
*/
data_len = rte_le_to_cpu_16(rxd.length);
- rxm->pkt.data_len = data_len;
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_len = data_len;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
/*
* If this is the first buffer of the received packet,
*/
if (first_seg == NULL) {
first_seg = rxm;
- first_seg->pkt.pkt_len = data_len;
- first_seg->pkt.nb_segs = 1;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
} else {
- first_seg->pkt.pkt_len += data_len;
- first_seg->pkt.nb_segs++;
- last_seg->pkt.next = rxm;
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
}
/*
* mbuf, subtract the length of that CRC part from the
* data length of the previous mbuf.
*/
- rxm->pkt.next = NULL;
+ rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+ first_seg->pkt_len -= ETHER_CRC_LEN;
if (data_len <= ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
- first_seg->pkt.nb_segs--;
- last_seg->pkt.data_len = (uint16_t)
- (last_seg->pkt.data_len -
+ first_seg->nb_segs--;
+ last_seg->data_len = (uint16_t)
+ (last_seg->data_len -
(ETHER_CRC_LEN - data_len));
- last_seg->pkt.next = NULL;
+ last_seg->next = NULL;
} else
- rxm->pkt.data_len =
+ rxm->data_len =
(uint16_t) (data_len - ETHER_CRC_LEN);
}
* - IP checksum flag,
* - error flags.
*/
- first_seg->pkt.in_port = rxq->port_id;
+ first_seg->in_port = rxq->port_id;
first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors));
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+ rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
/* Prefetch data of first segment, if configured to do so. */
- rte_packet_prefetch(first_seg->pkt.data);
+ rte_packet_prefetch(first_seg->data);
/*
* Store the mbuf address into the next entry of the array
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
(uint64_t) ((mb)->buf_physaddr + \
- (uint64_t) ((char *)((mb)->pkt.data) - \
+ (uint64_t) ((char *)((mb)->data) - \
(char *)(mb)->buf_addr))
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
tx_pkt = *tx_pkts++;
- pkt_len = tx_pkt->pkt.pkt_len;
+ pkt_len = tx_pkt->pkt_len;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
* for the packet, starting from the current position (tx_id)
* in the ring.
*/
- tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
+ tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+ vlan_macip_lens = tx_pkt->vlan_macip.data;
tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
/* If a Context Descriptor need be built . */
/*
* Set up transmit descriptor.
*/
- slen = (uint16_t) m_seg->pkt.data_len;
+ slen = (uint16_t) m_seg->data_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
- m_seg = m_seg->pkt.next;
+ m_seg = m_seg->next;
} while (m_seg != NULL);
/*
*/
pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
rxq->crc_len);
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_packet_prefetch(rxm->pkt.data);
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = pkt_len;
- rxm->pkt.data_len = pkt_len;
- rxm->pkt.in_port = rxq->port_id;
-
- rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch(rxm->data);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->in_port = rxq->port_id;
+
+ rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->pkt.vlan_macip.f.vlan_tci =
+ rxm->vlan_macip.f.vlan_tci =
rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
* Set data length & data buffer address of mbuf.
*/
data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
- rxm->pkt.data_len = data_len;
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_len = data_len;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
/*
* If this is the first buffer of the received packet,
*/
if (first_seg == NULL) {
first_seg = rxm;
- first_seg->pkt.pkt_len = data_len;
- first_seg->pkt.nb_segs = 1;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
} else {
- first_seg->pkt.pkt_len += data_len;
- first_seg->pkt.nb_segs++;
- last_seg->pkt.next = rxm;
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
}
/*
* mbuf, subtract the length of that CRC part from the
* data length of the previous mbuf.
*/
- rxm->pkt.next = NULL;
+ rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+ first_seg->pkt_len -= ETHER_CRC_LEN;
if (data_len <= ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
- first_seg->pkt.nb_segs--;
- last_seg->pkt.data_len = (uint16_t)
- (last_seg->pkt.data_len -
+ first_seg->nb_segs--;
+ last_seg->data_len = (uint16_t)
+ (last_seg->data_len -
(ETHER_CRC_LEN - data_len));
- last_seg->pkt.next = NULL;
+ last_seg->next = NULL;
} else
- rxm->pkt.data_len =
+ rxm->data_len =
(uint16_t) (data_len - ETHER_CRC_LEN);
}
* - VLAN TCI, if any,
* - error flags.
*/
- first_seg->pkt.in_port = rxq->port_id;
- first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+ first_seg->in_port = rxq->port_id;
+ first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
/*
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->pkt.vlan_macip.f.vlan_tci =
+ first_seg->vlan_macip.f.vlan_tci =
rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
first_seg->ol_flags = pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
- rte_packet_prefetch(first_seg->pkt.data);
+ rte_packet_prefetch(first_seg->data);
/*
* Store the mbuf address into the next entry of the array
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
((uint64_t)((mb)->buf_physaddr + \
- (uint64_t)((char *)((mb)->pkt.data) - \
+ (uint64_t)((char *)((mb)->data) - \
(char *)(mb)->buf_addr)))
static const struct rte_memzone *
I40E_RXD_QW1_STATUS_SHIFT;
pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
- mb->pkt.data_len = pkt_len;
- mb->pkt.pkt_len = pkt_len;
- mb->pkt.vlan_macip.f.vlan_tci = rx_status &
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->vlan_macip.f.vlan_tci = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
rte_le_to_cpu_16(\
rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
mb->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
- mb->pkt.hash.rss = rte_le_to_cpu_32(\
+ mb->hash.rss = rte_le_to_cpu_32(\
rxdp->wb.qword0.hi_dword.rss);
}
for (i = 0; i < rxq->rx_free_thresh; i++) {
mb = rxep[i].mbuf;
rte_mbuf_refcnt_set(mb, 1);
- mb->pkt.next = NULL;
- mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
- mb->pkt.nb_segs = 1;
- mb->pkt.in_port = rxq->port_id;
+ mb->next = NULL;
+ mb->data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->in_port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
rxdp[i].read.hdr_addr = dma_addr;
rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
- rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_prefetch0(rxm->pkt.data);
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = rx_packet_len;
- rxm->pkt.data_len = rx_packet_len;
- rxm->pkt.in_port = rxq->port_id;
+ rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rte_prefetch0(rxm->data);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = rx_packet_len;
+ rxm->data_len = rx_packet_len;
+ rxm->in_port = rxq->port_id;
- rxm->pkt.vlan_macip.f.vlan_tci = rx_status &
+ rxm->vlan_macip.f.vlan_tci = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
rxm->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
- rxm->pkt.hash.rss =
+ rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
rx_pkts[nb_rx++] = rxm;
rxdp->read.pkt_addr = dma_addr;
rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rxm->pkt.data_len = rx_packet_len;
- rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_len = rx_packet_len;
+ rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
/**
* If this is the first buffer of the received packet, set the
*/
if (!first_seg) {
first_seg = rxm;
- first_seg->pkt.nb_segs = 1;
- first_seg->pkt.pkt_len = rx_packet_len;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = rx_packet_len;
} else {
- first_seg->pkt.pkt_len =
- (uint16_t)(first_seg->pkt.pkt_len +
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
rx_packet_len);
- first_seg->pkt.nb_segs++;
- last_seg->pkt.next = rxm;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
}
/**
* the length of that CRC part from the data length of the
* previous mbuf.
*/
- rxm->pkt.next = NULL;
+ rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+ first_seg->pkt_len -= ETHER_CRC_LEN;
if (rx_packet_len <= ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
- first_seg->pkt.nb_segs--;
- last_seg->pkt.data_len =
- (uint16_t)(last_seg->pkt.data_len -
+ first_seg->nb_segs--;
+ last_seg->data_len =
+ (uint16_t)(last_seg->data_len -
(ETHER_CRC_LEN - rx_packet_len));
- last_seg->pkt.next = NULL;
+ last_seg->next = NULL;
} else
- rxm->pkt.data_len = (uint16_t)(rx_packet_len -
+ rxm->data_len = (uint16_t)(rx_packet_len -
ETHER_CRC_LEN);
}
- first_seg->pkt.in_port = rxq->port_id;
- first_seg->pkt.vlan_macip.f.vlan_tci = (rx_status &
+ first_seg->in_port = rxq->port_id;
+ first_seg->vlan_macip.f.vlan_tci = (rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
first_seg->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
- rxm->pkt.hash.rss =
+ rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
/* Prefetch data of first segment, if configured to do so. */
- rte_prefetch0(first_seg->pkt.data);
+ rte_prefetch0(first_seg->data);
rx_pkts[nb_rx++] = first_seg;
first_seg = NULL;
}
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
ol_flags = tx_pkt->ol_flags;
- l2_len = tx_pkt->pkt.vlan_macip.f.l2_len;
- l3_len = tx_pkt->pkt.vlan_macip.f.l3_len;
+ l2_len = tx_pkt->vlan_macip.f.l2_len;
+ l3_len = tx_pkt->vlan_macip.f.l3_len;
/* Calculate the number of context descriptors needed. */
nb_ctx = i40e_calc_context_desc(ol_flags);
* a packet equals the number of segments of that
* packet, plus 1 context descriptor if needed.
*/
- nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + nb_ctx);
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
tx_last = (uint16_t)(tx_id + nb_used - 1);
/* Circular ring */
/* Descriptor based VLAN insertion */
if (ol_flags & PKT_TX_VLAN_PKT) {
- tx_flags |= tx_pkt->pkt.vlan_macip.f.vlan_tci <<
+ tx_flags |= tx_pkt->vlan_macip.f.vlan_tci <<
I40E_TX_FLAG_L2TAG1_SHIFT;
tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
txe->mbuf = m_seg;
/* Setup TX Descriptor */
- slen = m_seg->pkt.data_len;
+ slen = m_seg->data_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
- m_seg = m_seg->pkt.next;
+ m_seg = m_seg->next;
} while (m_seg != NULL);
/* The last packet data descriptor needs End Of Packet (EOP) */
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
- (*pkts)->pkt.data_len, 0);
+ (*pkts)->data_len, 0);
}
}
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
- (*pkts)->pkt.data_len, 0);
+ (*pkts)->data_len, 0);
}
/* Fill hardware descriptor ring with mbuf data */
}
rte_mbuf_refcnt_set(mbuf, 1);
- mbuf->pkt.next = NULL;
- mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
- mbuf->pkt.nb_segs = 1;
- mbuf->pkt.in_port = rxq->port_id;
+ mbuf->next = NULL;
+ mbuf->data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->in_port = rxq->port_id;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
- pkt_len = (*pkts)->pkt.data_len;
+ pkt_len = (*pkts)->data_len;
/* write data to descriptor */
txdp->read.buffer_addr = buf_dma_addr;
uint32_t pkt_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
- pkt_len = (*pkts)->pkt.data_len;
+ pkt_len = (*pkts)->data_len;
/* write data to descriptor */
txdp->read.buffer_addr = buf_dma_addr;
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
new_ctx = 0;
tx_pkt = *tx_pkts++;
- pkt_len = tx_pkt->pkt.pkt_len;
+ pkt_len = tx_pkt->pkt_len;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+ vlan_macip_lens = tx_pkt->vlan_macip.data;
/* If hardware offload required */
tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
* This will always be the number of segments + the number of
* Context descriptors required to transmit the packet
*/
- nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
/*
* The number of descriptors that must be allocated for a
/*
* Set up Transmit Data Descriptor.
*/
- slen = m_seg->pkt.data_len;
+ slen = m_seg->data_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
- m_seg = m_seg->pkt.next;
+ m_seg = m_seg->next;
} while (m_seg != NULL);
/*
mb = rxep[j].mbuf;
pkt_len = (uint16_t)(rxdp[j].wb.upper.length -
rxq->crc_len);
- mb->pkt.data_len = pkt_len;
- mb->pkt.pkt_len = pkt_len;
- mb->pkt.vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
- mb->pkt.hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
+ mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
/* convert descriptor fields to rte mbuf flags */
mb->ol_flags = rx_desc_hlen_type_rss_to_pkt_flags(
/* populate the static rte mbuf fields */
mb = rxep[i].mbuf;
rte_mbuf_refcnt_set(mb, 1);
- mb->pkt.next = NULL;
- mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
- mb->pkt.nb_segs = 1;
- mb->pkt.in_port = rxq->port_id;
+ mb->next = NULL;
+ mb->data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->in_port = rxq->port_id;
/* populate the descriptors */
dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
*/
pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
rxq->crc_len);
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_packet_prefetch(rxm->pkt.data);
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = pkt_len;
- rxm->pkt.data_len = pkt_len;
- rxm->pkt.in_port = rxq->port_id;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch(rxm->data);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->in_port = rxq->port_id;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->pkt.vlan_macip.f.vlan_tci =
+ rxm->vlan_macip.f.vlan_tci =
rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
rxm->ol_flags = pkt_flags;
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+ rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
else if (pkt_flags & PKT_RX_FDIR) {
- rxm->pkt.hash.fdir.hash =
+ rxm->hash.fdir.hash =
(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
& IXGBE_ATR_HASH_MASK);
- rxm->pkt.hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
}
/*
* Store the mbuf address into the next entry of the array
* Set data length & data buffer address of mbuf.
*/
data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
- rxm->pkt.data_len = data_len;
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_len = data_len;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
/*
* If this is the first buffer of the received packet,
*/
if (first_seg == NULL) {
first_seg = rxm;
- first_seg->pkt.pkt_len = data_len;
- first_seg->pkt.nb_segs = 1;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
} else {
- first_seg->pkt.pkt_len = (uint16_t)(first_seg->pkt.pkt_len
+ first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
+ data_len);
- first_seg->pkt.nb_segs++;
- last_seg->pkt.next = rxm;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
}
/*
* mbuf, subtract the length of that CRC part from the
* data length of the previous mbuf.
*/
- rxm->pkt.next = NULL;
+ rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+ first_seg->pkt_len -= ETHER_CRC_LEN;
if (data_len <= ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
- first_seg->pkt.nb_segs--;
- last_seg->pkt.data_len = (uint16_t)
- (last_seg->pkt.data_len -
+ first_seg->nb_segs--;
+ last_seg->data_len = (uint16_t)
+ (last_seg->data_len -
(ETHER_CRC_LEN - data_len));
- last_seg->pkt.next = NULL;
+ last_seg->next = NULL;
} else
- rxm->pkt.data_len =
+ rxm->data_len =
(uint16_t) (data_len - ETHER_CRC_LEN);
}
* - VLAN TCI, if any,
* - error flags.
*/
- first_seg->pkt.in_port = rxq->port_id;
+ first_seg->in_port = rxq->port_id;
/*
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->pkt.vlan_macip.f.vlan_tci =
+ first_seg->vlan_macip.f.vlan_tci =
rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
first_seg->ol_flags = pkt_flags;
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+ first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
else if (pkt_flags & PKT_RX_FDIR) {
- first_seg->pkt.hash.fdir.hash =
+ first_seg->hash.fdir.hash =
(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
& IXGBE_ATR_HASH_MASK);
- first_seg->pkt.hash.fdir.id =
+ first_seg->hash.fdir.id =
rxd.wb.lower.hi_dword.csum_ip.ip_id;
}
/* Prefetch data of first segment, if configured to do so. */
- rte_packet_prefetch(first_seg->pkt.data);
+ rte_packet_prefetch(first_seg->data);
/*
* Store the mbuf address into the next entry of the array
}
rte_mbuf_refcnt_set(mbuf, 1);
- mbuf->pkt.next = NULL;
- mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
- mbuf->pkt.nb_segs = 1;
- mbuf->pkt.in_port = rxq->port_id;
+ mbuf->next = NULL;
+ mbuf->data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->in_port = rxq->port_id;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
#endif
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+ (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
(char *)(mb)->buf_addr))
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
{
static const struct rte_mbuf mb_def = {
- .pkt = {
- .nb_segs = 1,
- },
+ .nb_segs = 1,
};
int i;
uint16_t rx_id;
rxdp = rxq->rx_ring + rxq->rxrearm_start;
- def_low = _mm_load_si128((__m128i *)&(mb_def.pkt));
+ def_low = _mm_load_si128((__m128i *)&(mb_def.next));
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
/* flush mbuf with pkt template */
- _mm_store_si128((__m128i *)&mb0->pkt, vaddr0);
- _mm_store_si128((__m128i *)&mb1->pkt, vaddr1);
+ _mm_store_si128((__m128i *)&mb0->next, vaddr0);
+ _mm_store_si128((__m128i *)&mb1->next, vaddr1);
/* update refcnt per pkt */
rte_mbuf_refcnt_set(mb0, 1);
staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
/* D.3 copy final 3,4 data to rx_pkts */
- _mm_storeu_si128((__m128i *)&(rx_pkts[pos+3]->pkt.data_len),
+ _mm_storeu_si128((__m128i *)&(rx_pkts[pos+3]->data_len),
pkt_mb4);
- _mm_storeu_si128((__m128i *)&(rx_pkts[pos+2]->pkt.data_len),
+ _mm_storeu_si128((__m128i *)&(rx_pkts[pos+2]->data_len),
pkt_mb3);
/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
staterr = _mm_packs_epi32(staterr, zero);
/* D.3 copy final 1,2 data to rx_pkts */
- _mm_storeu_si128((__m128i *)&(rx_pkts[pos+1]->pkt.data_len),
+ _mm_storeu_si128((__m128i *)&(rx_pkts[pos+1]->data_len),
pkt_mb2);
- _mm_storeu_si128((__m128i *)&(rx_pkts[pos]->pkt.data_len),
+ _mm_storeu_si128((__m128i *)&(rx_pkts[pos]->data_len),
pkt_mb1);
/* C.4 calc available number of desc */
/* load buf_addr/buf_physaddr in t0 */
t0 = _mm_loadu_si128((__m128i *)&(pkt->buf_addr));
/* load data, ... pkt_len in t1 */
- t1 = _mm_loadu_si128((__m128i *)&(pkt->pkt.data));
+ t1 = _mm_loadu_si128((__m128i *)&(pkt->data));
/* calc offset = (data - buf_adr) */
offset = _mm_sub_epi64(t1, t0);
if (header.len <= buf_size) {
/* pcap packet will fit in the mbuf, go ahead and copy */
- rte_memcpy(mbuf->pkt.data, packet, header.len);
- mbuf->pkt.data_len = (uint16_t)header.len;
- mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+ rte_memcpy(mbuf->data, packet, header.len);
+ mbuf->data_len = (uint16_t)header.len;
+ mbuf->pkt_len = mbuf->data_len;
bufs[num_rx] = mbuf;
num_rx++;
} else {
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
calculate_timestamp(&header.ts);
- header.len = mbuf->pkt.data_len;
+ header.len = mbuf->data_len;
header.caplen = header.len;
- pcap_dump((u_char*) dumper_q->dumper, &header, mbuf->pkt.data);
+ pcap_dump((u_char*) dumper_q->dumper, &header, mbuf->data);
rte_pktmbuf_free(mbuf);
num_tx++;
}
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
- ret = pcap_sendpacket(tx_queue->pcap, (u_char*) mbuf->pkt.data,
- mbuf->pkt.data_len);
+ ret = pcap_sendpacket(tx_queue->pcap, (u_char*) mbuf->data,
+ mbuf->data_len);
if (unlikely(ret != 0))
break;
num_tx++;
}
rte_prefetch0(cookie);
- rte_packet_prefetch(cookie->pkt.data);
+ rte_packet_prefetch(cookie->data);
rx_pkts[i] = cookie;
vq->vq_used_cons_idx++;
vq_ring_free_chain(vq, desc_idx);
{
struct vq_desc_extra *dxp;
struct vring_desc *start_dp;
- uint16_t seg_num = cookie->pkt.nb_segs;
+ uint16_t seg_num = cookie->nb_segs;
uint16_t needed = 1 + seg_num;
uint16_t head_idx, idx;
uint16_t head_size = txvq->hw->vtnet_hdr_size;
for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
idx = start_dp[idx].next;
start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
- start_dp[idx].len = cookie->pkt.data_len;
+ start_dp[idx].len = cookie->data_len;
start_dp[idx].flags = VRING_DESC_F_NEXT;
- cookie = cookie->pkt.next;
+ cookie = cookie->next;
}
start_dp[idx].flags &= ~VRING_DESC_F_NEXT;
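A minimal sketch (hypothetical helper, using only the flattened fields shown above) of how a segment chain is walked with the new layout; for a well-formed chain the per-segment data_len values add up to the head segment's pkt_len.
static inline uint32_t
mbuf_chain_data_len(const struct rte_mbuf *mb)
{
	uint32_t len = 0;

	/* follow the next pointers across all segments of the packet */
	for (; mb != NULL; mb = mb->next)
		len += mb->data_len;
	return len;
}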
continue;
}
- rxm->pkt.in_port = rxvq->port_id;
- rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->in_port = rxvq->port_id;
+ rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = (uint32_t)(len[i] - hdr_size);
- rxm->pkt.data_len = (uint16_t)(len[i] - hdr_size);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
- VIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);
+ VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
rx_pkts[nb_rx++] = rxm;
- rxvq->bytes += rx_pkts[nb_rx - 1]->pkt.pkt_len;
+ rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len;
}
rxvq->packets += nb_rx;
if (seg_num == 0)
seg_num = 1;
- rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rxm->pkt.nb_segs = seg_num;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = (uint32_t)(len[0] - hdr_size);
- rxm->pkt.data_len = (uint16_t)(len[0] - hdr_size);
+ rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->nb_segs = seg_num;
+ rxm->next = NULL;
+ rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
+ rxm->data_len = (uint16_t)(len[0] - hdr_size);
- rxm->pkt.in_port = rxvq->port_id;
+ rxm->in_port = rxvq->port_id;
rx_pkts[nb_rx] = rxm;
prev = rxm;
while (extra_idx < rcv_cnt) {
rxm = rcv_pkts[extra_idx];
- rxm->pkt.data =
+ rxm->data =
(char *)rxm->buf_addr +
RTE_PKTMBUF_HEADROOM - hdr_size;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = (uint32_t)(len[extra_idx]);
- rxm->pkt.data_len = (uint16_t)(len[extra_idx]);
+ rxm->next = NULL;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
if (prev)
- prev->pkt.next = rxm;
+ prev->next = rxm;
prev = rxm;
- rx_pkts[nb_rx]->pkt.pkt_len += rxm->pkt.pkt_len;
+ rx_pkts[nb_rx]->pkt_len += rxm->pkt_len;
extra_idx++;
};
seg_res -= rcv_cnt;
}
VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
- rx_pkts[nb_rx]->pkt.data_len);
+ rx_pkts[nb_rx]->data_len);
- rxvq->bytes += rx_pkts[nb_rx]->pkt.pkt_len;
+ rxvq->bytes += rx_pkts[nb_rx]->pkt_len;
nb_rx++;
}
num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
while (nb_tx < nb_pkts) {
- int need = tx_pkts[nb_tx]->pkt.nb_segs - txvq->vq_free_cnt;
+ int need = tx_pkts[nb_tx]->nb_segs - txvq->vq_free_cnt;
int deq_cnt = RTE_MIN(need, (int)num);
num -= (deq_cnt > 0) ? deq_cnt : 0;
deq_cnt--;
}
- if (tx_pkts[nb_tx]->pkt.nb_segs <= txvq->vq_free_cnt) {
+ if (tx_pkts[nb_tx]->nb_segs <= txvq->vq_free_cnt) {
txm = tx_pkts[nb_tx];
/* Enqueue Packet buffers */
error = virtqueue_enqueue_xmit(txvq, txm);
break;
}
nb_tx++;
- txvq->bytes += txm->pkt.pkt_len;
+ txvq->bytes += txm->pkt_len;
} else {
PMD_TX_LOG(ERR, "No free tx descriptors to transmit");
break;
#define VIRTQUEUE_MAX_NAME_SZ 32
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+ (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
(char *)(mb)->buf_addr))
#define VTNET_SQ_RQ_QUEUE_IDX 0
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+ (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
(char *)(mb)->buf_addr))
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
txm = tx_pkts[nb_tx];
/* Don't support scatter packets yet, free them if met */
- if (txm->pkt.nb_segs != 1) {
+ if (txm->nb_segs != 1) {
PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!");
rte_pktmbuf_free(tx_pkts[nb_tx]);
txq->stats.drop_total++;
}
/* Need to subtract the Ethernet header length */
- if (txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
+ if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
rte_pktmbuf_free(tx_pkts[nb_tx]);
txq->stats.drop_total++;
tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
txd->addr = tbi->bufPA;
- txd->len = txm->pkt.data_len;
+ txd->len = txm->data_len;
/* Mark the last descriptor as End of Packet. */
txd->cq = 1;
rte_pktmbuf_mtod(rxm, void *));
#endif
/* Copy vlan tag in packet buffer */
- rxm->pkt.vlan_macip.f.vlan_tci =
+ rxm->vlan_macip.f.vlan_tci =
rte_le_to_cpu_16((uint16_t)rcd->tci);
} else
rxm->ol_flags = 0;
/* Initialize newly received packet buffer */
- rxm->pkt.in_port = rxq->port_id;
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = (uint16_t)rcd->len;
- rxm->pkt.data_len = (uint16_t)rcd->len;
- rxm->pkt.in_port = rxq->port_id;
- rxm->pkt.vlan_macip.f.vlan_tci = 0;
- rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->in_port = rxq->port_id;
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = (uint16_t)rcd->len;
+ rxm->data_len = (uint16_t)rcd->len;
+ rxm->in_port = rxq->port_id;
+ rxm->vlan_macip.f.vlan_tci = 0;
+ rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
rx_pkts[nb_rx++] = rxm;
for (i = 0; i < num ; i ++) {
rxm = rx_pkts[i];
PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
- rxm->pkt.next = NULL;
- rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rxm->pkt.data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
- rxm->pkt.nb_segs = 1;
- rxm->pkt.in_port = pi->port_id;
- rxm->pkt.pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
+ rxm->next = NULL;
+ rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
+ rxm->nb_segs = 1;
+ rxm->in_port = pi->port_id;
+ rxm->pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
}
/* allocate new mbuf for the used descriptor */
while (likely(!virtqueue_full(rxvq))) {
* rather than gpa<->hva in virtio spec.
*/
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->pkt.data))
+ ((uint64_t)((mb)->data))
enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
start_dp[idx].addr = (uintptr_t)NULL;
idx = start_dp[idx].next;
start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
- start_dp[idx].len = cookie->pkt.data_len;
+ start_dp[idx].len = cookie->data_len;
start_dp[idx].flags = 0;
idx = start_dp[idx].next;
txvq->vq_desc_head_idx = idx;
p->n_pkts--;
/* If not jumbo, pass current packet to output */
- if (pkt->pkt.pkt_len <= IPV4_MTU_DEFAULT) {
+ if (pkt->pkt_len <= IPV4_MTU_DEFAULT) {
pkts[n_pkts_out++] = pkt;
n_pkts_to_provide = n_pkts - n_pkts_out;
{
struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
uint32_t tc_index = (qindex >> 2) & 0x3;
- uint32_t pkt_len = pkt->pkt.pkt_len;
+ uint32_t pkt_len = pkt->pkt_len;
s->stats.n_pkts_tc[tc_index] += 1;
s->stats.n_bytes_tc[tc_index] += pkt_len;
{
struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
uint32_t tc_index = (qindex >> 2) & 0x3;
- uint32_t pkt_len = pkt->pkt.pkt_len;
+ uint32_t pkt_len = pkt->pkt_len;
s->stats.n_pkts_tc_dropped[tc_index] += 1;
s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
- uint32_t pkt_len = pkt->pkt.pkt_len;
+ uint32_t pkt_len = pkt->pkt_len;
qe->stats.n_pkts += 1;
qe->stats.n_bytes += pkt_len;
rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
- uint32_t pkt_len = pkt->pkt.pkt_len;
+ uint32_t pkt_len = pkt->pkt_len;
qe->stats.n_pkts_dropped += 1;
qe->stats.n_bytes_dropped += pkt_len;
struct rte_sched_pipe *pipe = grinder->pipe;
struct rte_mbuf *pkt = grinder->pkt;
uint32_t tc_index = grinder->tc_index;
- uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+ uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
uint32_t subport_tb_credits = subport->tb_credits;
uint32_t subport_tc_credits = subport->tc_credits[tc_index];
uint32_t pipe_tb_credits = pipe->tb_credits;
struct rte_sched_pipe *pipe = grinder->pipe;
struct rte_mbuf *pkt = grinder->pkt;
uint32_t tc_index = grinder->tc_index;
- uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+ uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
uint32_t subport_tb_credits = subport->tb_credits;
uint32_t subport_tc_credits = subport->tc_credits[tc_index];
uint32_t pipe_tb_credits = pipe->tb_credits;
struct rte_sched_grinder *grinder = port->grinder + pos;
struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
struct rte_mbuf *pkt = grinder->pkt;
- uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+ uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
#if RTE_SCHED_TS_CREDITS_CHECK
if (!grinder_credits_check(port, pos)) {
2. Start of Frame Delimiter (SFD): 1 byte;
3. Frame Check Sequence (FCS): 4 bytes;
4. Inter Frame Gap (IFG): 12 bytes.
-The FCS is considered overhead only if not included in the packet length (field pkt.pkt_len
+The FCS is considered overhead only if not included in the packet length (field pkt_len
of struct rte_mbuf). */
#ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT
#define RTE_SCHED_FRAME_OVERHEAD_DEFAULT 24
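As a worked check of that default (assuming the standard 7-byte Ethernet preamble for item 1, which is not shown in this excerpt): 7 + 1 + 4 + 12 = 24 bytes, matching RTE_SCHED_FRAME_OVERHEAD_DEFAULT.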
};
/** Path through the scheduler hierarchy used by the scheduler enqueue operation to
-identify the destination queue for the current packet. Stored in the field pkt.hash.sched
+identify the destination queue for the current packet. Stored in the field hash.sched
of struct rte_mbuf of each packet, typically written by the classification stage and read by
scheduler enqueue.*/
struct rte_sched_port_hierarchy {
rte_sched_port_pkt_write(struct rte_mbuf *pkt,
uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue, enum rte_meter_color color)
{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+ struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
sched->color = (uint32_t) color;
sched->subport = subport;
static inline void
rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+ struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
*subport = sched->subport;
*pipe = sched->pipe;
static inline enum rte_meter_color
rte_sched_port_pkt_read_color(struct rte_mbuf *pkt)
{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+ struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
return (enum rte_meter_color) sched->color;
}
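A usage sketch with assumed values, for some struct rte_mbuf *pkt and using only the accessors shown above (the color enumerator name is assumed from the rte_meter API of this DPDK version): the classification stage writes the hierarchy path and color into the mbuf's hash.sched field, and the scheduler reads them back at enqueue time.
uint32_t subport, pipe, tc, queue;

/* classification stage: record where this packet should be queued */
rte_sched_port_pkt_write(pkt, 0 /* subport */, 3 /* pipe */,
	1 /* traffic class */, 2 /* queue */, e_RTE_METER_GREEN);

/* scheduler enqueue: recover the path from the mbuf */
rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &tc, &queue);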