seg = seg->next;
}
copy_len = seg->data_len - offset;
- seg_buf = ((char *) seg->data + offset);
+ seg_buf = rte_pktmbuf_mtod(seg, char *) + offset;
while (len > copy_len) {
rte_memcpy(seg_buf, buf, (size_t) copy_len);
len -= copy_len;
buf = ((char *) buf + copy_len);
seg = seg->next;
- seg_buf = seg->data;
+ seg_buf = rte_pktmbuf_mtod(seg, void *);
}
rte_memcpy(seg_buf, buf, (size_t) len);
}
/*
 * Copy "len" bytes from "buf" into packet "pkt", starting "offset" bytes
 * into the packet's data area.
 *
 * Fast path: when the whole copy fits inside the first segment, do a single
 * rte_memcpy.  Otherwise defer to the segment-walking helper.
 */
void
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
	if (offset + len <= pkt->data_len) {
		/* Use the mtod accessor rather than poking pkt->data directly;
		 * the raw field was removed from the public mbuf layout. */
		rte_memcpy(rte_pktmbuf_mtod(pkt, char *) + offset, buf,
			   (size_t) len);
		return;
	}
	copy_buf_to_pkt_segs(buf, len, pkt, offset);
}
void
initialize_eth_header(struct ether_hdr *eth_hdr, struct ether_addr *src_mac,
- struct ether_addr *dst_mac, uint8_t vlan_enabled, uint16_t van_id)
+ struct ether_addr *dst_mac, uint8_t ipv4, uint8_t vlan_enabled,
+ uint16_t van_id)
{
ether_addr_copy(dst_mac, ð_hdr->d_addr);
ether_addr_copy(src_mac, ð_hdr->s_addr);
eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
- vhdr->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ if (ipv4)
+ vhdr->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vhdr->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+
vhdr->vlan_tci = van_id;
} else {
- eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ if (ipv4)
+ eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
}
}
*/
#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */

/*
 * NOTE: the former tx_pkt_length / tx_pkt_seg_lengths / tx_pkt_nb_segs
 * globals were removed; packet length and segment count are now passed
 * explicitly to generate_packet_burst() by its callers.
 */
int
generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
struct ether_hdr *eth_hdr, uint8_t vlan_enabled, void *ip_hdr,
- uint8_t ipv4, struct udp_hdr *udp_hdr, int nb_pkt_per_burst)
+ uint8_t ipv4, struct udp_hdr *udp_hdr, int nb_pkt_per_burst,
+ uint8_t pkt_len, uint8_t nb_pkt_segs)
{
int i, nb_pkt = 0;
size_t eth_hdr_size;
break;
}
- pkt->data_len = tx_pkt_seg_lengths[0];
+ pkt->data_len = pkt_len;
pkt_seg = pkt;
- for (i = 1; i < tx_pkt_nb_segs; i++) {
+ for (i = 1; i < nb_pkt_segs; i++) {
pkt_seg->next = rte_pktmbuf_alloc(mp);
if (pkt_seg->next == NULL) {
pkt->nb_segs = i;
goto nomore_mbuf;
}
pkt_seg = pkt_seg->next;
- pkt_seg->data_len = tx_pkt_seg_lengths[i];
+ pkt_seg->data_len = pkt_len;
}
pkt_seg->next = NULL; /* Last segment of packet. */
* Complete first mbuf of packet and append it to the
* burst of packets to be transmitted.
*/
- pkt->nb_segs = tx_pkt_nb_segs;
- pkt->pkt_len = tx_pkt_length;
+ pkt->nb_segs = nb_pkt_segs;
+ pkt->pkt_len = pkt_len;
pkt->l2_len = eth_hdr_size;
if (ipv4) {
pkt->vlan_tci = ETHER_TYPE_IPv4;
pkt->l3_len = sizeof(struct ipv4_hdr);
-
- if (vlan_enabled)
- pkt->ol_flags = PKT_RX_IPV4_HDR | PKT_RX_VLAN_PKT;
- else
- pkt->ol_flags = PKT_RX_IPV4_HDR;
} else {
pkt->vlan_tci = ETHER_TYPE_IPv6;
pkt->l3_len = sizeof(struct ipv6_hdr);
-
- if (vlan_enabled)
- pkt->ol_flags = PKT_RX_IPV6_HDR | PKT_RX_VLAN_PKT;
- else
- pkt->ol_flags = PKT_RX_IPV6_HDR;
}
pkts_burst[nb_pkt] = pkt;