/*
 * Copy `len` bytes from `buf` into mbuf `pkt` starting at byte `offset`.
 * Fast path: when the copy fits entirely inside the first segment
 * (offset + len <= data_len), do a single rte_memcpy and return.
 * Otherwise fall through to the segment-walking helper.
 *
 * NOTE(review): this span is a raw diff hunk, not compilable C — the
 * `-`/`+` marker lines below are patch residue.  The function's return
 * type (presumably `static void`) and its closing brace lie outside the
 * visible fragment, so no code rewrite is attempted here.
 * NOTE(review): assumes offset + len does not wrap `unsigned` — callers
 * are expected to pass sane header offsets; TODO confirm at call sites.
 */
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
if (offset + len <= pkt->data_len) {
/* Patch collapses the two-line rte_memcpy call onto one line (no
 * semantic change — purely cosmetic reflow). */
- rte_memcpy(rte_pktmbuf_mtod(pkt, char *) + offset,
- buf, (size_t) len);
+ rte_memcpy(rte_pktmbuf_mtod(pkt, char *) + offset, buf, (size_t) len);
return;
}
/* Slow path: copy spans multiple mbuf segments (helper defined elsewhere). */
copy_buf_to_pkt_segs(buf, len, pkt, offset);
/*
 * NOTE(review): orphaned tail of an unseen function (looks like an
 * Ethernet/VLAN header initializer — presumably initialize_eth_header;
 * verify against the full file).  The opening of the if/else and the
 * function signature are outside this fragment, and the trailing `* /`
 * on the last line suggests the whole region was inside a block comment
 * in the patched file — TODO confirm.
 */
/* VLAN branch: inner ethertype carries IPv4, TCI carries the VLAN id. */
vhdr->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
/* NOTE(review): `van_id` is almost certainly a typo for `vlan_id` —
 * no `van_id` identifier is visible anywhere in this fragment; confirm
 * against the declaration in the full function. */
vhdr->vlan_tci = van_id;
} else {
/* Patch changes the non-VLAN ethertype from ETHER_TYPE_VLAN to
 * ETHER_TYPE_IPv4 — plausibly a bug fix (a plain IPv4 frame must not
 * advertise a VLAN ethertype), but the surrounding logic is not
 * visible; confirm against the upstream change. */
- eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
}
}
*/
/* Upper bound on segments per packet: rte_mbuf.nb_segs is 8 bits wide. */
#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
/*
 * NOTE(review): diff residue — the patch deletes the file-scope defaults
 * (tx_pkt_length, tx_pkt_seg_lengths[], tx_pkt_nb_segs) in favor of the
 * new `pkt_len`/`nb_pkt_segs` parameters added to generate_packet_burst()
 * below.  Mutable file-scope state is removed, which is an improvement,
 * but note the replacement parameter is only uint8_t (see review note on
 * the function) while tx_pkt_length here was uint16_t.
 */
-#define TXONLY_DEF_PACKET_LEN 64
-#define TXONLY_DEF_PACKET_LEN_128 128
-
-uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN;
-uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
- TXONLY_DEF_PACKET_LEN_128,
-};
-
-uint8_t tx_pkt_nb_segs = 1;
/*
 * Allocate and build a burst of test packets from mempool `mp` into
 * `pkts_burst`, stamping each with the supplied Ethernet/IP/UDP headers.
 * Returns the number of packets actually generated (per the `nb_pkt`
 * counter; the return statement is outside this fragment).
 *
 * NOTE(review): raw diff hunk — `-`/`+` marker lines are patch residue,
 * and the body between the local declarations and the `break;` below
 * (the allocation loop head, header copies, etc.) is missing from this
 * view.  No code rewrite is attempted.
 *
 * NOTE(review): the patch replaces the deleted globals with explicit
 * `pkt_len`/`nb_pkt_segs` parameters, but declares pkt_len as uint8_t:
 *   (1) it narrows the former uint16_t tx_pkt_length, silently capping
 *       packets at 255 bytes (a standard 512- or 1500-byte test packet
 *       would wrap) — should almost certainly be uint16_t;
 *   (2) pkt->pkt_len is set to pkt_len while every segment's data_len
 *       is ALSO pkt_len, so for nb_pkt_segs > 1 the total pkt_len no
 *       longer equals the sum of segment lengths (previously
 *       tx_pkt_length vs. tx_pkt_seg_lengths[] were independent).
 * Confirm both points against the upstream commit before relying on
 * multi-segment bursts.
 */
int
generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
struct ether_hdr *eth_hdr, uint8_t vlan_enabled, void *ip_hdr,
- uint8_t ipv4, struct udp_hdr *udp_hdr, int nb_pkt_per_burst)
+ uint8_t ipv4, struct udp_hdr *udp_hdr, int nb_pkt_per_burst,
+ uint8_t pkt_len, uint8_t nb_pkt_segs)
{
int i, nb_pkt = 0;
size_t eth_hdr_size;
/* (body gap: per-packet alloc loop head not visible in this fragment) */
break;
}
/* First segment: every segment now carries the caller-supplied length. */
- pkt->data_len = tx_pkt_seg_lengths[0];
+ pkt->data_len = pkt_len;
pkt_seg = pkt;
/* Chain the remaining nb_pkt_segs - 1 segments onto the first mbuf. */
- for (i = 1; i < tx_pkt_nb_segs; i++) {
+ for (i = 1; i < nb_pkt_segs; i++) {
pkt_seg->next = rte_pktmbuf_alloc(mp);
if (pkt_seg->next == NULL) {
/* Pool exhausted: record how many segments we actually got,
 * then bail out of burst generation. */
pkt->nb_segs = i;
goto nomore_mbuf;
}
pkt_seg = pkt_seg->next;
- pkt_seg->data_len = tx_pkt_seg_lengths[i];
+ pkt_seg->data_len = pkt_len;
}
pkt_seg->next = NULL; /* Last segment of packet. */
/* NOTE(review): the `/ *` opener of this comment was lost in the diff. */
* Complete first mbuf of packet and append it to the
* burst of packets to be transmitted.
*/
/* See header note: pkt_len here should presumably be the TOTAL length
 * (sum over segments), not the per-segment length. */
- pkt->nb_segs = tx_pkt_nb_segs;
- pkt->pkt_len = tx_pkt_length;
+ pkt->nb_segs = nb_pkt_segs;
+ pkt->pkt_len = pkt_len;
pkt->l2_len = eth_hdr_size;
if (ipv4) {