1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
5 #include <rte_byteorder.h>
9 #include "packet_burst_generator.h"
11 #define UDP_SRC_PORT 1024
12 #define UDP_DST_PORT 1024
15 #define IP_DEFTTL 64 /* from RFC 1340. */
/*
 * Copy 'len' bytes from 'buf' into a segmented mbuf chain, starting at
 * byte 'offset' within the packet.  Walks the segment list to locate the
 * segment containing 'offset', then spills the copy across as many
 * segments as needed.
 */
18 copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
/* Skip whole segments until 'offset' falls inside the current one. */
26 while (offset >= seg->data_len) {
27 offset -= seg->data_len;
/* Bytes available from 'offset' to the end of the current segment. */
30 copy_len = seg->data_len - offset;
31 seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
/* Fill successive segments while the remaining data exceeds one segment. */
32 while (len > copy_len) {
33 rte_memcpy(seg_buf, buf, (size_t) copy_len);
35 buf = ((char *) buf + copy_len);
/* Subsequent segments are written from their start ('offset' applies only once). */
37 seg_buf = rte_pktmbuf_mtod(seg, void *);
/* Tail copy: the remaining 'len' bytes fit in the current segment. */
39 rte_memcpy(seg_buf, buf, (size_t) len);
/*
 * Copy 'len' bytes from 'buf' into 'pkt' at byte 'offset'.
 * Fast path: when the whole range fits in the first segment, a single
 * rte_memcpy suffices; otherwise defer to the multi-segment helper.
 */
43 copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
45 if (offset + len <= pkt->data_len) {
46 rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset), buf,
50 copy_buf_to_pkt_segs(buf, len, pkt, offset);
54 initialize_eth_header(struct rte_ether_hdr *eth_hdr,
55 struct rte_ether_addr *src_mac,
56 struct rte_ether_addr *dst_mac, uint16_t ether_type,
57 uint8_t vlan_enabled, uint16_t van_id)
59 rte_ether_addr_copy(dst_mac, ð_hdr->dst_addr);
60 rte_ether_addr_copy(src_mac, ð_hdr->src_addr);
63 struct rte_vlan_hdr *vhdr = (struct rte_vlan_hdr *)(
64 (uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr));
66 eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
68 vhdr->eth_proto = rte_cpu_to_be_16(ether_type);
69 vhdr->vlan_tci = van_id;
71 eth_hdr->ether_type = rte_cpu_to_be_16(ether_type);
/*
 * Fill in an ARP header (Ethernet hardware / IPv4 protocol flavour) with
 * the given MAC addresses, IP addresses and opcode.
 * NOTE(review): src_ip/dst_ip are stored as passed, with no byte swap —
 * presumably callers supply them already in network byte order; confirm.
 */
76 initialize_arp_header(struct rte_arp_hdr *arp_hdr,
77 struct rte_ether_addr *src_mac,
78 struct rte_ether_addr *dst_mac,
79 uint32_t src_ip, uint32_t dst_ip,
82 arp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
83 arp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
84 arp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN;
/* Protocol addresses are IPv4: 4 bytes each. */
85 arp_hdr->arp_plen = sizeof(uint32_t);
86 arp_hdr->arp_opcode = rte_cpu_to_be_16(opcode);
87 rte_ether_addr_copy(src_mac, &arp_hdr->arp_data.arp_sha);
88 arp_hdr->arp_data.arp_sip = src_ip;
89 rte_ether_addr_copy(dst_mac, &arp_hdr->arp_data.arp_tha);
90 arp_hdr->arp_data.arp_tip = dst_ip;
/*
 * Fill in a UDP header.  dgram_len covers the UDP header plus
 * 'pkt_data_len' bytes of payload; the checksum is left zero, which
 * means "no checksum" for UDP over IPv4.
 */
94 initialize_udp_header(struct rte_udp_hdr *udp_hdr, uint16_t src_port,
95 uint16_t dst_port, uint16_t pkt_data_len)
/* Datagram length = UDP header + payload. */
99 pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
101 udp_hdr->src_port = rte_cpu_to_be_16(src_port);
102 udp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
103 udp_hdr->dgram_len = rte_cpu_to_be_16(pkt_len);
104 udp_hdr->dgram_cksum = 0; /* No UDP checksum. */
/*
 * Fill in a TCP header: ports and data offset are set; every other field
 * (seq, ack, flags, window, checksum, urgent) is zeroed by the memset.
 */
110 initialize_tcp_header(struct rte_tcp_hdr *tcp_hdr, uint16_t src_port,
111 uint16_t dst_port, uint16_t pkt_data_len)
/* Segment length = TCP header + payload.  NOTE(review): its consumer
 * (presumably a return) is not visible in this view — confirm. */
115 pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_tcp_hdr));
117 memset(tcp_hdr, 0, sizeof(struct rte_tcp_hdr));
118 tcp_hdr->src_port = rte_cpu_to_be_16(src_port);
119 tcp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
/* data_off upper nibble = header length in 32-bit words:
 * (20 << 2) & 0xF0 == 0x50, i.e. 5 words — same as (20 / 4) << 4. */
120 tcp_hdr->data_off = (sizeof(struct rte_tcp_hdr) << 2) & 0xF0;
/*
 * Fill in an SCTP common header: ports set, checksum left zero.
 * NOTE(review): pkt_len is computed with sizeof(struct rte_udp_hdr),
 * which looks copy/pasted from the UDP initializer — confirm whether
 * sizeof(struct rte_sctp_hdr) was intended before relying on the value.
 */
126 initialize_sctp_header(struct rte_sctp_hdr *sctp_hdr, uint16_t src_port,
127 uint16_t dst_port, uint16_t pkt_data_len)
131 pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
133 sctp_hdr->src_port = rte_cpu_to_be_16(src_port);
134 sctp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
136 sctp_hdr->cksum = 0; /* No SCTP checksum. */
/*
 * Fill in an IPv6 header carrying a UDP payload.  payload_len excludes
 * the fixed IPv6 header, per RFC 8200.  Returns the payload length plus
 * the IPv6 header size (the full L3 length).
 */
142 initialize_ipv6_header(struct rte_ipv6_hdr *ip_hdr, uint8_t *src_addr,
143 uint8_t *dst_addr, uint16_t pkt_data_len)
145 ip_hdr->vtc_flow = rte_cpu_to_be_32(0x60000000); /* Set version to 6. */
146 ip_hdr->payload_len = rte_cpu_to_be_16(pkt_data_len);
147 ip_hdr->proto = IPPROTO_UDP;
148 ip_hdr->hop_limits = IP_DEFTTL;
/* 16-byte addresses copied verbatim (already in network byte order). */
150 rte_memcpy(ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
151 rte_memcpy(ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));
153 return (uint16_t) (pkt_data_len + sizeof(struct rte_ipv6_hdr));
/*
 * Fill in an IPv4 header carrying a UDP payload and compute the header
 * checksum in software.  src_addr/dst_addr are given in host byte order
 * and converted to network order here.
 */
157 initialize_ipv4_header(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
158 uint32_t dst_addr, uint16_t pkt_data_len)
/* Unaligned 16-bit view of the header, used for checksumming. */
161 unaligned_uint16_t *ptr16;
165 * Initialize IP header.
/* Total length = IPv4 header + payload. */
167 pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_ipv4_hdr));
169 ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
170 ip_hdr->type_of_service = 0;
171 ip_hdr->fragment_offset = 0;
172 ip_hdr->time_to_live = IP_DEFTTL;
173 ip_hdr->next_proto_id = IPPROTO_UDP;
174 ip_hdr->packet_id = 0;
175 ip_hdr->total_length = rte_cpu_to_be_16(pkt_len);
176 ip_hdr->src_addr = rte_cpu_to_be_32(src_addr);
177 ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr);
180 * Compute IP header checksum.
182 ptr16 = (unaligned_uint16_t *)ip_hdr;
/* Sum the ten 16-bit words of the 20-byte header, skipping word 5 —
 * the hdr_checksum field itself, which is treated as zero. */
184 ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
185 ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
186 ip_cksum += ptr16[4];
187 ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
188 ip_cksum += ptr16[8]; ip_cksum += ptr16[9];
191 * Reduce 32 bit checksum to 16 bits and complement it.
/* Fold carries into the low 16 bits, then take the one's complement. */
193 ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
194 (ip_cksum & 0x0000FFFF);
196 ip_cksum = (~ip_cksum) & 0x0000FFFF;
199 ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
/*
 * Same as initialize_ipv4_header() but with a caller-chosen L4 protocol
 * number ('proto') instead of hard-coded UDP.  Computes the IPv4 header
 * checksum in software.
 */
205 initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
206 uint32_t dst_addr, uint16_t pkt_data_len, uint8_t proto)
/* Unaligned 16-bit view of the header, used for checksumming. */
209 unaligned_uint16_t *ptr16;
213 * Initialize IP header.
/* Total length = IPv4 header + payload. */
215 pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_ipv4_hdr));
217 ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
218 ip_hdr->type_of_service = 0;
219 ip_hdr->fragment_offset = 0;
220 ip_hdr->time_to_live = IP_DEFTTL;
221 ip_hdr->next_proto_id = proto;
222 ip_hdr->packet_id = 0;
223 ip_hdr->total_length = rte_cpu_to_be_16(pkt_len);
224 ip_hdr->src_addr = rte_cpu_to_be_32(src_addr);
225 ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr);
228 * Compute IP header checksum.
230 ptr16 = (unaligned_uint16_t *)ip_hdr;
/* Sum the ten 16-bit words of the 20-byte header, skipping word 5 —
 * the hdr_checksum field itself, which is treated as zero. */
232 ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
233 ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
234 ip_cksum += ptr16[4];
235 ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
236 ip_cksum += ptr16[8]; ip_cksum += ptr16[9];
239 * Reduce 32 bit checksum to 16 bits and complement it.
/* Fold carries into the low 16 bits, then take the one's complement. */
241 ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
242 (ip_cksum & 0x0000FFFF);
244 ip_cksum = (~ip_cksum) & 0x0000FFFF;
247 ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
253 * The maximum number of segments per packet is used when creating
254 * scattered transmit packets composed of a list of mbufs.
256 #define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is an 8-bit unsigned char. */
/*
 * Allocate 'nb_pkt_per_burst' packets from mempool 'mp' and build each
 * one from the caller's pre-initialized Ethernet/IP/UDP headers.  Each
 * packet is a chain of 'nb_pkt_segs' mbufs, each with data_len set to
 * 'pkt_len' (uint8_t, so at most 255 bytes per segment).  Built packets
 * are stored into 'pkts_burst'.
 */
260 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
261 struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
262 void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
263 int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
268 struct rte_mbuf *pkt_seg;
269 struct rte_mbuf *pkt;
271 for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
272 pkt = rte_pktmbuf_alloc(mp);
280 pkt->data_len = pkt_len;
/* Chain the remaining nb_pkt_segs-1 segments onto the first mbuf. */
282 for (i = 1; i < nb_pkt_segs; i++) {
283 pkt_seg->next = rte_pktmbuf_alloc(mp);
284 if (pkt_seg->next == NULL) {
/* Allocation failed mid-chain: free the whole chain built so far. */
286 rte_pktmbuf_free(pkt);
289 pkt_seg = pkt_seg->next;
290 pkt_seg->data_len = pkt_len;
292 pkt_seg->next = NULL; /* Last segment of packet. */
295 * Copy headers in first packet segment(s).
/* L2 header size depends on whether a VLAN tag was inserted. */
298 eth_hdr_size = sizeof(struct rte_ether_hdr) +
299 sizeof(struct rte_vlan_hdr);
301 eth_hdr_size = sizeof(struct rte_ether_hdr);
303 copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);
/* IPv4 path: L3 header, then UDP header immediately after it. */
306 copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv4_hdr),
308 copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt,
309 eth_hdr_size + sizeof(struct rte_ipv4_hdr));
/* IPv6 path: same layout with the larger fixed IPv6 header. */
311 copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv6_hdr),
313 copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt,
314 eth_hdr_size + sizeof(struct rte_ipv6_hdr));
318 * Complete first mbuf of packet and append it to the
319 * burst of packets to be transmitted.
321 pkt->nb_segs = nb_pkt_segs;
/* NOTE(review): pkt_len is used as the per-segment data_len above but
 * as the total pkt_len here — for multi-segment packets these differ;
 * confirm this is the intended semantics for callers. */
322 pkt->pkt_len = pkt_len;
323 pkt->l2_len = eth_hdr_size;
/* NOTE(review): the ethertype constant is stashed in vlan_tci —
 * presumably consumed by the test harness; confirm against callers. */
326 pkt->vlan_tci = RTE_ETHER_TYPE_IPV4;
327 pkt->l3_len = sizeof(struct rte_ipv4_hdr);
329 pkt->vlan_tci = RTE_ETHER_TYPE_IPV6;
330 pkt->l3_len = sizeof(struct rte_ipv6_hdr);
333 pkts_burst[nb_pkt] = pkt;
340 generate_packet_burst_proto(struct rte_mempool *mp,
341 struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
342 uint8_t vlan_enabled, void *ip_hdr,
343 uint8_t ipv4, uint8_t proto, void *proto_hdr,
344 int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
349 struct rte_mbuf *pkt_seg;
350 struct rte_mbuf *pkt;
352 for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
353 pkt = rte_pktmbuf_alloc(mp);
361 pkt->data_len = pkt_len;
363 for (i = 1; i < nb_pkt_segs; i++) {
364 pkt_seg->next = rte_pktmbuf_alloc(mp);
365 if (pkt_seg->next == NULL) {
367 rte_pktmbuf_free(pkt);
370 pkt_seg = pkt_seg->next;
371 pkt_seg->data_len = pkt_len;
373 pkt_seg->next = NULL; /* Last segment of packet. */
376 * Copy headers in first packet segment(s).
379 eth_hdr_size = sizeof(struct rte_ether_hdr) +
380 sizeof(struct rte_vlan_hdr);
382 eth_hdr_size = sizeof(struct rte_ether_hdr);
384 copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);
387 copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv4_hdr),
391 copy_buf_to_pkt(proto_hdr,
392 sizeof(struct rte_udp_hdr), pkt,
394 sizeof(struct rte_ipv4_hdr));
397 copy_buf_to_pkt(proto_hdr,
398 sizeof(struct rte_tcp_hdr), pkt,
400 sizeof(struct rte_ipv4_hdr));
403 copy_buf_to_pkt(proto_hdr,
404 sizeof(struct rte_sctp_hdr), pkt,
406 sizeof(struct rte_ipv4_hdr));
412 copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv6_hdr),
416 copy_buf_to_pkt(proto_hdr,
417 sizeof(struct rte_udp_hdr), pkt,
419 sizeof(struct rte_ipv6_hdr));
422 copy_buf_to_pkt(proto_hdr,
423 sizeof(struct rte_tcp_hdr), pkt,
425 sizeof(struct rte_ipv6_hdr));
428 copy_buf_to_pkt(proto_hdr,
429 sizeof(struct rte_sctp_hdr), pkt,
431 sizeof(struct rte_ipv6_hdr));
439 * Complete first mbuf of packet and append it to the
440 * burst of packets to be transmitted.
442 pkt->nb_segs = nb_pkt_segs;
443 pkt->pkt_len = pkt_len;
444 pkt->l2_len = eth_hdr_size;
447 pkt->vlan_tci = RTE_ETHER_TYPE_IPV4;
448 pkt->l3_len = sizeof(struct rte_ipv4_hdr);
450 pkt->vlan_tci = RTE_ETHER_TYPE_IPV6;
451 pkt->l3_len = sizeof(struct rte_ipv6_hdr);
454 pkts_burst[nb_pkt] = pkt;