/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_byteorder.h>
#include <rte_mbuf.h>

#include "packet_burst_generator.h"
#define UDP_SRC_PORT 1024
#define UDP_DST_PORT 1024

#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bit words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
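
/*
 * IP_VHL_DEF packs the first byte of the IPv4 header: 0x40 | 0x05 == 0x45,
 * i.e. version 4 with a five-word (20-byte) header and no options.
 */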
static void
copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
		unsigned offset)
{
	struct rte_mbuf *seg;
	void *seg_buf;
	unsigned copy_len;

	/* Skip whole segments until the one containing 'offset'. */
	seg = pkt;
	while (offset >= seg->data_len) {
		offset -= seg->data_len;
		seg = seg->next;
	}
	copy_len = seg->data_len - offset;
	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
	while (len > copy_len) {
		rte_memcpy(seg_buf, buf, (size_t) copy_len);
		len -= copy_len;
		buf = ((char *) buf + copy_len);
		seg = seg->next;
		seg_buf = rte_pktmbuf_mtod(seg, void *);
		copy_len = seg->data_len; /* continue from start of new segment */
	}
	rte_memcpy(seg_buf, buf, (size_t) len);
}
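
/*
 * Copy a buffer into a packet at a given offset: the contiguous fast path is
 * used when the data fits in the first segment, otherwise the copy is split
 * across the mbuf chain by copy_buf_to_pkt_segs() above.
 */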
static inline void
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
	if (offset + len <= pkt->data_len) {
		rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset), buf,
				(size_t) len);
		return;
	}
	copy_buf_to_pkt_segs(buf, len, pkt, offset);
}
void
initialize_eth_header(struct ether_hdr *eth_hdr, struct ether_addr *src_mac,
		struct ether_addr *dst_mac, uint16_t ether_type,
		uint8_t vlan_enabled, uint16_t vlan_id)
{
	ether_addr_copy(dst_mac, &eth_hdr->d_addr);
	ether_addr_copy(src_mac, &eth_hdr->s_addr);

	if (vlan_enabled) {
		struct vlan_hdr *vhdr = (struct vlan_hdr *)((uint8_t *)eth_hdr +
				sizeof(struct ether_hdr));

		eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);

		vhdr->eth_proto = rte_cpu_to_be_16(ether_type);
		vhdr->vlan_tci = vlan_id;
	} else {
		eth_hdr->ether_type = rte_cpu_to_be_16(ether_type);
	}
}
void
initialize_arp_header(struct arp_hdr *arp_hdr, struct ether_addr *src_mac,
		struct ether_addr *dst_mac, uint32_t src_ip, uint32_t dst_ip,
		uint32_t opcode)
{
	arp_hdr->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
	arp_hdr->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	arp_hdr->arp_hln = ETHER_ADDR_LEN;
	arp_hdr->arp_pln = sizeof(uint32_t);
	arp_hdr->arp_op = rte_cpu_to_be_16(opcode);
	ether_addr_copy(src_mac, &arp_hdr->arp_data.arp_sha);
	arp_hdr->arp_data.arp_sip = src_ip;
	ether_addr_copy(dst_mac, &arp_hdr->arp_data.arp_tha);
	arp_hdr->arp_data.arp_tip = dst_ip;
}
uint16_t
initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port,
		uint16_t dst_port, uint16_t pkt_data_len)
{
	uint16_t pkt_len;

	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));

	udp_hdr->src_port = rte_cpu_to_be_16(src_port);
	udp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
	udp_hdr->dgram_len = rte_cpu_to_be_16(pkt_len);
	udp_hdr->dgram_cksum = 0; /* No UDP checksum. */

	return pkt_len;
}
uint16_t
initialize_tcp_header(struct tcp_hdr *tcp_hdr, uint16_t src_port,
		uint16_t dst_port, uint16_t pkt_data_len)
{
	uint16_t pkt_len;

	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct tcp_hdr));

	memset(tcp_hdr, 0, sizeof(struct tcp_hdr));
	tcp_hdr->src_port = rte_cpu_to_be_16(src_port);
	tcp_hdr->dst_port = rte_cpu_to_be_16(dst_port);

	return pkt_len;
}
uint16_t
initialize_sctp_header(struct sctp_hdr *sctp_hdr, uint16_t src_port,
		uint16_t dst_port, uint16_t pkt_data_len)
{
	uint16_t pkt_len;

	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));

	sctp_hdr->src_port = rte_cpu_to_be_16(src_port);
	sctp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
	sctp_hdr->tag = 0;
	sctp_hdr->cksum = 0; /* No SCTP checksum. */

	return pkt_len;
}
uint16_t
initialize_ipv6_header(struct ipv6_hdr *ip_hdr, uint8_t *src_addr,
		uint8_t *dst_addr, uint16_t pkt_data_len)
{
	ip_hdr->vtc_flow = 0;
	ip_hdr->payload_len = pkt_data_len;
	ip_hdr->proto = IPPROTO_UDP;
	ip_hdr->hop_limits = IP_DEFTTL;

	rte_memcpy(ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
	rte_memcpy(ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));

	return (uint16_t) (pkt_data_len + sizeof(struct ipv6_hdr));
}
uint16_t
initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
		uint32_t dst_addr, uint16_t pkt_data_len)
{
	uint16_t pkt_len;
	unaligned_uint16_t *ptr16;
	uint32_t ip_cksum;

	/*
	 * Initialize IP header.
	 */
	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct ipv4_hdr));

	ip_hdr->version_ihl = IP_VHL_DEF;
	ip_hdr->type_of_service = 0;
	ip_hdr->fragment_offset = 0;
	ip_hdr->time_to_live = IP_DEFTTL;
	ip_hdr->next_proto_id = IPPROTO_UDP;
	ip_hdr->packet_id = 0;
	ip_hdr->total_length = rte_cpu_to_be_16(pkt_len);
	ip_hdr->src_addr = rte_cpu_to_be_32(src_addr);
	ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr);

	/*
	 * Compute IP header checksum: sum the header as 16-bit words,
	 * skipping word 5, the checksum field itself.
	 */
	ptr16 = (unaligned_uint16_t *)ip_hdr;
	ip_cksum = 0;
	ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
	ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
	ip_cksum += ptr16[4];
	ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
	ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

	/*
	 * Reduce 32 bit checksum to 16 bits and complement it.
	 */
	ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
		(ip_cksum & 0x0000FFFF);
	ip_cksum %= 65536;
	ip_cksum = (~ip_cksum) & 0x0000FFFF;
	if (ip_cksum == 0)
		ip_cksum = 0xFFFF;
	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;

	return pkt_len;
}
uint16_t
initialize_ipv4_header_proto(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
		uint32_t dst_addr, uint16_t pkt_data_len, uint8_t proto)
{
	uint16_t pkt_len;
	unaligned_uint16_t *ptr16;
	uint32_t ip_cksum;

	/*
	 * Initialize IP header.
	 */
	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct ipv4_hdr));

	ip_hdr->version_ihl = IP_VHL_DEF;
	ip_hdr->type_of_service = 0;
	ip_hdr->fragment_offset = 0;
	ip_hdr->time_to_live = IP_DEFTTL;
	ip_hdr->next_proto_id = proto;
	ip_hdr->packet_id = 0;
	ip_hdr->total_length = rte_cpu_to_be_16(pkt_len);
	ip_hdr->src_addr = rte_cpu_to_be_32(src_addr);
	ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr);

	/*
	 * Compute IP header checksum: sum the header as 16-bit words,
	 * skipping word 5, the checksum field itself.
	 */
	ptr16 = (unaligned_uint16_t *)ip_hdr;
	ip_cksum = 0;
	ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
	ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
	ip_cksum += ptr16[4];
	ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
	ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

	/*
	 * Reduce 32 bit checksum to 16 bits and complement it.
	 */
	ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
		(ip_cksum & 0x0000FFFF);
	ip_cksum %= 65536;
	ip_cksum = (~ip_cksum) & 0x0000FFFF;
	if (ip_cksum == 0)
		ip_cksum = 0xFFFF;
	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;

	return pkt_len;
}
/*
 * The maximum number of segments per packet is used when creating
 * scattered transmit packets composed of a list of mbufs.
 */
#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is an 8-bit unsigned char. */
int
generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
		struct ether_hdr *eth_hdr, uint8_t vlan_enabled, void *ip_hdr,
		uint8_t ipv4, struct udp_hdr *udp_hdr, int nb_pkt_per_burst,
		uint8_t pkt_len, uint8_t nb_pkt_segs)
{
	int i, nb_pkt = 0;
	size_t eth_hdr_size;
	struct rte_mbuf *pkt_seg;
	struct rte_mbuf *pkt;

	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
		pkt = rte_pktmbuf_alloc(mp);
		if (pkt == NULL) {
nomore_mbuf:
			if (nb_pkt == 0)
				return -1;
			break;
		}

		pkt->data_len = pkt_len;
		pkt_seg = pkt;
		for (i = 1; i < nb_pkt_segs; i++) {
			pkt_seg->next = rte_pktmbuf_alloc(mp);
			if (pkt_seg->next == NULL) {
				pkt->nb_segs = i;
				rte_pktmbuf_free(pkt);
				goto nomore_mbuf;
			}
			pkt_seg = pkt_seg->next;
			pkt_seg->data_len = pkt_len;
		}
		pkt_seg->next = NULL; /* Last segment of packet. */

		/*
		 * Copy headers in first packet segment(s).
		 */
		if (vlan_enabled)
			eth_hdr_size = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
		else
			eth_hdr_size = sizeof(struct ether_hdr);

		copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);

		if (ipv4) {
			copy_buf_to_pkt(ip_hdr, sizeof(struct ipv4_hdr), pkt, eth_hdr_size);
			copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt, eth_hdr_size +
					sizeof(struct ipv4_hdr));
		} else {
			copy_buf_to_pkt(ip_hdr, sizeof(struct ipv6_hdr), pkt, eth_hdr_size);
			copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt, eth_hdr_size +
					sizeof(struct ipv6_hdr));
		}

		/*
		 * Complete first mbuf of packet and append it to the
		 * burst of packets to be transmitted.
		 */
		pkt->nb_segs = nb_pkt_segs;
		pkt->pkt_len = pkt_len;
		pkt->l2_len = eth_hdr_size;

		if (ipv4) {
			pkt->vlan_tci = ETHER_TYPE_IPv4;
			pkt->l3_len = sizeof(struct ipv4_hdr);
		} else {
			pkt->vlan_tci = ETHER_TYPE_IPv6;
			pkt->l3_len = sizeof(struct ipv6_hdr);
		}

		pkts_burst[nb_pkt] = pkt;
	}

	return nb_pkt;
}
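
/*
 * Illustrative sketch, not part of the generator itself and compiled only
 * when PACKET_BURST_GENERATOR_EXAMPLE is defined: shows how a test might
 * combine the helpers above to build a burst of single-segment IPv4/UDP
 * packets. The MAC addresses, IP addresses and sizes are arbitrary example
 * values, and the function name is hypothetical.
 */
#ifdef PACKET_BURST_GENERATOR_EXAMPLE
static int
example_ipv4_udp_burst(struct rte_mempool *mp, struct rte_mbuf **burst,
		int burst_size)
{
	struct ether_hdr eth_hdr;
	struct ipv4_hdr ip_hdr;
	struct udp_hdr udp_hdr;
	struct ether_addr src_mac = { .addr_bytes = {0x02, 0, 0, 0, 0, 0x01} };
	struct ether_addr dst_mac = { .addr_bytes = {0x02, 0, 0, 0, 0, 0x02} };
	uint16_t udp_len;

	/* 18 bytes of payload: 14 (Ether) + 20 (IPv4) + 8 (UDP) + 18 == 60. */
	initialize_eth_header(&eth_hdr, &src_mac, &dst_mac, ETHER_TYPE_IPv4,
			0, 0);
	udp_len = initialize_udp_header(&udp_hdr, UDP_SRC_PORT, UDP_DST_PORT,
			18);
	initialize_ipv4_header(&ip_hdr, 0xC0A80001 /* 192.168.0.1 */,
			0xC0A80002 /* 192.168.0.2 */, udp_len);

	/* One 60-byte segment per packet. */
	return generate_packet_burst(mp, burst, &eth_hdr, 0, &ip_hdr, 1,
			&udp_hdr, burst_size, 60, 1);
}
#endif /* PACKET_BURST_GENERATOR_EXAMPLE */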
int
generate_packet_burst_proto(struct rte_mempool *mp,
		struct rte_mbuf **pkts_burst,
		struct ether_hdr *eth_hdr, uint8_t vlan_enabled, void *ip_hdr,
		uint8_t ipv4, uint8_t proto, void *proto_hdr,
		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
{
	int i, nb_pkt = 0;
	size_t eth_hdr_size;
	struct rte_mbuf *pkt_seg;
	struct rte_mbuf *pkt;

	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
		pkt = rte_pktmbuf_alloc(mp);
		if (pkt == NULL) {
nomore_mbuf:
			if (nb_pkt == 0)
				return -1;
			break;
		}

		pkt->data_len = pkt_len;
		pkt_seg = pkt;
		for (i = 1; i < nb_pkt_segs; i++) {
			pkt_seg->next = rte_pktmbuf_alloc(mp);
			if (pkt_seg->next == NULL) {
				pkt->nb_segs = i;
				rte_pktmbuf_free(pkt);
				goto nomore_mbuf;
			}
			pkt_seg = pkt_seg->next;
			pkt_seg->data_len = pkt_len;
		}
		pkt_seg->next = NULL; /* Last segment of packet. */

		/*
		 * Copy headers in first packet segment(s).
		 */
		if (vlan_enabled)
			eth_hdr_size = sizeof(struct ether_hdr) +
				sizeof(struct vlan_hdr);
		else
			eth_hdr_size = sizeof(struct ether_hdr);

		copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);

		if (ipv4) {
			copy_buf_to_pkt(ip_hdr, sizeof(struct ipv4_hdr), pkt,
					eth_hdr_size);
			switch (proto) {
			case IPPROTO_UDP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct udp_hdr), pkt,
					eth_hdr_size + sizeof(struct ipv4_hdr));
				break;
			case IPPROTO_TCP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct tcp_hdr), pkt,
					eth_hdr_size + sizeof(struct ipv4_hdr));
				break;
			case IPPROTO_SCTP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct sctp_hdr), pkt,
					eth_hdr_size + sizeof(struct ipv4_hdr));
				break;
			default:
				break;
			}
		} else {
			copy_buf_to_pkt(ip_hdr, sizeof(struct ipv6_hdr), pkt,
					eth_hdr_size);
			switch (proto) {
			case IPPROTO_UDP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct udp_hdr), pkt,
					eth_hdr_size + sizeof(struct ipv6_hdr));
				break;
			case IPPROTO_TCP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct tcp_hdr), pkt,
					eth_hdr_size + sizeof(struct ipv6_hdr));
				break;
			case IPPROTO_SCTP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct sctp_hdr), pkt,
					eth_hdr_size + sizeof(struct ipv6_hdr));
				break;
			default:
				break;
			}
		}

		/*
		 * Complete first mbuf of packet and append it to the
		 * burst of packets to be transmitted.
		 */
		pkt->nb_segs = nb_pkt_segs;
		pkt->pkt_len = pkt_len;
		pkt->l2_len = eth_hdr_size;

		if (ipv4) {
			pkt->vlan_tci = ETHER_TYPE_IPv4;
			pkt->l3_len = sizeof(struct ipv4_hdr);
		} else {
			pkt->vlan_tci = ETHER_TYPE_IPv6;
			pkt->l3_len = sizeof(struct ipv6_hdr);
		}

		pkts_burst[nb_pkt] = pkt;
	}

	return nb_pkt;
}
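
/*
 * Illustrative sketch, again only compiled when PACKET_BURST_GENERATOR_EXAMPLE
 * is defined: the _proto variant carrying an arbitrary L4 header, here TCP.
 * Ports, addresses and sizes are arbitrary example values, and the function
 * name is hypothetical.
 */
#ifdef PACKET_BURST_GENERATOR_EXAMPLE
static int
example_ipv4_tcp_burst(struct rte_mempool *mp, struct rte_mbuf **burst,
		int burst_size)
{
	struct ether_hdr eth_hdr;
	struct ipv4_hdr ip_hdr;
	struct tcp_hdr tcp_hdr;
	struct ether_addr src_mac = { .addr_bytes = {0x02, 0, 0, 0, 0, 0x01} };
	struct ether_addr dst_mac = { .addr_bytes = {0x02, 0, 0, 0, 0, 0x02} };
	uint16_t tcp_len;

	initialize_eth_header(&eth_hdr, &src_mac, &dst_mac, ETHER_TYPE_IPv4,
			0, 0);
	tcp_len = initialize_tcp_header(&tcp_hdr, 1024, 1024, 16);
	initialize_ipv4_header_proto(&ip_hdr, 0xC0A80001 /* 192.168.0.1 */,
			0xC0A80002 /* 192.168.0.2 */, tcp_len, IPPROTO_TCP);

	/* One 70-byte segment per packet: 14 + 20 + 20 + 16 == 70. */
	return generate_packet_burst_proto(mp, burst, &eth_hdr, 0, &ip_hdr, 1,
			IPPROTO_TCP, &tcp_hdr, burst_size, 70, 1);
}
#endif /* PACKET_BURST_GENERATOR_EXAMPLE */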