/* app/test/packet_burst_generator.c */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h> /* memset() */

#include <rte_byteorder.h>
#include <rte_mbuf.h>
#include <rte_ip.h>

#include "packet_burst_generator.h"

#define UDP_SRC_PORT 1024
#define UDP_DST_PORT 1024


#define IP_DEFTTL  64   /* from RFC 1340. */

static void
copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
                unsigned offset)
{
        struct rte_mbuf *seg;
        void *seg_buf;
        unsigned copy_len;

        seg = pkt;
        while (offset >= seg->data_len) {
                offset -= seg->data_len;
                seg = seg->next;
        }
        copy_len = seg->data_len - offset;
        seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
        while (len > copy_len) {
                rte_memcpy(seg_buf, buf, (size_t) copy_len);
                len -= copy_len;
                buf = ((char *) buf + copy_len);
                seg = seg->next;
                seg_buf = rte_pktmbuf_mtod(seg, void *);
                copy_len = seg->data_len;
        }
        rte_memcpy(seg_buf, buf, (size_t) len);
}

static inline void
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
        if (offset + len <= pkt->data_len) {
                rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset), buf,
                           (size_t) len);
                return;
        }
        copy_buf_to_pkt_segs(buf, len, pkt, offset);
}
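
/*
 * Illustrative sketch only (not used by the test harness): copy_buf_to_pkt()
 * writes "len" bytes of "buf" into a packet at byte offset "offset", falling
 * back to copy_buf_to_pkt_segs() when the destination range crosses an mbuf
 * segment boundary.  For example, with 64-byte segments, the following call
 * spreads a 100-byte header block over the first two segments of a
 * two-segment packet "pkt":
 *
 *        uint8_t hdr_block[100] = { 0 };
 *
 *        copy_buf_to_pkt(hdr_block, sizeof(hdr_block), pkt, 0);
 *
 * Every segment that is touched must already have its data_len set, since
 * the segment walk relies on it.
 */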

void
initialize_eth_header(struct rte_ether_hdr *eth_hdr,
                struct rte_ether_addr *src_mac,
                struct rte_ether_addr *dst_mac, uint16_t ether_type,
                uint8_t vlan_enabled, uint16_t vlan_id)
{
        rte_ether_addr_copy(dst_mac, &eth_hdr->d_addr);
        rte_ether_addr_copy(src_mac, &eth_hdr->s_addr);

        if (vlan_enabled) {
                struct rte_vlan_hdr *vhdr = (struct rte_vlan_hdr *)(
                        (uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr));

                eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);

                vhdr->eth_proto = rte_cpu_to_be_16(ether_type);
                vhdr->vlan_tci = vlan_id;
        } else {
                eth_hdr->ether_type = rte_cpu_to_be_16(ether_type);
        }
}
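
/*
 * Illustrative sketch only: building a VLAN-tagged Ethernet header with
 * arbitrary placeholder MAC addresses.  When vlan_enabled is non-zero the
 * helper writes a struct rte_vlan_hdr directly after the Ethernet header,
 * so the destination buffer must have room for both:
 *
 *        uint8_t hdr_buf[sizeof(struct rte_ether_hdr) +
 *                        sizeof(struct rte_vlan_hdr)];
 *        struct rte_ether_addr src = { .addr_bytes = {2, 0, 0, 0, 0, 1} };
 *        struct rte_ether_addr dst = { .addr_bytes = {2, 0, 0, 0, 0, 2} };
 *
 *        initialize_eth_header((struct rte_ether_hdr *)hdr_buf, &src, &dst,
 *                        RTE_ETHER_TYPE_IPV4, 1, 100);
 *
 * Note that the VLAN TCI is stored exactly as passed; callers that need it
 * in network byte order must convert it themselves.
 */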

void
initialize_arp_header(struct rte_arp_hdr *arp_hdr,
                struct rte_ether_addr *src_mac,
                struct rte_ether_addr *dst_mac,
                uint32_t src_ip, uint32_t dst_ip,
                uint32_t opcode)
{
        arp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
        arp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
        arp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN;
        arp_hdr->arp_plen = sizeof(uint32_t);
        arp_hdr->arp_opcode = rte_cpu_to_be_16(opcode);
        rte_ether_addr_copy(src_mac, &arp_hdr->arp_data.arp_sha);
        arp_hdr->arp_data.arp_sip = src_ip;
        rte_ether_addr_copy(dst_mac, &arp_hdr->arp_data.arp_tha);
        arp_hdr->arp_data.arp_tip = dst_ip;
}
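
/*
 * Illustrative sketch only: filling in an ARP request, reusing the src and
 * dst MAC addresses from the Ethernet example above.  The IPv4 addresses
 * are copied into the header unchanged, so they are expected in network
 * byte order:
 *
 *        struct rte_arp_hdr arp_hdr;
 *
 *        initialize_arp_header(&arp_hdr, &src, &dst,
 *                        rte_cpu_to_be_32(RTE_IPV4(10, 0, 0, 1)),
 *                        rte_cpu_to_be_32(RTE_IPV4(10, 0, 0, 2)),
 *                        RTE_ARP_OP_REQUEST);
 */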

uint16_t
initialize_udp_header(struct rte_udp_hdr *udp_hdr, uint16_t src_port,
                uint16_t dst_port, uint16_t pkt_data_len)
{
        uint16_t pkt_len;

        pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));

        udp_hdr->src_port = rte_cpu_to_be_16(src_port);
        udp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
        udp_hdr->dgram_len = rte_cpu_to_be_16(pkt_len);
        udp_hdr->dgram_cksum = 0; /* No UDP checksum. */

        return pkt_len;
}
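
/*
 * Illustrative sketch only: the returned length (payload plus UDP header)
 * is what the IPv4/IPv6 helpers below expect as their pkt_data_len
 * argument, so the calls are typically chained:
 *
 *        struct rte_udp_hdr udp_hdr;
 *        uint16_t payload_len = 26;        arbitrary payload size
 *        uint16_t udp_len;
 *
 *        udp_len = initialize_udp_header(&udp_hdr, UDP_SRC_PORT,
 *                        UDP_DST_PORT, payload_len);
 */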

uint16_t
initialize_tcp_header(struct rte_tcp_hdr *tcp_hdr, uint16_t src_port,
                uint16_t dst_port, uint16_t pkt_data_len)
{
        uint16_t pkt_len;

        pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_tcp_hdr));

        memset(tcp_hdr, 0, sizeof(struct rte_tcp_hdr));
        tcp_hdr->src_port = rte_cpu_to_be_16(src_port);
        tcp_hdr->dst_port = rte_cpu_to_be_16(dst_port);

        return pkt_len;
}

uint16_t
initialize_sctp_header(struct rte_sctp_hdr *sctp_hdr, uint16_t src_port,
                uint16_t dst_port, uint16_t pkt_data_len)
{
        uint16_t pkt_len;

        pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_sctp_hdr));

        sctp_hdr->src_port = rte_cpu_to_be_16(src_port);
        sctp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
        sctp_hdr->tag = 0;
        sctp_hdr->cksum = 0; /* No SCTP checksum. */

        return pkt_len;
}

uint16_t
initialize_ipv6_header(struct rte_ipv6_hdr *ip_hdr, uint8_t *src_addr,
                uint8_t *dst_addr, uint16_t pkt_data_len)
{
        ip_hdr->vtc_flow = 0;
        ip_hdr->payload_len = pkt_data_len;
        ip_hdr->proto = IPPROTO_UDP;
        ip_hdr->hop_limits = IP_DEFTTL;

        rte_memcpy(ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
        rte_memcpy(ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));

        return (uint16_t) (pkt_data_len + sizeof(struct rte_ipv6_hdr));
}
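
/*
 * Illustrative sketch only: src_addr and dst_addr are 16-byte IPv6
 * addresses copied verbatim into the header.  Note that payload_len is
 * stored exactly as passed (no byte-order conversion), unlike total_length
 * in the IPv4 helper below.  Continuing the UDP example above:
 *
 *        struct rte_ipv6_hdr ip6_hdr;
 *        uint8_t src6[16] = { 0xfe, 0x80, [15] = 1 };
 *        uint8_t dst6[16] = { 0xfe, 0x80, [15] = 2 };
 *        uint16_t ip6_len;
 *
 *        ip6_len = initialize_ipv6_header(&ip6_hdr, src6, dst6, udp_len);
 */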

uint16_t
initialize_ipv4_header(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
                uint32_t dst_addr, uint16_t pkt_data_len)
{
        uint16_t pkt_len;
        unaligned_uint16_t *ptr16;
        uint32_t ip_cksum;

        /*
         * Initialize IP header.
         */
        pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_ipv4_hdr));

        ip_hdr->version_ihl   = RTE_IPV4_VHL_DEF;
        ip_hdr->type_of_service   = 0;
        ip_hdr->fragment_offset = 0;
        ip_hdr->time_to_live   = IP_DEFTTL;
        ip_hdr->next_proto_id = IPPROTO_UDP;
        ip_hdr->packet_id = 0;
        ip_hdr->total_length   = rte_cpu_to_be_16(pkt_len);
        ip_hdr->src_addr = rte_cpu_to_be_32(src_addr);
        ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr);

        /*
         * Compute IP header checksum; the checksum word itself (ptr16[5])
         * is skipped.
         */
        ptr16 = (unaligned_uint16_t *)ip_hdr;
        ip_cksum = 0;
        ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
        ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
        ip_cksum += ptr16[4];
        ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
        ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

        /*
         * Reduce 32 bit checksum to 16 bits and complement it.
         */
        ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
                (ip_cksum & 0x0000FFFF);
        ip_cksum %= 65536;
        ip_cksum = (~ip_cksum) & 0x0000FFFF;
        if (ip_cksum == 0)
                ip_cksum = 0xFFFF;
        ip_hdr->hdr_checksum = (uint16_t) ip_cksum;

        return pkt_len;
}
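
/*
 * Illustrative sketch only: the addresses are given in host byte order
 * (the helper converts them), so RTE_IPV4() can be used directly.
 * Continuing the UDP example above:
 *
 *        struct rte_ipv4_hdr ip_hdr;
 *        uint16_t ip_len;
 *
 *        ip_len = initialize_ipv4_header(&ip_hdr, RTE_IPV4(192, 168, 0, 1),
 *                        RTE_IPV4(192, 168, 0, 2), udp_len);
 */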

uint16_t
initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
                uint32_t dst_addr, uint16_t pkt_data_len, uint8_t proto)
{
        uint16_t pkt_len;
        unaligned_uint16_t *ptr16;
        uint32_t ip_cksum;

        /*
         * Initialize IP header.
         */
        pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_ipv4_hdr));

        ip_hdr->version_ihl   = RTE_IPV4_VHL_DEF;
        ip_hdr->type_of_service   = 0;
        ip_hdr->fragment_offset = 0;
        ip_hdr->time_to_live   = IP_DEFTTL;
        ip_hdr->next_proto_id = proto;
        ip_hdr->packet_id = 0;
        ip_hdr->total_length   = rte_cpu_to_be_16(pkt_len);
        ip_hdr->src_addr = rte_cpu_to_be_32(src_addr);
        ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr);

        /*
         * Compute IP header checksum; the checksum word itself (ptr16[5])
         * is skipped.
         */
        ptr16 = (unaligned_uint16_t *)ip_hdr;
        ip_cksum = 0;
        ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
        ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
        ip_cksum += ptr16[4];
        ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
        ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

        /*
         * Reduce 32 bit checksum to 16 bits and complement it.
         */
        ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
                (ip_cksum & 0x0000FFFF);
        ip_cksum %= 65536;
        ip_cksum = (~ip_cksum) & 0x0000FFFF;
        if (ip_cksum == 0)
                ip_cksum = 0xFFFF;
        ip_hdr->hdr_checksum = (uint16_t) ip_cksum;

        return pkt_len;
}
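
/*
 * Illustrative sketch only: identical to initialize_ipv4_header() except
 * that the caller chooses the L4 protocol, e.g. for a TCP packet:
 *
 *        struct rte_tcp_hdr tcp_hdr;
 *        struct rte_ipv4_hdr ip_hdr;
 *        uint16_t tcp_len, ip_len;
 *
 *        tcp_len = initialize_tcp_header(&tcp_hdr, 1024, 1024, 26);
 *        ip_len = initialize_ipv4_header_proto(&ip_hdr,
 *                        RTE_IPV4(192, 168, 0, 1), RTE_IPV4(192, 168, 0, 2),
 *                        tcp_len, IPPROTO_TCP);
 */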

/*
 * The maximum number of segments per packet is used when creating
 * scattered transmit packets composed of a list of mbufs.
 */
#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is an 8-bit unsigned char. */


int
generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
                struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
                void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
                int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
{
        int i, nb_pkt = 0;
        size_t eth_hdr_size;

        struct rte_mbuf *pkt_seg;
        struct rte_mbuf *pkt;

        for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
                pkt = rte_pktmbuf_alloc(mp);
                if (pkt == NULL) {
nomore_mbuf:
                        if (nb_pkt == 0)
                                return -1;
                        break;
                }

                pkt->data_len = pkt_len;
                pkt_seg = pkt;
                for (i = 1; i < nb_pkt_segs; i++) {
                        pkt_seg->next = rte_pktmbuf_alloc(mp);
                        if (pkt_seg->next == NULL) {
                                pkt->nb_segs = i;
                                rte_pktmbuf_free(pkt);
                                goto nomore_mbuf;
                        }
                        pkt_seg = pkt_seg->next;
                        pkt_seg->data_len = pkt_len;
                }
                pkt_seg->next = NULL; /* Last segment of packet. */

                /*
                 * Copy headers in first packet segment(s).
                 */
                if (vlan_enabled)
                        eth_hdr_size = sizeof(struct rte_ether_hdr) +
                                sizeof(struct rte_vlan_hdr);
                else
                        eth_hdr_size = sizeof(struct rte_ether_hdr);

                copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);

                if (ipv4) {
                        copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv4_hdr),
                                pkt, eth_hdr_size);
                        copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt,
                                eth_hdr_size + sizeof(struct rte_ipv4_hdr));
                } else {
                        copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv6_hdr),
                                pkt, eth_hdr_size);
                        copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt,
                                eth_hdr_size + sizeof(struct rte_ipv6_hdr));
                }

                /*
                 * Complete first mbuf of packet and append it to the
                 * burst of packets to be transmitted.
                 */
                pkt->nb_segs = nb_pkt_segs;
                pkt->pkt_len = pkt_len;
                pkt->l2_len = eth_hdr_size;

                if (ipv4) {
                        pkt->vlan_tci  = RTE_ETHER_TYPE_IPV4;
                        pkt->l3_len = sizeof(struct rte_ipv4_hdr);
                } else {
                        pkt->vlan_tci  = RTE_ETHER_TYPE_IPV6;
                        pkt->l3_len = sizeof(struct rte_ipv6_hdr);
                }

                pkts_burst[nb_pkt] = pkt;
        }

        return nb_pkt;
}
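
/*
 * Illustrative sketch only (error handling omitted): generating a burst of
 * 32 single-segment IPv4/UDP packets from a hypothetical mempool "mp",
 * reusing hdr_buf, ip_hdr and udp_hdr from the examples above.  Note that
 * pkt_len is the data length of each segment, not of the whole packet:
 *
 *        struct rte_mbuf *burst[32];
 *        int nb;
 *
 *        nb = generate_packet_burst(mp, burst,
 *                        (struct rte_ether_hdr *)hdr_buf, 1,
 *                        &ip_hdr, 1, &udp_hdr, 32, 60, 1);
 *
 * On success nb is the number of packets actually built (it may be less
 * than requested if the mempool runs dry); -1 means not even one packet
 * could be allocated.
 */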

int
generate_packet_burst_proto(struct rte_mempool *mp,
                struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
                uint8_t vlan_enabled, void *ip_hdr,
                uint8_t ipv4, uint8_t proto, void *proto_hdr,
                int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
{
        int i, nb_pkt = 0;
        size_t eth_hdr_size;

        struct rte_mbuf *pkt_seg;
        struct rte_mbuf *pkt;

        for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
                pkt = rte_pktmbuf_alloc(mp);
                if (pkt == NULL) {
nomore_mbuf:
                        if (nb_pkt == 0)
                                return -1;
                        break;
                }

                pkt->data_len = pkt_len;
                pkt_seg = pkt;
                for (i = 1; i < nb_pkt_segs; i++) {
                        pkt_seg->next = rte_pktmbuf_alloc(mp);
                        if (pkt_seg->next == NULL) {
                                pkt->nb_segs = i;
                                rte_pktmbuf_free(pkt);
                                goto nomore_mbuf;
                        }
                        pkt_seg = pkt_seg->next;
                        pkt_seg->data_len = pkt_len;
                }
                pkt_seg->next = NULL; /* Last segment of packet. */

                /*
                 * Copy headers in first packet segment(s).
                 */
                if (vlan_enabled)
                        eth_hdr_size = sizeof(struct rte_ether_hdr) +
                                sizeof(struct rte_vlan_hdr);
                else
                        eth_hdr_size = sizeof(struct rte_ether_hdr);

                copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);

                if (ipv4) {
                        copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv4_hdr),
                                        pkt, eth_hdr_size);
                        switch (proto) {
                        case IPPROTO_UDP:
                                copy_buf_to_pkt(proto_hdr,
                                        sizeof(struct rte_udp_hdr), pkt,
                                        eth_hdr_size +
                                                sizeof(struct rte_ipv4_hdr));
                                break;
                        case IPPROTO_TCP:
                                copy_buf_to_pkt(proto_hdr,
                                        sizeof(struct rte_tcp_hdr), pkt,
                                        eth_hdr_size +
                                                sizeof(struct rte_ipv4_hdr));
                                break;
                        case IPPROTO_SCTP:
                                copy_buf_to_pkt(proto_hdr,
                                        sizeof(struct rte_sctp_hdr), pkt,
                                        eth_hdr_size +
                                                sizeof(struct rte_ipv4_hdr));
                                break;
                        default:
                                break;
                        }
                } else {
                        copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv6_hdr),
                                        pkt, eth_hdr_size);
                        switch (proto) {
                        case IPPROTO_UDP:
                                copy_buf_to_pkt(proto_hdr,
                                        sizeof(struct rte_udp_hdr), pkt,
                                        eth_hdr_size +
                                                sizeof(struct rte_ipv6_hdr));
                                break;
                        case IPPROTO_TCP:
                                copy_buf_to_pkt(proto_hdr,
                                        sizeof(struct rte_tcp_hdr), pkt,
                                        eth_hdr_size +
                                                sizeof(struct rte_ipv6_hdr));
                                break;
                        case IPPROTO_SCTP:
                                copy_buf_to_pkt(proto_hdr,
                                        sizeof(struct rte_sctp_hdr), pkt,
                                        eth_hdr_size +
                                                sizeof(struct rte_ipv6_hdr));
                                break;
                        default:
                                break;
                        }
                }

                /*
                 * Complete first mbuf of packet and append it to the
                 * burst of packets to be transmitted.
                 */
                pkt->nb_segs = nb_pkt_segs;
                pkt->pkt_len = pkt_len;
                pkt->l2_len = eth_hdr_size;

                if (ipv4) {
                        pkt->vlan_tci  = RTE_ETHER_TYPE_IPV4;
                        pkt->l3_len = sizeof(struct rte_ipv4_hdr);
                } else {
                        pkt->vlan_tci  = RTE_ETHER_TYPE_IPV6;
                        pkt->l3_len = sizeof(struct rte_ipv6_hdr);
                }

                pkts_burst[nb_pkt] = pkt;
        }

        return nb_pkt;
}
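
/*
 * Illustrative sketch only: the _proto variant works like
 * generate_packet_burst() but takes the L4 protocol and a pointer to the
 * matching header, e.g. a TCP burst over the IPv4 header built with
 * initialize_ipv4_header_proto() above:
 *
 *        nb = generate_packet_burst_proto(mp, burst,
 *                        (struct rte_ether_hdr *)hdr_buf, 1, &ip_hdr, 1,
 *                        IPPROTO_TCP, &tcp_hdr, 32, 60, 1);
 *
 * Unknown proto values fall through the switch and leave the L4 header
 * area of each packet uninitialized.
 */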