common/mlx5: add Windows meson file
[dpdk.git] / app / test-flow-perf / actions_gen.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  *
4  * The file contains the implementations of actions generators.
5  * Each generator is responsible for preparing it's action instance
6  * and initializing it with needed data.
7  */
8
9 #include <sys/types.h>
10 #include <rte_malloc.h>
11 #include <rte_flow.h>
12 #include <rte_ethdev.h>
13 #include <rte_vxlan.h>
14 #include <rte_gtp.h>
15 #include <rte_gre.h>
16 #include <rte_geneve.h>
17
18 #include "actions_gen.h"
19 #include "flow_gen.h"
20 #include "config.h"
21
22
23 /* Storage for additional parameters for actions */
/* Storage for additional parameters for actions */
struct additional_para {
	uint16_t queue;          /* Destination queue for QUEUE actions. */
	uint16_t next_table;     /* Target group id for JUMP actions. */
	uint16_t *queues;        /* Queue id list used by RSS actions. */
	uint16_t queues_number;  /* Number of entries in queues[]. */
	uint32_t counter;        /* Per-flow counter; seeds "random" field values. */
	uint64_t encap_data;     /* Bitmask of items to build for RAW_ENCAP. */
	uint64_t decap_data;     /* Bitmask of items to build for RAW_DECAP. */
	uint8_t core_idx;        /* Index into per-lcore static storage. */
};
34
35 /* Storage for struct rte_flow_action_raw_encap including external data. */
struct action_raw_encap_data {
	struct rte_flow_action_raw_encap conf; /* conf.data points at data[] below. */
	uint8_t data[128];      /* Assembled encapsulation header bytes. */
	uint8_t preserve[128];  /* Optional preserve mask storage (unused here). */
	uint16_t idx;
};
42
43 /* Storage for struct rte_flow_action_raw_decap including external data. */
struct action_raw_decap_data {
	struct rte_flow_action_raw_decap conf; /* conf.data points at data[] below. */
	uint8_t data[128];      /* Assembled header bytes describing what to strip. */
	uint16_t idx;
};
49
50 /* Storage for struct rte_flow_action_rss including external data. */
struct action_rss_data {
	struct rte_flow_action_rss conf; /* conf.key/conf.queue point at arrays below. */
	uint8_t key[40];        /* RSS hash key storage. */
	uint16_t queue[128];    /* RSS queue id list storage. */
};
56
57 static void
58 add_mark(struct rte_flow_action *actions,
59         uint8_t actions_counter,
60         struct additional_para para)
61 {
62         static struct rte_flow_action_mark mark_actions[RTE_MAX_LCORE] __rte_cache_aligned;
63         uint32_t counter = para.counter;
64
65         do {
66                 /* Random values from 1 to 256 */
67                 mark_actions[para.core_idx].id = (counter % 255) + 1;
68         } while (0);
69
70         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
71         actions[actions_counter].conf = &mark_actions[para.core_idx];
72 }
73
74 static void
75 add_queue(struct rte_flow_action *actions,
76         uint8_t actions_counter,
77         struct additional_para para)
78 {
79         static struct rte_flow_action_queue queue_actions[RTE_MAX_LCORE] __rte_cache_aligned;
80
81         do {
82                 queue_actions[para.core_idx].index = para.queue;
83         } while (0);
84
85         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
86         actions[actions_counter].conf = &queue_actions[para.core_idx];
87 }
88
89 static void
90 add_jump(struct rte_flow_action *actions,
91         uint8_t actions_counter,
92         struct additional_para para)
93 {
94         static struct rte_flow_action_jump jump_action;
95
96         do {
97                 jump_action.group = para.next_table;
98         } while (0);
99
100         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
101         actions[actions_counter].conf = &jump_action;
102 }
103
/*
 * Append an RSS action spreading traffic over para.queues.
 *
 * The per-lcore conf is heap-allocated once (lazily) and rewritten on
 * every call; rte_flow only stores pointers, so the allocation must
 * outlive flow creation. Allocation failure aborts the application.
 */
static void
add_rss(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_rss_data *rss_data[RTE_MAX_LCORE] __rte_cache_aligned;

	uint16_t queue;

	/* Avoid double allocation. */
	if (rss_data[para.core_idx] == NULL)
		rss_data[para.core_idx] = rte_malloc("rss_data",
			sizeof(struct action_rss_data), 0);

	if (rss_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	/* conf.key/conf.queue point into the same allocation. */
	*rss_data[para.core_idx] = (struct action_rss_data){
		.conf = (struct rte_flow_action_rss){
			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
			.level = 0,
			.types = GET_RSS_HF(),
			.key_len = sizeof(rss_data[para.core_idx]->key),
			.queue_num = para.queues_number,
			.key = rss_data[para.core_idx]->key,
			.queue = rss_data[para.core_idx]->queue,
		},
		.key = { 1 },
		.queue = { 0 },
	};

	/* Copy the actual queue ids (regular RX queues or hairpin queues). */
	for (queue = 0; queue < para.queues_number; queue++)
		rss_data[para.core_idx]->queue[queue] = para.queues[queue];

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
	actions[actions_counter].conf = &rss_data[para.core_idx]->conf;
}
140
/* Append a SET_META action writing the fixed META_DATA value (full mask). */
static void
add_set_meta(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_set_meta meta_action = {
		.data = RTE_BE32(META_DATA),
		.mask = RTE_BE32(0xffffffff),
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
	actions[actions_counter].conf = &meta_action;
}
154
/* Append a SET_TAG action writing META_DATA into tag register TAG_INDEX. */
static void
add_set_tag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_set_tag tag_action = {
		.data = RTE_BE32(META_DATA),
		.mask = RTE_BE32(0xffffffff),
		.index = TAG_INDEX,
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
	actions[actions_counter].conf = &tag_action;
}
169
/* Append a PORT_ID action forwarding to the fixed PORT_ID_DST port. */
static void
add_port_id(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_port_id port_id = {
		.id = PORT_ID_DST,
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	actions[actions_counter].conf = &port_id;
}
182
/* Append a DROP action (no conf needed). */
static void
add_drop(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
}
190
/* Append a COUNT action; the zero-initialized shared conf selects defaults. */
static void
add_count(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_count count_action;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
	actions[actions_counter].conf = &count_action;
}
201
202 static void
203 add_set_src_mac(struct rte_flow_action *actions,
204         uint8_t actions_counter,
205         __rte_unused struct additional_para para)
206 {
207         static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
208         uint32_t mac = para.counter;
209         uint16_t i;
210
211         /* Fixed value */
212         if (FIXED_VALUES)
213                 mac = 1;
214
215         /* Mac address to be set is random each time */
216         for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
217                 set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
218                 mac = mac >> 8;
219         }
220
221         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
222         actions[actions_counter].conf = &set_macs[para.core_idx];
223 }
224
225 static void
226 add_set_dst_mac(struct rte_flow_action *actions,
227         uint8_t actions_counter,
228         __rte_unused struct additional_para para)
229 {
230         static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
231         uint32_t mac = para.counter;
232         uint16_t i;
233
234         /* Fixed value */
235         if (FIXED_VALUES)
236                 mac = 1;
237
238         /* Mac address to be set is random each time */
239         for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
240                 set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
241                 mac = mac >> 8;
242         }
243
244         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
245         actions[actions_counter].conf = &set_macs[para.core_idx];
246 }
247
248 static void
249 add_set_src_ipv4(struct rte_flow_action *actions,
250         uint8_t actions_counter,
251         __rte_unused struct additional_para para)
252 {
253         static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
254         uint32_t ip = para.counter;
255
256         /* Fixed value */
257         if (FIXED_VALUES)
258                 ip = 1;
259
260         /* IPv4 value to be set is random each time */
261         set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
262
263         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
264         actions[actions_counter].conf = &set_ipv4[para.core_idx];
265 }
266
267 static void
268 add_set_dst_ipv4(struct rte_flow_action *actions,
269         uint8_t actions_counter,
270         __rte_unused struct additional_para para)
271 {
272         static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
273         uint32_t ip = para.counter;
274
275         /* Fixed value */
276         if (FIXED_VALUES)
277                 ip = 1;
278
279         /* IPv4 value to be set is random each time */
280         set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
281
282         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
283         actions[actions_counter].conf = &set_ipv4[para.core_idx];
284 }
285
286 static void
287 add_set_src_ipv6(struct rte_flow_action *actions,
288         uint8_t actions_counter,
289         __rte_unused struct additional_para para)
290 {
291         static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
292         uint32_t ipv6 = para.counter;
293         uint8_t i;
294
295         /* Fixed value */
296         if (FIXED_VALUES)
297                 ipv6 = 1;
298
299         /* IPv6 value to set is random each time */
300         for (i = 0; i < 16; i++) {
301                 set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
302                 ipv6 = ipv6 >> 8;
303         }
304
305         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
306         actions[actions_counter].conf = &set_ipv6[para.core_idx];
307 }
308
309 static void
310 add_set_dst_ipv6(struct rte_flow_action *actions,
311         uint8_t actions_counter,
312         __rte_unused struct additional_para para)
313 {
314         static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
315         uint32_t ipv6 = para.counter;
316         uint8_t i;
317
318         /* Fixed value */
319         if (FIXED_VALUES)
320                 ipv6 = 1;
321
322         /* IPv6 value to set is random each time */
323         for (i = 0; i < 16; i++) {
324                 set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
325                 ipv6 = ipv6 >> 8;
326         }
327
328         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
329         actions[actions_counter].conf = &set_ipv6[para.core_idx];
330 }
331
332 static void
333 add_set_src_tp(struct rte_flow_action *actions,
334         uint8_t actions_counter,
335         __rte_unused struct additional_para para)
336 {
337         static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
338         uint32_t tp = para.counter;
339
340         /* Fixed value */
341         if (FIXED_VALUES)
342                 tp = 100;
343
344         /* TP src port is random each time */
345         tp = tp % 0xffff;
346
347         set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
348
349         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
350         actions[actions_counter].conf = &set_tp[para.core_idx];
351 }
352
353 static void
354 add_set_dst_tp(struct rte_flow_action *actions,
355         uint8_t actions_counter,
356         __rte_unused struct additional_para para)
357 {
358         static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
359         uint32_t tp = para.counter;
360
361         /* Fixed value */
362         if (FIXED_VALUES)
363                 tp = 100;
364
365         /* TP src port is random each time */
366         if (tp > 0xffff)
367                 tp = tp >> 16;
368
369         set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
370
371         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
372         actions[actions_counter].conf = &set_tp[para.core_idx];
373 }
374
375 static void
376 add_inc_tcp_ack(struct rte_flow_action *actions,
377         uint8_t actions_counter,
378         __rte_unused struct additional_para para)
379 {
380         static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
381         uint32_t ack_value = para.counter;
382
383         /* Fixed value */
384         if (FIXED_VALUES)
385                 ack_value = 1;
386
387         value[para.core_idx] = RTE_BE32(ack_value);
388
389         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
390         actions[actions_counter].conf = &value[para.core_idx];
391 }
392
393 static void
394 add_dec_tcp_ack(struct rte_flow_action *actions,
395         uint8_t actions_counter,
396         __rte_unused struct additional_para para)
397 {
398         static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
399         uint32_t ack_value = para.counter;
400
401         /* Fixed value */
402         if (FIXED_VALUES)
403                 ack_value = 1;
404
405         value[para.core_idx] = RTE_BE32(ack_value);
406
407         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
408         actions[actions_counter].conf = &value[para.core_idx];
409 }
410
411 static void
412 add_inc_tcp_seq(struct rte_flow_action *actions,
413         uint8_t actions_counter,
414         __rte_unused struct additional_para para)
415 {
416         static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
417         uint32_t seq_value = para.counter;
418
419         /* Fixed value */
420         if (FIXED_VALUES)
421                 seq_value = 1;
422
423         value[para.core_idx] = RTE_BE32(seq_value);
424
425         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
426         actions[actions_counter].conf = &value[para.core_idx];
427 }
428
429 static void
430 add_dec_tcp_seq(struct rte_flow_action *actions,
431         uint8_t actions_counter,
432         __rte_unused struct additional_para para)
433 {
434         static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
435         uint32_t seq_value = para.counter;
436
437         /* Fixed value */
438         if (FIXED_VALUES)
439                 seq_value = 1;
440
441         value[para.core_idx] = RTE_BE32(seq_value);
442
443         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
444         actions[actions_counter].conf = &value[para.core_idx];
445 }
446
447 static void
448 add_set_ttl(struct rte_flow_action *actions,
449         uint8_t actions_counter,
450         __rte_unused struct additional_para para)
451 {
452         static struct rte_flow_action_set_ttl set_ttl[RTE_MAX_LCORE] __rte_cache_aligned;
453         uint32_t ttl_value = para.counter;
454
455         /* Fixed value */
456         if (FIXED_VALUES)
457                 ttl_value = 1;
458
459         /* Set ttl to random value each time */
460         ttl_value = ttl_value % 0xff;
461
462         set_ttl[para.core_idx].ttl_value = ttl_value;
463
464         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
465         actions[actions_counter].conf = &set_ttl[para.core_idx];
466 }
467
/* Append a DEC_TTL action (no conf needed). */
static void
add_dec_ttl(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
}
475
476 static void
477 add_set_ipv4_dscp(struct rte_flow_action *actions,
478         uint8_t actions_counter,
479         __rte_unused struct additional_para para)
480 {
481         static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
482         uint32_t dscp_value = para.counter;
483
484         /* Fixed value */
485         if (FIXED_VALUES)
486                 dscp_value = 1;
487
488         /* Set dscp to random value each time */
489         dscp_value = dscp_value % 0xff;
490
491         set_dscp[para.core_idx].dscp = dscp_value;
492
493         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
494         actions[actions_counter].conf = &set_dscp[para.core_idx];
495 }
496
497 static void
498 add_set_ipv6_dscp(struct rte_flow_action *actions,
499         uint8_t actions_counter,
500         __rte_unused struct additional_para para)
501 {
502         static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
503         uint32_t dscp_value = para.counter;
504
505         /* Fixed value */
506         if (FIXED_VALUES)
507                 dscp_value = 1;
508
509         /* Set dscp to random value each time */
510         dscp_value = dscp_value % 0xff;
511
512         set_dscp[para.core_idx].dscp = dscp_value;
513
514         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
515         actions[actions_counter].conf = &set_dscp[para.core_idx];
516 }
517
/* Append a FLAG action (no conf needed). */
static void
add_flag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
}
525
/*
 * Write an Ethernet header into *header and advance the cursor, but
 * only when the ETH bit is set in the data item mask. The ethertype is
 * chosen from whichever upper-layer item bit is also set.
 */
static void
add_ether_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_ether_hdr eth_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
		return;

	memset(&eth_hdr, 0, sizeof(struct rte_ether_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	memcpy(*header, &eth_hdr, sizeof(eth_hdr));
	*header += sizeof(eth_hdr);
}
545
/*
 * Write a VLAN header (TCI = VLAN_VALUE) into *header and advance the
 * cursor, but only when the VLAN bit is set in the data item mask.
 */
static void
add_vlan_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_vlan_hdr vlan_hdr;
	uint16_t vlan_value;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
		return;

	vlan_value = VLAN_VALUE;

	memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
	vlan_hdr.vlan_tci = RTE_BE16(vlan_value);

	/* Inner ethertype follows the next requested item. */
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
	*header += sizeof(vlan_hdr);
}
568
/*
 * Write an IPv4 header into *header and advance the cursor, but only
 * when the IPV4 bit is set in the data item mask. The destination
 * address is counter-derived (or 1 under FIXED_VALUES).
 */
static void
add_ipv4_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_ipv4_hdr ipv4_hdr;
	uint32_t ip_dst = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
		return;

	/* Fixed value */
	if (FIXED_VALUES)
		ip_dst = 1;

	memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
	/* NOTE(review): src_addr gets a host-order RTE_IPV4() value while
	 * dst_addr is byte-swapped — confirm this asymmetry is intended. */
	ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
	ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
	ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
		ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
		ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
	memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
	*header += sizeof(ipv4_hdr);
}
594
/*
 * Write a (zeroed) IPv6 header into *header and advance the cursor,
 * but only when the IPV6 bit is set in the data item mask. Only the
 * next-protocol field is filled in.
 */
static void
add_ipv6_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_ipv6_hdr ipv6_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
		return;

	memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
		ipv6_hdr.proto = RTE_IP_TYPE_UDP;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
		ipv6_hdr.proto = RTE_IP_TYPE_GRE;
	memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
	*header += sizeof(ipv6_hdr);
}
612
613 static void
614 add_udp_header(uint8_t **header, uint64_t data,
615         __rte_unused struct additional_para para)
616 {
617         struct rte_udp_hdr udp_hdr;
618
619         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
620                 return;
621
622         memset(&udp_hdr, 0, sizeof(struct rte_flow_item_udp));
623         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
624                 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
625         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
626                 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
627         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
628                 udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
629         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
630                 udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
631          memcpy(*header, &udp_hdr, sizeof(udp_hdr));
632          *header += sizeof(udp_hdr);
633 }
634
/*
 * Write a VXLAN header with a counter-derived VNI into *header and
 * advance the cursor, but only when the VXLAN bit is set in the data
 * item mask.
 */
static void
add_vxlan_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_hdr vxlan_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
		return;

	/* Fixed value */
	if (FIXED_VALUES)
		vni_value = 1;

	memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));

	/* VNI occupies the top 24 bits of the big-endian vx_vni word. */
	vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	/* NOTE(review): 0x8 lands in the little-end byte of the be32
	 * flags word; confirm the I-flag placement against RFC 7348. */
	vxlan_hdr.vx_flags = 0x8;

	memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
	*header += sizeof(vxlan_hdr);
}
657
/*
 * Write a VXLAN-GPE header with a counter-derived VNI into *header and
 * advance the cursor, but only when the VXLAN_GPE bit is set in the
 * data item mask.
 */
static void
add_vxlan_gpe_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
		return;

	/* Fixed value */
	if (FIXED_VALUES)
		vni_value = 1;

	memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));

	/* VNI occupies the top 24 bits of the big-endian vx_vni word. */
	vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	vxlan_gpe_hdr.vx_flags = 0x0c;

	memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
	*header += sizeof(vxlan_gpe_hdr);
}
680
/*
 * Write a GRE header (protocol = transparent Ethernet bridging) into
 * *header and advance the cursor, but only when the GRE bit is set in
 * the data item mask.
 */
static void
add_gre_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_gre_hdr gre_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
		return;

	memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));

	gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);

	memcpy(*header, &gre_hdr, sizeof(gre_hdr));
	*header += sizeof(gre_hdr);
}
697
/*
 * Write a GENEVE header with a counter-derived VNI into *header and
 * advance the cursor, but only when the GENEVE bit is set in the data
 * item mask.
 */
static void
add_geneve_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_geneve_hdr geneve_hdr;
	uint32_t vni_value = para.counter;
	uint8_t i;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
		return;

	/* Fixed value */
	if (FIXED_VALUES)
		vni_value = 1;

	memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));

	/* Store the 24-bit VNI big-endian into the 3-byte vni field. */
	for (i = 0; i < 3; i++)
		geneve_hdr.vni[2 - i] = vni_value >> (i * 8);

	memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
	*header += sizeof(geneve_hdr);
}
721
722 static void
723 add_gtp_header(uint8_t **header, uint64_t data,
724         struct additional_para para)
725 {
726         struct rte_gtp_hdr gtp_hdr;
727         uint32_t teid_value = para.counter;
728
729         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
730                 return;
731
732         /* Fixed value */
733         if (FIXED_VALUES)
734                 teid_value = 1;
735
736         memset(&gtp_hdr, 0, sizeof(struct rte_flow_item_gtp));
737
738         gtp_hdr.teid = RTE_BE32(teid_value);
739         gtp_hdr.msg_type = 255;
740
741         memcpy(*header, &gtp_hdr, sizeof(gtp_hdr));
742         *header += sizeof(gtp_hdr);
743 }
744
/*
 * Header-builder dispatch table for RAW_ENCAP/RAW_DECAP. Entries are
 * ordered outermost-to-innermost; each builder checks its own item bit
 * in `data` and is a no-op when the bit is clear.
 */
static const struct encap_decap_headers {
	void (*funct)(
		uint8_t **header,
		uint64_t data,
		struct additional_para para
		);
} headers[] = {
	{.funct = add_ether_header},
	{.funct = add_vlan_header},
	{.funct = add_ipv4_header},
	{.funct = add_ipv6_header},
	{.funct = add_udp_header},
	{.funct = add_vxlan_header},
	{.funct = add_vxlan_gpe_header},
	{.funct = add_gre_header},
	{.funct = add_geneve_header},
	{.funct = add_gtp_header},
};
763
/*
 * Append a RAW_ENCAP action. The encapsulation buffer is assembled by
 * running every header builder against para.encap_data; conf.size is
 * the number of bytes actually written. Per-lcore heap storage keeps
 * the buffer alive after this call (rte_flow stores pointers only).
 */
static void
add_raw_encap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_raw_encap_data *action_encap_data[RTE_MAX_LCORE] __rte_cache_aligned;
	uint64_t encap_data = para.encap_data;
	uint8_t *header;
	uint8_t i;

	/* Avoid double allocation. */
	if (action_encap_data[para.core_idx] == NULL)
		action_encap_data[para.core_idx] = rte_malloc("encap_data",
			sizeof(struct action_raw_encap_data), 0);

	/* Check if allocation failed. */
	if (action_encap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	/* Reset conf and buffer; conf.data points into the same block. */
	*action_encap_data[para.core_idx] = (struct action_raw_encap_data) {
		.conf = (struct rte_flow_action_raw_encap) {
			.data = action_encap_data[para.core_idx]->data,
		},
			.data = {},
	};
	header = action_encap_data[para.core_idx]->data;

	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, encap_data, para);

	/* Bytes written = cursor distance from buffer start. */
	action_encap_data[para.core_idx]->conf.size = header -
		action_encap_data[para.core_idx]->data;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
	actions[actions_counter].conf = &action_encap_data[para.core_idx]->conf;
}
800
/*
 * Append a RAW_DECAP action. Mirrors add_raw_encap: the header builders
 * run against para.decap_data to describe the stack of headers to be
 * stripped, and conf.size is the byte count produced. Per-lcore heap
 * storage keeps the buffer alive after this call.
 */
static void
add_raw_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_raw_decap_data *action_decap_data[RTE_MAX_LCORE] __rte_cache_aligned;
	uint64_t decap_data = para.decap_data;
	uint8_t *header;
	uint8_t i;

	/* Avoid double allocation. */
	if (action_decap_data[para.core_idx] == NULL)
		action_decap_data[para.core_idx] = rte_malloc("decap_data",
			sizeof(struct action_raw_decap_data), 0);

	/* Check if allocation failed. */
	if (action_decap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	/* Reset conf and buffer; conf.data points into the same block. */
	*action_decap_data[para.core_idx] = (struct action_raw_decap_data) {
		.conf = (struct rte_flow_action_raw_decap) {
			.data = action_decap_data[para.core_idx]->data,
		},
			.data = {},
	};
	header = action_decap_data[para.core_idx]->data;

	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, decap_data, para);

	/* Bytes written = cursor distance from buffer start. */
	action_decap_data[para.core_idx]->conf.size = header -
		action_decap_data[para.core_idx]->data;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
	actions[actions_counter].conf = &action_decap_data[para.core_idx]->conf;
}
837
838 static void
839 add_vxlan_encap(struct rte_flow_action *actions,
840         uint8_t actions_counter,
841         __rte_unused struct additional_para para)
842 {
843         static struct rte_flow_action_vxlan_encap vxlan_encap[RTE_MAX_LCORE] __rte_cache_aligned;
844         static struct rte_flow_item items[5];
845         static struct rte_flow_item_eth item_eth;
846         static struct rte_flow_item_ipv4 item_ipv4;
847         static struct rte_flow_item_udp item_udp;
848         static struct rte_flow_item_vxlan item_vxlan;
849         uint32_t ip_dst = para.counter;
850
851         /* Fixed value */
852         if (FIXED_VALUES)
853                 ip_dst = 1;
854
855         items[0].spec = &item_eth;
856         items[0].mask = &item_eth;
857         items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
858
859         item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
860         item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
861         item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
862         items[1].spec = &item_ipv4;
863         items[1].mask = &item_ipv4;
864         items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
865
866
867         item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
868         items[2].spec = &item_udp;
869         items[2].mask = &item_udp;
870         items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
871
872
873         item_vxlan.vni[2] = 1;
874         items[3].spec = &item_vxlan;
875         items[3].mask = &item_vxlan;
876         items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
877
878         items[4].type = RTE_FLOW_ITEM_TYPE_END;
879
880         vxlan_encap[para.core_idx].definition = items;
881
882         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
883         actions[actions_counter].conf = &vxlan_encap[para.core_idx];
884 }
885
/* Append a VXLAN_DECAP action (no conf needed). */
static void
add_vxlan_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
}
893
894 void
895 fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
896         uint32_t counter, uint16_t next_table, uint16_t hairpinq,
897         uint64_t encap_data, uint64_t decap_data, uint8_t core_idx)
898 {
899         struct additional_para additional_para_data;
900         uint8_t actions_counter = 0;
901         uint16_t hairpin_queues[hairpinq];
902         uint16_t queues[RXQ_NUM];
903         uint16_t i, j;
904
905         for (i = 0; i < RXQ_NUM; i++)
906                 queues[i] = i;
907
908         for (i = 0; i < hairpinq; i++)
909                 hairpin_queues[i] = i + RXQ_NUM;
910
911         additional_para_data = (struct additional_para){
912                 .queue = counter % RXQ_NUM,
913                 .next_table = next_table,
914                 .queues = queues,
915                 .queues_number = RXQ_NUM,
916                 .counter = counter,
917                 .encap_data = encap_data,
918                 .decap_data = decap_data,
919                 .core_idx = core_idx,
920         };
921
922         if (hairpinq != 0) {
923                 additional_para_data.queues = hairpin_queues;
924                 additional_para_data.queues_number = hairpinq;
925                 additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
926         }
927
928         static const struct actions_dict {
929                 uint64_t mask;
930                 void (*funct)(
931                         struct rte_flow_action *actions,
932                         uint8_t actions_counter,
933                         struct additional_para para
934                         );
935         } actions_list[] = {
936                 {
937                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
938                         .funct = add_mark,
939                 },
940                 {
941                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
942                         .funct = add_count,
943                 },
944                 {
945                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
946                         .funct = add_set_meta,
947                 },
948                 {
949                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
950                         .funct = add_set_tag,
951                 },
952                 {
953                         .mask = FLOW_ACTION_MASK(
954                                 RTE_FLOW_ACTION_TYPE_FLAG
955                         ),
956                         .funct = add_flag,
957                 },
958                 {
959                         .mask = FLOW_ACTION_MASK(
960                                 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
961                         ),
962                         .funct = add_set_src_mac,
963                 },
964                 {
965                         .mask = FLOW_ACTION_MASK(
966                                 RTE_FLOW_ACTION_TYPE_SET_MAC_DST
967                         ),
968                         .funct = add_set_dst_mac,
969                 },
970                 {
971                         .mask = FLOW_ACTION_MASK(
972                                 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
973                         ),
974                         .funct = add_set_src_ipv4,
975                 },
976                 {
977                         .mask = FLOW_ACTION_MASK(
978                                 RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
979                         ),
980                         .funct = add_set_dst_ipv4,
981                 },
982                 {
983                         .mask = FLOW_ACTION_MASK(
984                                 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
985                         ),
986                         .funct = add_set_src_ipv6,
987                 },
988                 {
989                         .mask = FLOW_ACTION_MASK(
990                                 RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
991                         ),
992                         .funct = add_set_dst_ipv6,
993                 },
994                 {
995                         .mask = FLOW_ACTION_MASK(
996                                 RTE_FLOW_ACTION_TYPE_SET_TP_SRC
997                         ),
998                         .funct = add_set_src_tp,
999                 },
1000                 {
1001                         .mask = FLOW_ACTION_MASK(
1002                                 RTE_FLOW_ACTION_TYPE_SET_TP_DST
1003                         ),
1004                         .funct = add_set_dst_tp,
1005                 },
1006                 {
1007                         .mask = FLOW_ACTION_MASK(
1008                                 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
1009                         ),
1010                         .funct = add_inc_tcp_ack,
1011                 },
1012                 {
1013                         .mask = FLOW_ACTION_MASK(
1014                                 RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
1015                         ),
1016                         .funct = add_dec_tcp_ack,
1017                 },
1018                 {
1019                         .mask = FLOW_ACTION_MASK(
1020                                 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
1021                         ),
1022                         .funct = add_inc_tcp_seq,
1023                 },
1024                 {
1025                         .mask = FLOW_ACTION_MASK(
1026                                 RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
1027                         ),
1028                         .funct = add_dec_tcp_seq,
1029                 },
1030                 {
1031                         .mask = FLOW_ACTION_MASK(
1032                                 RTE_FLOW_ACTION_TYPE_SET_TTL
1033                         ),
1034                         .funct = add_set_ttl,
1035                 },
1036                 {
1037                         .mask = FLOW_ACTION_MASK(
1038                                 RTE_FLOW_ACTION_TYPE_DEC_TTL
1039                         ),
1040                         .funct = add_dec_ttl,
1041                 },
1042                 {
1043                         .mask = FLOW_ACTION_MASK(
1044                                 RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
1045                         ),
1046                         .funct = add_set_ipv4_dscp,
1047                 },
1048                 {
1049                         .mask = FLOW_ACTION_MASK(
1050                                 RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
1051                         ),
1052                         .funct = add_set_ipv6_dscp,
1053                 },
1054                 {
1055                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
1056                         .funct = add_queue,
1057                 },
1058                 {
1059                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
1060                         .funct = add_rss,
1061                 },
1062                 {
1063                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
1064                         .funct = add_jump,
1065                 },
1066                 {
1067                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
1068                         .funct = add_port_id
1069                 },
1070                 {
1071                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
1072                         .funct = add_drop,
1073                 },
1074                 {
1075                         .mask = HAIRPIN_QUEUE_ACTION,
1076                         .funct = add_queue,
1077                 },
1078                 {
1079                         .mask = HAIRPIN_RSS_ACTION,
1080                         .funct = add_rss,
1081                 },
1082                 {
1083                         .mask = FLOW_ACTION_MASK(
1084                                 RTE_FLOW_ACTION_TYPE_RAW_ENCAP
1085                         ),
1086                         .funct = add_raw_encap,
1087                 },
1088                 {
1089                         .mask = FLOW_ACTION_MASK(
1090                                 RTE_FLOW_ACTION_TYPE_RAW_DECAP
1091                         ),
1092                         .funct = add_raw_decap,
1093                 },
1094                 {
1095                         .mask = FLOW_ACTION_MASK(
1096                                 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
1097                         ),
1098                         .funct = add_vxlan_encap,
1099                 },
1100                 {
1101                         .mask = FLOW_ACTION_MASK(
1102                                 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
1103                         ),
1104                         .funct = add_vxlan_decap,
1105                 },
1106         };
1107
1108         for (j = 0; j < MAX_ACTIONS_NUM; j++) {
1109                 if (flow_actions[j] == 0)
1110                         break;
1111                 for (i = 0; i < RTE_DIM(actions_list); i++) {
1112                         if ((flow_actions[j] &
1113                                 actions_list[i].mask) == 0)
1114                                 continue;
1115                         actions_list[i].funct(
1116                                 actions, actions_counter++,
1117                                 additional_para_data
1118                         );
1119                         break;
1120                 }
1121         }
1122         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;
1123 }