app/flow-perf: add multi-core rule insertion and deletion
[dpdk.git] / app / test-flow-perf / actions_gen.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  *
4  * The file contains the implementations of actions generators.
5  * Each generator is responsible for preparing its action instance
6  * and initializing it with needed data.
7  */
8
9 #include <sys/types.h>
10 #include <rte_malloc.h>
11 #include <rte_flow.h>
12 #include <rte_ethdev.h>
13 #include <rte_vxlan.h>
14 #include <rte_gtp.h>
15 #include <rte_gre.h>
16 #include <rte_geneve.h>
17
18 #include "actions_gen.h"
19 #include "flow_gen.h"
20 #include "config.h"
21
22
/* Storage for additional parameters for actions */
struct additional_para {
	/* Destination queue index for the QUEUE action. */
	uint16_t queue;
	/* Target group for the JUMP action. */
	uint16_t next_table;
	/* Queue indices consumed by the RSS action. */
	uint16_t *queues;
	/* Number of valid entries in queues[]. */
	uint16_t queues_number;
	/* Per-flow counter; varies generated field values between flows. */
	uint32_t counter;
	/* Bitmask of FLOW_ITEM_MASK(...) headers to serialize for raw encap. */
	uint64_t encap_data;
	/* Bitmask of FLOW_ITEM_MASK(...) headers to serialize for raw decap. */
	uint64_t decap_data;
	/* Calling lcore index; selects the per-core slot in static storage. */
	uint8_t core_idx;
};
34
/* Storage for struct rte_flow_action_raw_encap including external data. */
struct action_raw_encap_data {
	/* Action configuration; conf.data is pointed at data[] below. */
	struct rte_flow_action_raw_encap conf;
	/* Buffer the encapsulation headers are serialized into. */
	uint8_t data[128];
	/* NOTE(review): preserve[] is not populated in this file — confirm use. */
	uint8_t preserve[128];
	/* NOTE(review): idx is not referenced in this file — confirm it is needed. */
	uint16_t idx;
};
42
/* Storage for struct rte_flow_action_raw_decap including external data. */
struct action_raw_decap_data {
	/* Action configuration; conf.data is pointed at data[] below. */
	struct rte_flow_action_raw_decap conf;
	/* Buffer the headers to be decapsulated are serialized into. */
	uint8_t data[128];
	/* NOTE(review): idx is not referenced in this file — confirm it is needed. */
	uint16_t idx;
};
49
/* Storage for struct rte_flow_action_rss including external data. */
struct action_rss_data {
	/* RSS configuration; conf.key/conf.queue point at the arrays below. */
	struct rte_flow_action_rss conf;
	/* RSS hash key backing store (conf.key_len = sizeof(key)). */
	uint8_t key[40];
	/* RSS queue list backing store; first queues_number entries are used. */
	uint16_t queue[128];
};
56
57 static void
58 add_mark(struct rte_flow_action *actions,
59         uint8_t actions_counter,
60         struct additional_para para)
61 {
62         static struct rte_flow_action_mark mark_actions[RTE_MAX_LCORE] __rte_cache_aligned;
63         uint32_t counter = para.counter;
64
65         do {
66                 /* Random values from 1 to 256 */
67                 mark_actions[para.core_idx].id = (counter % 255) + 1;
68         } while (0);
69
70         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
71         actions[actions_counter].conf = &mark_actions[para.core_idx];
72 }
73
74 static void
75 add_queue(struct rte_flow_action *actions,
76         uint8_t actions_counter,
77         struct additional_para para)
78 {
79         static struct rte_flow_action_queue queue_actions[RTE_MAX_LCORE] __rte_cache_aligned;
80
81         do {
82                 queue_actions[para.core_idx].index = para.queue;
83         } while (0);
84
85         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
86         actions[actions_counter].conf = &queue_actions[para.core_idx];
87 }
88
89 static void
90 add_jump(struct rte_flow_action *actions,
91         uint8_t actions_counter,
92         struct additional_para para)
93 {
94         static struct rte_flow_action_jump jump_action;
95
96         do {
97                 jump_action.group = para.next_table;
98         } while (0);
99
100         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
101         actions[actions_counter].conf = &jump_action;
102 }
103
104 static void
105 add_rss(struct rte_flow_action *actions,
106         uint8_t actions_counter,
107         struct additional_para para)
108 {
109         static struct action_rss_data *rss_data[RTE_MAX_LCORE] __rte_cache_aligned;
110
111         uint16_t queue;
112
113         if (rss_data[para.core_idx] == NULL)
114                 rss_data[para.core_idx] = rte_malloc("rss_data",
115                         sizeof(struct action_rss_data), 0);
116
117         if (rss_data[para.core_idx] == NULL)
118                 rte_exit(EXIT_FAILURE, "No Memory available!");
119
120         *rss_data[para.core_idx] = (struct action_rss_data){
121                 .conf = (struct rte_flow_action_rss){
122                         .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
123                         .level = 0,
124                         .types = GET_RSS_HF(),
125                         .key_len = sizeof(rss_data[para.core_idx]->key),
126                         .queue_num = para.queues_number,
127                         .key = rss_data[para.core_idx]->key,
128                         .queue = rss_data[para.core_idx]->queue,
129                 },
130                 .key = { 1 },
131                 .queue = { 0 },
132         };
133
134         for (queue = 0; queue < para.queues_number; queue++)
135                 rss_data[para.core_idx]->queue[queue] = para.queues[queue];
136
137         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
138         actions[actions_counter].conf = &rss_data[para.core_idx]->conf;
139 }
140
141 static void
142 add_set_meta(struct rte_flow_action *actions,
143         uint8_t actions_counter,
144         __rte_unused struct additional_para para)
145 {
146         static struct rte_flow_action_set_meta meta_action;
147
148         do {
149                 meta_action.data = RTE_BE32(META_DATA);
150                 meta_action.mask = RTE_BE32(0xffffffff);
151         } while (0);
152
153         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
154         actions[actions_counter].conf = &meta_action;
155 }
156
157 static void
158 add_set_tag(struct rte_flow_action *actions,
159         uint8_t actions_counter,
160         __rte_unused struct additional_para para)
161 {
162         static struct rte_flow_action_set_tag tag_action;
163
164         do {
165                 tag_action.data = RTE_BE32(META_DATA);
166                 tag_action.mask = RTE_BE32(0xffffffff);
167                 tag_action.index = TAG_INDEX;
168         } while (0);
169
170         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
171         actions[actions_counter].conf = &tag_action;
172 }
173
174 static void
175 add_port_id(struct rte_flow_action *actions,
176         uint8_t actions_counter,
177         __rte_unused struct additional_para para)
178 {
179         static struct rte_flow_action_port_id port_id;
180
181         do {
182                 port_id.id = PORT_ID_DST;
183         } while (0);
184
185         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
186         actions[actions_counter].conf = &port_id;
187 }
188
/* Append a DROP action; it carries no configuration. */
static void
add_drop(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
}
196
197 static void
198 add_count(struct rte_flow_action *actions,
199         uint8_t actions_counter,
200         __rte_unused struct additional_para para)
201 {
202         static struct rte_flow_action_count count_action;
203
204         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
205         actions[actions_counter].conf = &count_action;
206 }
207
208 static void
209 add_set_src_mac(struct rte_flow_action *actions,
210         uint8_t actions_counter,
211         __rte_unused struct additional_para para)
212 {
213         static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
214         uint32_t mac = para.counter;
215         uint16_t i;
216
217         /* Fixed value */
218         if (FIXED_VALUES)
219                 mac = 1;
220
221         /* Mac address to be set is random each time */
222         for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
223                 set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
224                 mac = mac >> 8;
225         }
226
227         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
228         actions[actions_counter].conf = &set_macs[para.core_idx];
229 }
230
231 static void
232 add_set_dst_mac(struct rte_flow_action *actions,
233         uint8_t actions_counter,
234         __rte_unused struct additional_para para)
235 {
236         static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
237         uint32_t mac = para.counter;
238         uint16_t i;
239
240         /* Fixed value */
241         if (FIXED_VALUES)
242                 mac = 1;
243
244         /* Mac address to be set is random each time */
245         for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
246                 set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
247                 mac = mac >> 8;
248         }
249
250         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
251         actions[actions_counter].conf = &set_macs[para.core_idx];
252 }
253
254 static void
255 add_set_src_ipv4(struct rte_flow_action *actions,
256         uint8_t actions_counter,
257         __rte_unused struct additional_para para)
258 {
259         static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
260         uint32_t ip = para.counter;
261
262         /* Fixed value */
263         if (FIXED_VALUES)
264                 ip = 1;
265
266         /* IPv4 value to be set is random each time */
267         set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
268
269         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
270         actions[actions_counter].conf = &set_ipv4[para.core_idx];
271 }
272
273 static void
274 add_set_dst_ipv4(struct rte_flow_action *actions,
275         uint8_t actions_counter,
276         __rte_unused struct additional_para para)
277 {
278         static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
279         uint32_t ip = para.counter;
280
281         /* Fixed value */
282         if (FIXED_VALUES)
283                 ip = 1;
284
285         /* IPv4 value to be set is random each time */
286         set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
287
288         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
289         actions[actions_counter].conf = &set_ipv4[para.core_idx];
290 }
291
292 static void
293 add_set_src_ipv6(struct rte_flow_action *actions,
294         uint8_t actions_counter,
295         __rte_unused struct additional_para para)
296 {
297         static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
298         uint32_t ipv6 = para.counter;
299         uint8_t i;
300
301         /* Fixed value */
302         if (FIXED_VALUES)
303                 ipv6 = 1;
304
305         /* IPv6 value to set is random each time */
306         for (i = 0; i < 16; i++) {
307                 set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
308                 ipv6 = ipv6 >> 8;
309         }
310
311         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
312         actions[actions_counter].conf = &set_ipv6[para.core_idx];
313 }
314
315 static void
316 add_set_dst_ipv6(struct rte_flow_action *actions,
317         uint8_t actions_counter,
318         __rte_unused struct additional_para para)
319 {
320         static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
321         uint32_t ipv6 = para.counter;
322         uint8_t i;
323
324         /* Fixed value */
325         if (FIXED_VALUES)
326                 ipv6 = 1;
327
328         /* IPv6 value to set is random each time */
329         for (i = 0; i < 16; i++) {
330                 set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
331                 ipv6 = ipv6 >> 8;
332         }
333
334         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
335         actions[actions_counter].conf = &set_ipv6[para.core_idx];
336 }
337
338 static void
339 add_set_src_tp(struct rte_flow_action *actions,
340         uint8_t actions_counter,
341         __rte_unused struct additional_para para)
342 {
343         static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
344         uint32_t tp = para.counter;
345
346         /* Fixed value */
347         if (FIXED_VALUES)
348                 tp = 100;
349
350         /* TP src port is random each time */
351         tp = tp % 0xffff;
352
353         set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
354
355         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
356         actions[actions_counter].conf = &set_tp[para.core_idx];
357 }
358
359 static void
360 add_set_dst_tp(struct rte_flow_action *actions,
361         uint8_t actions_counter,
362         __rte_unused struct additional_para para)
363 {
364         static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
365         uint32_t tp = para.counter;
366
367         /* Fixed value */
368         if (FIXED_VALUES)
369                 tp = 100;
370
371         /* TP src port is random each time */
372         if (tp > 0xffff)
373                 tp = tp >> 16;
374
375         set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
376
377         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
378         actions[actions_counter].conf = &set_tp[para.core_idx];
379 }
380
381 static void
382 add_inc_tcp_ack(struct rte_flow_action *actions,
383         uint8_t actions_counter,
384         __rte_unused struct additional_para para)
385 {
386         static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
387         uint32_t ack_value = para.counter;
388
389         /* Fixed value */
390         if (FIXED_VALUES)
391                 ack_value = 1;
392
393         value[para.core_idx] = RTE_BE32(ack_value);
394
395         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
396         actions[actions_counter].conf = &value[para.core_idx];
397 }
398
399 static void
400 add_dec_tcp_ack(struct rte_flow_action *actions,
401         uint8_t actions_counter,
402         __rte_unused struct additional_para para)
403 {
404         static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
405         uint32_t ack_value = para.counter;
406
407         /* Fixed value */
408         if (FIXED_VALUES)
409                 ack_value = 1;
410
411         value[para.core_idx] = RTE_BE32(ack_value);
412
413         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
414         actions[actions_counter].conf = &value[para.core_idx];
415 }
416
417 static void
418 add_inc_tcp_seq(struct rte_flow_action *actions,
419         uint8_t actions_counter,
420         __rte_unused struct additional_para para)
421 {
422         static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
423         uint32_t seq_value = para.counter;
424
425         /* Fixed value */
426         if (FIXED_VALUES)
427                 seq_value = 1;
428
429         value[para.core_idx] = RTE_BE32(seq_value);
430
431         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
432         actions[actions_counter].conf = &value[para.core_idx];
433 }
434
435 static void
436 add_dec_tcp_seq(struct rte_flow_action *actions,
437         uint8_t actions_counter,
438         __rte_unused struct additional_para para)
439 {
440         static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
441         uint32_t seq_value = para.counter;
442
443         /* Fixed value */
444         if (FIXED_VALUES)
445                 seq_value = 1;
446
447         value[para.core_idx] = RTE_BE32(seq_value);
448
449         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
450         actions[actions_counter].conf = &value[para.core_idx];
451 }
452
453 static void
454 add_set_ttl(struct rte_flow_action *actions,
455         uint8_t actions_counter,
456         __rte_unused struct additional_para para)
457 {
458         static struct rte_flow_action_set_ttl set_ttl[RTE_MAX_LCORE] __rte_cache_aligned;
459         uint32_t ttl_value = para.counter;
460
461         /* Fixed value */
462         if (FIXED_VALUES)
463                 ttl_value = 1;
464
465         /* Set ttl to random value each time */
466         ttl_value = ttl_value % 0xff;
467
468         set_ttl[para.core_idx].ttl_value = ttl_value;
469
470         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
471         actions[actions_counter].conf = &set_ttl[para.core_idx];
472 }
473
/* Append a DEC_TTL action; it carries no configuration. */
static void
add_dec_ttl(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
}
481
482 static void
483 add_set_ipv4_dscp(struct rte_flow_action *actions,
484         uint8_t actions_counter,
485         __rte_unused struct additional_para para)
486 {
487         static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
488         uint32_t dscp_value = para.counter;
489
490         /* Fixed value */
491         if (FIXED_VALUES)
492                 dscp_value = 1;
493
494         /* Set dscp to random value each time */
495         dscp_value = dscp_value % 0xff;
496
497         set_dscp[para.core_idx].dscp = dscp_value;
498
499         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
500         actions[actions_counter].conf = &set_dscp[para.core_idx];
501 }
502
503 static void
504 add_set_ipv6_dscp(struct rte_flow_action *actions,
505         uint8_t actions_counter,
506         __rte_unused struct additional_para para)
507 {
508         static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
509         uint32_t dscp_value = para.counter;
510
511         /* Fixed value */
512         if (FIXED_VALUES)
513                 dscp_value = 1;
514
515         /* Set dscp to random value each time */
516         dscp_value = dscp_value % 0xff;
517
518         set_dscp[para.core_idx].dscp = dscp_value;
519
520         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
521         actions[actions_counter].conf = &set_dscp[para.core_idx];
522 }
523
/* Append a FLAG action; it carries no configuration. */
static void
add_flag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
}
531
532 static void
533 add_ether_header(uint8_t **header, uint64_t data,
534         __rte_unused struct additional_para para)
535 {
536         struct rte_ether_hdr eth_hdr;
537
538         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
539                 return;
540
541         memset(&eth_hdr, 0, sizeof(struct rte_ether_hdr));
542         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
543                 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
544         else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
545                 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
546         else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
547                 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
548         memcpy(*header, &eth_hdr, sizeof(eth_hdr));
549         *header += sizeof(eth_hdr);
550 }
551
552 static void
553 add_vlan_header(uint8_t **header, uint64_t data,
554         __rte_unused struct additional_para para)
555 {
556         struct rte_vlan_hdr vlan_hdr;
557         uint16_t vlan_value;
558
559         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
560                 return;
561
562         vlan_value = VLAN_VALUE;
563
564         memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
565         vlan_hdr.vlan_tci = RTE_BE16(vlan_value);
566
567         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
568                 vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
569         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
570                 vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
571         memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
572         *header += sizeof(vlan_hdr);
573 }
574
575 static void
576 add_ipv4_header(uint8_t **header, uint64_t data,
577         struct additional_para para)
578 {
579         struct rte_ipv4_hdr ipv4_hdr;
580         uint32_t ip_dst = para.counter;
581
582         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
583                 return;
584
585         /* Fixed value */
586         if (FIXED_VALUES)
587                 ip_dst = 1;
588
589         memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
590         ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
591         ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
592         ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
593         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
594                 ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
595         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
596                 ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
597         memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
598         *header += sizeof(ipv4_hdr);
599 }
600
601 static void
602 add_ipv6_header(uint8_t **header, uint64_t data,
603         __rte_unused struct additional_para para)
604 {
605         struct rte_ipv6_hdr ipv6_hdr;
606
607         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
608                 return;
609
610         memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
611         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
612                 ipv6_hdr.proto = RTE_IP_TYPE_UDP;
613         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
614                 ipv6_hdr.proto = RTE_IP_TYPE_GRE;
615         memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
616         *header += sizeof(ipv6_hdr);
617 }
618
619 static void
620 add_udp_header(uint8_t **header, uint64_t data,
621         __rte_unused struct additional_para para)
622 {
623         struct rte_udp_hdr udp_hdr;
624
625         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
626                 return;
627
628         memset(&udp_hdr, 0, sizeof(struct rte_flow_item_udp));
629         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
630                 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
631         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
632                 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
633         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
634                 udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
635         if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
636                 udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
637          memcpy(*header, &udp_hdr, sizeof(udp_hdr));
638          *header += sizeof(udp_hdr);
639 }
640
641 static void
642 add_vxlan_header(uint8_t **header, uint64_t data,
643         struct additional_para para)
644 {
645         struct rte_vxlan_hdr vxlan_hdr;
646         uint32_t vni_value = para.counter;
647
648         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
649                 return;
650
651         /* Fixed value */
652         if (FIXED_VALUES)
653                 vni_value = 1;
654
655         memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));
656
657         vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
658         vxlan_hdr.vx_flags = 0x8;
659
660         memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
661         *header += sizeof(vxlan_hdr);
662 }
663
664 static void
665 add_vxlan_gpe_header(uint8_t **header, uint64_t data,
666         struct additional_para para)
667 {
668         struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
669         uint32_t vni_value = para.counter;
670
671         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
672                 return;
673
674         /* Fixed value */
675         if (FIXED_VALUES)
676                 vni_value = 1;
677
678         memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));
679
680         vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
681         vxlan_gpe_hdr.vx_flags = 0x0c;
682
683         memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
684         *header += sizeof(vxlan_gpe_hdr);
685 }
686
687 static void
688 add_gre_header(uint8_t **header, uint64_t data,
689         __rte_unused struct additional_para para)
690 {
691         struct rte_gre_hdr gre_hdr;
692
693         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
694                 return;
695
696         memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));
697
698         gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);
699
700         memcpy(*header, &gre_hdr, sizeof(gre_hdr));
701         *header += sizeof(gre_hdr);
702 }
703
704 static void
705 add_geneve_header(uint8_t **header, uint64_t data,
706         struct additional_para para)
707 {
708         struct rte_geneve_hdr geneve_hdr;
709         uint32_t vni_value = para.counter;
710         uint8_t i;
711
712         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
713                 return;
714
715         /* Fixed value */
716         if (FIXED_VALUES)
717                 vni_value = 1;
718
719         memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));
720
721         for (i = 0; i < 3; i++)
722                 geneve_hdr.vni[2 - i] = vni_value >> (i * 8);
723
724         memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
725         *header += sizeof(geneve_hdr);
726 }
727
728 static void
729 add_gtp_header(uint8_t **header, uint64_t data,
730         struct additional_para para)
731 {
732         struct rte_gtp_hdr gtp_hdr;
733         uint32_t teid_value = para.counter;
734
735         if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
736                 return;
737
738         /* Fixed value */
739         if (FIXED_VALUES)
740                 teid_value = 1;
741
742         memset(&gtp_hdr, 0, sizeof(struct rte_flow_item_gtp));
743
744         gtp_hdr.teid = RTE_BE32(teid_value);
745         gtp_hdr.msg_type = 255;
746
747         memcpy(*header, &gtp_hdr, sizeof(gtp_hdr));
748         *header += sizeof(gtp_hdr);
749 }
750
/*
 * Header builders applied in order by add_raw_encap/add_raw_decap.
 * The array order IS the stacking order of the serialized headers
 * (outermost first) — do not reorder.
 */
static const struct encap_decap_headers {
	void (*funct)(
		uint8_t **header,
		uint64_t data,
		struct additional_para para
		);
} headers[] = {
	{.funct = add_ether_header},
	{.funct = add_vlan_header},
	{.funct = add_ipv4_header},
	{.funct = add_ipv6_header},
	{.funct = add_udp_header},
	{.funct = add_vxlan_header},
	{.funct = add_vxlan_gpe_header},
	{.funct = add_gre_header},
	{.funct = add_geneve_header},
	{.funct = add_gtp_header},
};
769
770 static void
771 add_raw_encap(struct rte_flow_action *actions,
772         uint8_t actions_counter,
773         struct additional_para para)
774 {
775         static struct action_raw_encap_data *action_encap_data[RTE_MAX_LCORE] __rte_cache_aligned;
776         uint64_t encap_data = para.encap_data;
777         uint8_t *header;
778         uint8_t i;
779
780         /* Avoid double allocation. */
781         if (action_encap_data[para.core_idx] == NULL)
782                 action_encap_data[para.core_idx] = rte_malloc("encap_data",
783                         sizeof(struct action_raw_encap_data), 0);
784
785         /* Check if allocation failed. */
786         if (action_encap_data[para.core_idx] == NULL)
787                 rte_exit(EXIT_FAILURE, "No Memory available!");
788
789         *action_encap_data[para.core_idx] = (struct action_raw_encap_data) {
790                 .conf = (struct rte_flow_action_raw_encap) {
791                         .data = action_encap_data[para.core_idx]->data,
792                 },
793                         .data = {},
794         };
795         header = action_encap_data[para.core_idx]->data;
796
797         for (i = 0; i < RTE_DIM(headers); i++)
798                 headers[i].funct(&header, encap_data, para);
799
800         action_encap_data[para.core_idx]->conf.size = header -
801                 action_encap_data[para.core_idx]->data;
802
803         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
804         actions[actions_counter].conf = &action_encap_data[para.core_idx]->conf;
805 }
806
807 static void
808 add_raw_decap(struct rte_flow_action *actions,
809         uint8_t actions_counter,
810         struct additional_para para)
811 {
812         static struct action_raw_decap_data *action_decap_data[RTE_MAX_LCORE] __rte_cache_aligned;
813         uint64_t decap_data = para.decap_data;
814         uint8_t *header;
815         uint8_t i;
816
817         /* Avoid double allocation. */
818         if (action_decap_data[para.core_idx] == NULL)
819                 action_decap_data[para.core_idx] = rte_malloc("decap_data",
820                         sizeof(struct action_raw_decap_data), 0);
821
822         /* Check if allocation failed. */
823         if (action_decap_data[para.core_idx] == NULL)
824                 rte_exit(EXIT_FAILURE, "No Memory available!");
825
826         *action_decap_data[para.core_idx] = (struct action_raw_decap_data) {
827                 .conf = (struct rte_flow_action_raw_decap) {
828                         .data = action_decap_data[para.core_idx]->data,
829                 },
830                         .data = {},
831         };
832         header = action_decap_data[para.core_idx]->data;
833
834         for (i = 0; i < RTE_DIM(headers); i++)
835                 headers[i].funct(&header, decap_data, para);
836
837         action_decap_data[para.core_idx]->conf.size = header -
838                 action_decap_data[para.core_idx]->data;
839
840         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
841         actions[actions_counter].conf = &action_decap_data[para.core_idx]->conf;
842 }
843
844 static void
845 add_vxlan_encap(struct rte_flow_action *actions,
846         uint8_t actions_counter,
847         __rte_unused struct additional_para para)
848 {
849         static struct rte_flow_action_vxlan_encap vxlan_encap[RTE_MAX_LCORE] __rte_cache_aligned;
850         static struct rte_flow_item items[5];
851         static struct rte_flow_item_eth item_eth;
852         static struct rte_flow_item_ipv4 item_ipv4;
853         static struct rte_flow_item_udp item_udp;
854         static struct rte_flow_item_vxlan item_vxlan;
855         uint32_t ip_dst = para.counter;
856
857         /* Fixed value */
858         if (FIXED_VALUES)
859                 ip_dst = 1;
860
861         items[0].spec = &item_eth;
862         items[0].mask = &item_eth;
863         items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
864
865         item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
866         item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
867         item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
868         items[1].spec = &item_ipv4;
869         items[1].mask = &item_ipv4;
870         items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
871
872
873         item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
874         items[2].spec = &item_udp;
875         items[2].mask = &item_udp;
876         items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
877
878
879         item_vxlan.vni[2] = 1;
880         items[3].spec = &item_vxlan;
881         items[3].mask = &item_vxlan;
882         items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
883
884         items[4].type = RTE_FLOW_ITEM_TYPE_END;
885
886         vxlan_encap[para.core_idx].definition = items;
887
888         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
889         actions[actions_counter].conf = &vxlan_encap[para.core_idx];
890 }
891
/* Append a VXLAN_DECAP action; it carries no configuration. */
static void
add_vxlan_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
}
899
900 void
901 fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
902         uint32_t counter, uint16_t next_table, uint16_t hairpinq,
903         uint64_t encap_data, uint64_t decap_data, uint8_t core_idx)
904 {
905         struct additional_para additional_para_data;
906         uint8_t actions_counter = 0;
907         uint16_t hairpin_queues[hairpinq];
908         uint16_t queues[RXQ_NUM];
909         uint16_t i, j;
910
911         for (i = 0; i < RXQ_NUM; i++)
912                 queues[i] = i;
913
914         for (i = 0; i < hairpinq; i++)
915                 hairpin_queues[i] = i + RXQ_NUM;
916
917         additional_para_data = (struct additional_para){
918                 .queue = counter % RXQ_NUM,
919                 .next_table = next_table,
920                 .queues = queues,
921                 .queues_number = RXQ_NUM,
922                 .counter = counter,
923                 .encap_data = encap_data,
924                 .decap_data = decap_data,
925                 .core_idx = core_idx,
926         };
927
928         if (hairpinq != 0) {
929                 additional_para_data.queues = hairpin_queues;
930                 additional_para_data.queues_number = hairpinq;
931                 additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
932         }
933
934         static const struct actions_dict {
935                 uint64_t mask;
936                 void (*funct)(
937                         struct rte_flow_action *actions,
938                         uint8_t actions_counter,
939                         struct additional_para para
940                         );
941         } actions_list[] = {
942                 {
943                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
944                         .funct = add_mark,
945                 },
946                 {
947                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
948                         .funct = add_count,
949                 },
950                 {
951                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
952                         .funct = add_set_meta,
953                 },
954                 {
955                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
956                         .funct = add_set_tag,
957                 },
958                 {
959                         .mask = FLOW_ACTION_MASK(
960                                 RTE_FLOW_ACTION_TYPE_FLAG
961                         ),
962                         .funct = add_flag,
963                 },
964                 {
965                         .mask = FLOW_ACTION_MASK(
966                                 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
967                         ),
968                         .funct = add_set_src_mac,
969                 },
970                 {
971                         .mask = FLOW_ACTION_MASK(
972                                 RTE_FLOW_ACTION_TYPE_SET_MAC_DST
973                         ),
974                         .funct = add_set_dst_mac,
975                 },
976                 {
977                         .mask = FLOW_ACTION_MASK(
978                                 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
979                         ),
980                         .funct = add_set_src_ipv4,
981                 },
982                 {
983                         .mask = FLOW_ACTION_MASK(
984                                 RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
985                         ),
986                         .funct = add_set_dst_ipv4,
987                 },
988                 {
989                         .mask = FLOW_ACTION_MASK(
990                                 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
991                         ),
992                         .funct = add_set_src_ipv6,
993                 },
994                 {
995                         .mask = FLOW_ACTION_MASK(
996                                 RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
997                         ),
998                         .funct = add_set_dst_ipv6,
999                 },
1000                 {
1001                         .mask = FLOW_ACTION_MASK(
1002                                 RTE_FLOW_ACTION_TYPE_SET_TP_SRC
1003                         ),
1004                         .funct = add_set_src_tp,
1005                 },
1006                 {
1007                         .mask = FLOW_ACTION_MASK(
1008                                 RTE_FLOW_ACTION_TYPE_SET_TP_DST
1009                         ),
1010                         .funct = add_set_dst_tp,
1011                 },
1012                 {
1013                         .mask = FLOW_ACTION_MASK(
1014                                 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
1015                         ),
1016                         .funct = add_inc_tcp_ack,
1017                 },
1018                 {
1019                         .mask = FLOW_ACTION_MASK(
1020                                 RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
1021                         ),
1022                         .funct = add_dec_tcp_ack,
1023                 },
1024                 {
1025                         .mask = FLOW_ACTION_MASK(
1026                                 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
1027                         ),
1028                         .funct = add_inc_tcp_seq,
1029                 },
1030                 {
1031                         .mask = FLOW_ACTION_MASK(
1032                                 RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
1033                         ),
1034                         .funct = add_dec_tcp_seq,
1035                 },
1036                 {
1037                         .mask = FLOW_ACTION_MASK(
1038                                 RTE_FLOW_ACTION_TYPE_SET_TTL
1039                         ),
1040                         .funct = add_set_ttl,
1041                 },
1042                 {
1043                         .mask = FLOW_ACTION_MASK(
1044                                 RTE_FLOW_ACTION_TYPE_DEC_TTL
1045                         ),
1046                         .funct = add_dec_ttl,
1047                 },
1048                 {
1049                         .mask = FLOW_ACTION_MASK(
1050                                 RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
1051                         ),
1052                         .funct = add_set_ipv4_dscp,
1053                 },
1054                 {
1055                         .mask = FLOW_ACTION_MASK(
1056                                 RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
1057                         ),
1058                         .funct = add_set_ipv6_dscp,
1059                 },
1060                 {
1061                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
1062                         .funct = add_queue,
1063                 },
1064                 {
1065                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
1066                         .funct = add_rss,
1067                 },
1068                 {
1069                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
1070                         .funct = add_jump,
1071                 },
1072                 {
1073                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
1074                         .funct = add_port_id
1075                 },
1076                 {
1077                         .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
1078                         .funct = add_drop,
1079                 },
1080                 {
1081                         .mask = HAIRPIN_QUEUE_ACTION,
1082                         .funct = add_queue,
1083                 },
1084                 {
1085                         .mask = HAIRPIN_RSS_ACTION,
1086                         .funct = add_rss,
1087                 },
1088                 {
1089                         .mask = FLOW_ACTION_MASK(
1090                                 RTE_FLOW_ACTION_TYPE_RAW_ENCAP
1091                         ),
1092                         .funct = add_raw_encap,
1093                 },
1094                 {
1095                         .mask = FLOW_ACTION_MASK(
1096                                 RTE_FLOW_ACTION_TYPE_RAW_DECAP
1097                         ),
1098                         .funct = add_raw_decap,
1099                 },
1100                 {
1101                         .mask = FLOW_ACTION_MASK(
1102                                 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
1103                         ),
1104                         .funct = add_vxlan_encap,
1105                 },
1106                 {
1107                         .mask = FLOW_ACTION_MASK(
1108                                 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
1109                         ),
1110                         .funct = add_vxlan_decap,
1111                 },
1112         };
1113
1114         for (j = 0; j < MAX_ACTIONS_NUM; j++) {
1115                 if (flow_actions[j] == 0)
1116                         break;
1117                 for (i = 0; i < RTE_DIM(actions_list); i++) {
1118                         if ((flow_actions[j] &
1119                                 actions_list[i].mask) == 0)
1120                                 continue;
1121                         actions_list[i].funct(
1122                                 actions, actions_counter++,
1123                                 additional_para_data
1124                         );
1125                         break;
1126                 }
1127         }
1128         actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;
1129 }