1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
4 * The file contains the implementations of actions generators.
5 * Each generator is responsible for preparing its action instance
6 * and initializing it with needed data.
10 #include <rte_malloc.h>
12 #include <rte_ethdev.h>
13 #include <rte_vxlan.h>
16 #include <rte_geneve.h>
18 #include "actions_gen.h"
23 /* Storage for additional parameters for actions */
24 struct additional_para {
/* NOTE(review): excerpt is truncated — other members (queue, next_table,
 * queues, counter, encap_data, decap_data, core_idx) are referenced by the
 * generators below but their declarations are not visible here. */
28 uint16_t queues_number; /* number of entries in the queues array */
35 /* Storage for struct rte_flow_action_raw_encap including external data. */
36 struct action_raw_encap_data {
37 struct rte_flow_action_raw_encap conf;
/* conf.data is pointed at a buffer inside this same struct (see
 * add_raw_encap), so the action and its raw bytes share one allocation. */
/* presumably bytes preserved by the PMD across decap/encap — TODO confirm */
39 uint8_t preserve[128];
43 /* Storage for struct rte_flow_action_raw_decap including external data. */
44 struct action_raw_decap_data {
45 struct rte_flow_action_raw_decap conf;
/* (truncated) a data buffer follows; add_raw_decap points conf.data at it. */
50 /* Storage for struct rte_flow_action_rss including external data. */
51 struct action_rss_data {
52 struct rte_flow_action_rss conf;
/* (truncated) key[] and queue[] arrays follow; add_rss points conf.key and
 * conf.queue at them so everything lives in one rte_malloc'd object. */
/* Generate a MARK action: tag matched packets with a per-flow id.
 * State is per-lcore so multiple cores can build flows concurrently. */
58 add_mark(struct rte_flow_action *actions,
59 uint8_t actions_counter,
60 struct additional_para para)
62 static struct rte_flow_action_mark mark_actions[RTE_MAX_LCORE] __rte_cache_aligned;
63 uint32_t counter = para.counter;
66 /* Random values from 1 to 255 (counter % 255 yields 0..254, then +1) */
67 mark_actions[para.core_idx].id = (counter % 255) + 1;
70 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
71 actions[actions_counter].conf = &mark_actions[para.core_idx];
/* Generate a QUEUE action steering matched packets to para.queue.
 * Per-lcore storage keeps concurrent flow construction safe. */
75 add_queue(struct rte_flow_action *actions,
76 uint8_t actions_counter,
77 struct additional_para para)
79 static struct rte_flow_action_queue queue_actions[RTE_MAX_LCORE] __rte_cache_aligned;
82 queue_actions[para.core_idx].index = para.queue;
85 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
86 actions[actions_counter].conf = &queue_actions[para.core_idx];
/* Generate a JUMP action to group para.next_table.
 * NOTE(review): unlike the other generators this uses a single shared static,
 * not a per-lcore array — safe only because every core writes the same
 * next_table value; confirm if cores can ever differ. */
90 add_jump(struct rte_flow_action *actions,
91 uint8_t actions_counter,
92 struct additional_para para)
94 static struct rte_flow_action_jump jump_action;
97 jump_action.group = para.next_table;
100 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
101 actions[actions_counter].conf = &jump_action;
/* Generate an RSS action spreading traffic over para.queues.
 * The rte_flow_action_rss, its key and its queue list are kept in one
 * lazily rte_malloc'd per-lcore action_rss_data object (never freed:
 * it is reused for every subsequent flow on that lcore). */
105 add_rss(struct rte_flow_action *actions,
106 uint8_t actions_counter,
107 struct additional_para para)
109 static struct action_rss_data *rss_data[RTE_MAX_LCORE] __rte_cache_aligned;
/* Allocate once per lcore; reused across calls. */
113 if (rss_data[para.core_idx] == NULL)
114 rss_data[para.core_idx] = rte_malloc("rss_data",
115 sizeof(struct action_rss_data), 0);
117 if (rss_data[para.core_idx] == NULL)
118 rte_exit(EXIT_FAILURE, "No Memory available!");
120 *rss_data[para.core_idx] = (struct action_rss_data){
121 .conf = (struct rte_flow_action_rss){
122 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
124 .types = GET_RSS_HF(),
/* key/queue point back into the same allocation (see struct comment). */
125 .key_len = sizeof(rss_data[para.core_idx]->key),
126 .queue_num = para.queues_number,
127 .key = rss_data[para.core_idx]->key,
128 .queue = rss_data[para.core_idx]->queue,
/* Copy the caller's queue list (normal RXQs or hairpin queues). */
134 for (queue = 0; queue < para.queues_number; queue++)
135 rss_data[para.core_idx]->queue[queue] = para.queues[queue];
137 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
138 actions[actions_counter].conf = &rss_data[para.core_idx]->conf;
/* Generate a SET_META action with a fixed value/mask.
 * META_DATA is presumably defined in actions_gen.h — not visible here. */
142 add_set_meta(struct rte_flow_action *actions,
143 uint8_t actions_counter,
144 __rte_unused struct additional_para para)
146 static struct rte_flow_action_set_meta meta_action;
149 meta_action.data = RTE_BE32(META_DATA);
150 meta_action.mask = RTE_BE32(0xffffffff);
153 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
154 actions[actions_counter].conf = &meta_action;
/* Generate a SET_TAG action writing META_DATA into tag register TAG_INDEX
 * (both constants come from outside this excerpt). */
158 add_set_tag(struct rte_flow_action *actions,
159 uint8_t actions_counter,
160 __rte_unused struct additional_para para)
162 static struct rte_flow_action_set_tag tag_action;
165 tag_action.data = RTE_BE32(META_DATA);
166 tag_action.mask = RTE_BE32(0xffffffff);
167 tag_action.index = TAG_INDEX;
170 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
171 actions[actions_counter].conf = &tag_action;
/* Generate a PORT_ID action forwarding to the fixed PORT_ID_DST port. */
175 add_port_id(struct rte_flow_action *actions,
176 uint8_t actions_counter,
177 __rte_unused struct additional_para para)
179 static struct rte_flow_action_port_id port_id;
182 port_id.id = PORT_ID_DST;
185 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
186 actions[actions_counter].conf = &port_id;
/* Generate a DROP action; takes no configuration. */
190 add_drop(struct rte_flow_action *actions,
191 uint8_t actions_counter,
192 __rte_unused struct additional_para para)
194 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
/* Generate a COUNT action with default (zeroed) configuration. */
198 add_count(struct rte_flow_action *actions,
199 uint8_t actions_counter,
200 __rte_unused struct additional_para para)
202 static struct rte_flow_action_count count_action;
204 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
205 actions[actions_counter].conf = &count_action;
209 add_set_src_mac(struct rte_flow_action *actions,
210 uint8_t actions_counter,
211 __rte_unused struct additional_para para)
213 static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
214 uint32_t mac = para.counter;
221 /* Mac address to be set is random each time */
222 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
223 set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
227 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
228 actions[actions_counter].conf = &set_macs[para.core_idx];
232 add_set_dst_mac(struct rte_flow_action *actions,
233 uint8_t actions_counter,
234 __rte_unused struct additional_para para)
236 static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
237 uint32_t mac = para.counter;
244 /* Mac address to be set is random each time */
245 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
246 set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
250 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
251 actions[actions_counter].conf = &set_macs[para.core_idx];
255 add_set_src_ipv4(struct rte_flow_action *actions,
256 uint8_t actions_counter,
257 __rte_unused struct additional_para para)
259 static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
260 uint32_t ip = para.counter;
266 /* IPv4 value to be set is random each time */
267 set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
269 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
270 actions[actions_counter].conf = &set_ipv4[para.core_idx];
274 add_set_dst_ipv4(struct rte_flow_action *actions,
275 uint8_t actions_counter,
276 __rte_unused struct additional_para para)
278 static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
279 uint32_t ip = para.counter;
285 /* IPv4 value to be set is random each time */
286 set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
288 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
289 actions[actions_counter].conf = &set_ipv4[para.core_idx];
293 add_set_src_ipv6(struct rte_flow_action *actions,
294 uint8_t actions_counter,
295 __rte_unused struct additional_para para)
297 static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
298 uint32_t ipv6 = para.counter;
305 /* IPv6 value to set is random each time */
306 for (i = 0; i < 16; i++) {
307 set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
311 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
312 actions[actions_counter].conf = &set_ipv6[para.core_idx];
316 add_set_dst_ipv6(struct rte_flow_action *actions,
317 uint8_t actions_counter,
318 __rte_unused struct additional_para para)
320 static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
321 uint32_t ipv6 = para.counter;
328 /* IPv6 value to set is random each time */
329 for (i = 0; i < 16; i++) {
330 set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
334 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
335 actions[actions_counter].conf = &set_ipv6[para.core_idx];
339 add_set_src_tp(struct rte_flow_action *actions,
340 uint8_t actions_counter,
341 __rte_unused struct additional_para para)
343 static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
344 uint32_t tp = para.counter;
350 /* TP src port is random each time */
353 set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
355 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
356 actions[actions_counter].conf = &set_tp[para.core_idx];
360 add_set_dst_tp(struct rte_flow_action *actions,
361 uint8_t actions_counter,
362 __rte_unused struct additional_para para)
364 static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
365 uint32_t tp = para.counter;
371 /* TP src port is random each time */
375 set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
377 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
378 actions[actions_counter].conf = &set_tp[para.core_idx];
382 add_inc_tcp_ack(struct rte_flow_action *actions,
383 uint8_t actions_counter,
384 __rte_unused struct additional_para para)
386 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
387 uint32_t ack_value = para.counter;
393 value[para.core_idx] = RTE_BE32(ack_value);
395 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
396 actions[actions_counter].conf = &value[para.core_idx];
400 add_dec_tcp_ack(struct rte_flow_action *actions,
401 uint8_t actions_counter,
402 __rte_unused struct additional_para para)
404 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
405 uint32_t ack_value = para.counter;
411 value[para.core_idx] = RTE_BE32(ack_value);
413 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
414 actions[actions_counter].conf = &value[para.core_idx];
418 add_inc_tcp_seq(struct rte_flow_action *actions,
419 uint8_t actions_counter,
420 __rte_unused struct additional_para para)
422 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
423 uint32_t seq_value = para.counter;
429 value[para.core_idx] = RTE_BE32(seq_value);
431 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
432 actions[actions_counter].conf = &value[para.core_idx];
436 add_dec_tcp_seq(struct rte_flow_action *actions,
437 uint8_t actions_counter,
438 __rte_unused struct additional_para para)
440 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
441 uint32_t seq_value = para.counter;
447 value[para.core_idx] = RTE_BE32(seq_value);
449 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
450 actions[actions_counter].conf = &value[para.core_idx];
454 add_set_ttl(struct rte_flow_action *actions,
455 uint8_t actions_counter,
456 __rte_unused struct additional_para para)
458 static struct rte_flow_action_set_ttl set_ttl[RTE_MAX_LCORE] __rte_cache_aligned;
459 uint32_t ttl_value = para.counter;
465 /* Set ttl to random value each time */
466 ttl_value = ttl_value % 0xff;
468 set_ttl[para.core_idx].ttl_value = ttl_value;
470 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
471 actions[actions_counter].conf = &set_ttl[para.core_idx];
/* Generate a DEC_TTL action; takes no configuration. */
475 add_dec_ttl(struct rte_flow_action *actions,
476 uint8_t actions_counter,
477 __rte_unused struct additional_para para)
479 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
483 add_set_ipv4_dscp(struct rte_flow_action *actions,
484 uint8_t actions_counter,
485 __rte_unused struct additional_para para)
487 static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
488 uint32_t dscp_value = para.counter;
494 /* Set dscp to random value each time */
495 dscp_value = dscp_value % 0xff;
497 set_dscp[para.core_idx].dscp = dscp_value;
499 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
500 actions[actions_counter].conf = &set_dscp[para.core_idx];
504 add_set_ipv6_dscp(struct rte_flow_action *actions,
505 uint8_t actions_counter,
506 __rte_unused struct additional_para para)
508 static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
509 uint32_t dscp_value = para.counter;
515 /* Set dscp to random value each time */
516 dscp_value = dscp_value % 0xff;
518 set_dscp[para.core_idx].dscp = dscp_value;
520 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
521 actions[actions_counter].conf = &set_dscp[para.core_idx];
/* Generate a FLAG action; takes no configuration. */
525 add_flag(struct rte_flow_action *actions,
526 uint8_t actions_counter,
527 __rte_unused struct additional_para para)
529 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
/* Append an Ethernet header at *header and advance the cursor.
 * No-op unless the ETH bit is set in the @data item mask; ether_type is
 * picked from the next enabled layer (VLAN > IPv4 > IPv6). */
533 add_ether_header(uint8_t **header, uint64_t data,
534 __rte_unused struct additional_para para)
536 struct rte_ether_hdr eth_hdr;
538 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
541 memset(&eth_hdr, 0, sizeof(struct rte_ether_hdr));
542 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
543 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
544 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
545 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
546 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
547 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
548 memcpy(*header, &eth_hdr, sizeof(eth_hdr));
549 *header += sizeof(eth_hdr);
/* Append a VLAN header (TCI = VLAN_VALUE) at *header and advance the cursor.
 * No-op unless the VLAN bit is set in @data; eth_proto follows the next
 * enabled layer (IPv4 or IPv6). */
553 add_vlan_header(uint8_t **header, uint64_t data,
554 __rte_unused struct additional_para para)
556 struct rte_vlan_hdr vlan_hdr;
559 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
562 vlan_value = VLAN_VALUE;
564 memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
565 vlan_hdr.vlan_tci = RTE_BE16(vlan_value);
567 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
568 vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
569 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
570 vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
571 memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
572 *header += sizeof(vlan_hdr);
/* Append an IPv4 header at *header and advance the cursor; dst address is
 * per-flow (para.counter). No-op unless the IPV4 bit is set in @data. */
576 add_ipv4_header(uint8_t **header, uint64_t data,
577 struct additional_para para)
579 struct rte_ipv4_hdr ipv4_hdr;
580 uint32_t ip_dst = para.counter;
582 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
589 memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
/* NOTE(review): src_addr uses host-order RTE_IPV4() while dst_addr is
 * byte-swapped with RTE_BE32 — confirm the endianness mix is intended. */
590 ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
591 ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
592 ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
593 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
594 ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
595 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
596 ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
597 memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
598 *header += sizeof(ipv4_hdr);
/* Append a zeroed IPv6 header at *header and advance the cursor; proto is
 * set from the next enabled layer. No-op unless the IPV6 bit is in @data. */
602 add_ipv6_header(uint8_t **header, uint64_t data,
603 __rte_unused struct additional_para para)
605 struct rte_ipv6_hdr ipv6_hdr;
607 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
610 memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
611 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
612 ipv6_hdr.proto = RTE_IP_TYPE_UDP;
613 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
614 ipv6_hdr.proto = RTE_IP_TYPE_GRE;
615 memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
616 *header += sizeof(ipv6_hdr);
620 add_udp_header(uint8_t **header, uint64_t data,
621 __rte_unused struct additional_para para)
623 struct rte_udp_hdr udp_hdr;
625 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
628 memset(&udp_hdr, 0, sizeof(struct rte_flow_item_udp));
629 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
630 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
631 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
632 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
633 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
634 udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
635 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
636 udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
637 memcpy(*header, &udp_hdr, sizeof(udp_hdr));
638 *header += sizeof(udp_hdr);
/* Append a VXLAN header with a per-flow VNI (para.counter) at *header.
 * No-op unless the VXLAN bit is set in @data. */
642 add_vxlan_header(uint8_t **header, uint64_t data,
643 struct additional_para para)
645 struct rte_vxlan_hdr vxlan_hdr;
646 uint32_t vni_value = para.counter;
648 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
655 memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));
/* VNI occupies the top 24 bits of the big-endian vx_vni word. */
657 vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
/* presumably the VNI-valid (I) flag — confirm against rte_vxlan.h layout */
658 vxlan_hdr.vx_flags = 0x8;
660 memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
661 *header += sizeof(vxlan_hdr);
/* Append a VXLAN-GPE header with a per-flow VNI (para.counter) at *header.
 * No-op unless the VXLAN_GPE bit is set in @data. */
665 add_vxlan_gpe_header(uint8_t **header, uint64_t data,
666 struct additional_para para)
668 struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
669 uint32_t vni_value = para.counter;
671 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
678 memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));
680 vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
/* presumably I + P flags of the GPE header — confirm against rte_vxlan.h */
681 vxlan_gpe_hdr.vx_flags = 0x0c;
683 memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
684 *header += sizeof(vxlan_gpe_hdr);
/* Append a GRE header carrying transparent Ethernet bridging (TEB) at
 * *header. No-op unless the GRE bit is set in @data. */
688 add_gre_header(uint8_t **header, uint64_t data,
689 __rte_unused struct additional_para para)
691 struct rte_gre_hdr gre_hdr;
693 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
696 memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));
698 gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);
700 memcpy(*header, &gre_hdr, sizeof(gre_hdr));
701 *header += sizeof(gre_hdr);
/* Append a GENEVE header with a per-flow VNI (para.counter) at *header.
 * No-op unless the GENEVE bit is set in @data. */
705 add_geneve_header(uint8_t **header, uint64_t data,
706 struct additional_para para)
708 struct rte_geneve_hdr geneve_hdr;
709 uint32_t vni_value = para.counter;
712 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
719 memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));
/* vni[] is a 3-byte big-endian field: store counter's low 24 bits MSB-first. */
721 for (i = 0; i < 3; i++)
722 geneve_hdr.vni[2 - i] = vni_value >> (i * 8);
724 memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
725 *header += sizeof(geneve_hdr);
729 add_gtp_header(uint8_t **header, uint64_t data,
730 struct additional_para para)
732 struct rte_gtp_hdr gtp_hdr;
733 uint32_t teid_value = para.counter;
735 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
742 memset(>p_hdr, 0, sizeof(struct rte_flow_item_gtp));
744 gtp_hdr.teid = RTE_BE32(teid_value);
745 gtp_hdr.msg_type = 255;
747 memcpy(*header, >p_hdr, sizeof(gtp_hdr));
748 *header += sizeof(gtp_hdr);
/* Header builders in outermost-to-innermost order; add_raw_encap/decap walk
 * this table and each builder appends itself only if its bit is set in the
 * encap/decap bitmask. Order matters: it defines the packet layout. */
751 static const struct encap_decap_headers {
755 struct additional_para para
758 {.funct = add_ether_header},
759 {.funct = add_vlan_header},
760 {.funct = add_ipv4_header},
761 {.funct = add_ipv6_header},
762 {.funct = add_udp_header},
763 {.funct = add_vxlan_header},
764 {.funct = add_vxlan_gpe_header},
765 {.funct = add_gre_header},
766 {.funct = add_geneve_header},
767 {.funct = add_gtp_header},
/* Generate a RAW_ENCAP action: builds the raw header bytes by running every
 * enabled builder from the headers[] table into a lazily-allocated per-lcore
 * buffer (never freed; reused for subsequent flows on the same lcore). */
771 add_raw_encap(struct rte_flow_action *actions,
772 uint8_t actions_counter,
773 struct additional_para para)
775 static struct action_raw_encap_data *action_encap_data[RTE_MAX_LCORE] __rte_cache_aligned;
776 uint64_t encap_data = para.encap_data;
780 /* Avoid double allocation. */
781 if (action_encap_data[para.core_idx] == NULL)
782 action_encap_data[para.core_idx] = rte_malloc("encap_data",
783 sizeof(struct action_raw_encap_data), 0);
785 /* Check if allocation failed. */
786 if (action_encap_data[para.core_idx] == NULL)
787 rte_exit(EXIT_FAILURE, "No Memory available!");
789 *action_encap_data[para.core_idx] = (struct action_raw_encap_data) {
790 .conf = (struct rte_flow_action_raw_encap) {
/* conf.data points into this same allocation (see struct definition). */
791 .data = action_encap_data[para.core_idx]->data,
795 header = action_encap_data[para.core_idx]->data;
/* Each builder appends its header (if enabled in encap_data) and advances. */
797 for (i = 0; i < RTE_DIM(headers); i++)
798 headers[i].funct(&header, encap_data, para);
/* Final size = how far the cursor moved from the start of the buffer. */
800 action_encap_data[para.core_idx]->conf.size = header -
801 action_encap_data[para.core_idx]->data;
803 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
804 actions[actions_counter].conf = &action_encap_data[para.core_idx]->conf;
/* Generate a RAW_DECAP action: mirrors add_raw_encap but describes the
 * headers to strip, selected by the decap_data bitmask. */
808 add_raw_decap(struct rte_flow_action *actions,
809 uint8_t actions_counter,
810 struct additional_para para)
812 static struct action_raw_decap_data *action_decap_data[RTE_MAX_LCORE] __rte_cache_aligned;
813 uint64_t decap_data = para.decap_data;
817 /* Avoid double allocation. */
818 if (action_decap_data[para.core_idx] == NULL)
819 action_decap_data[para.core_idx] = rte_malloc("decap_data",
820 sizeof(struct action_raw_decap_data), 0);
822 /* Check if allocation failed. */
823 if (action_decap_data[para.core_idx] == NULL)
824 rte_exit(EXIT_FAILURE, "No Memory available!");
826 *action_decap_data[para.core_idx] = (struct action_raw_decap_data) {
827 .conf = (struct rte_flow_action_raw_decap) {
828 .data = action_decap_data[para.core_idx]->data,
832 header = action_decap_data[para.core_idx]->data;
834 for (i = 0; i < RTE_DIM(headers); i++)
835 headers[i].funct(&header, decap_data, para);
837 action_decap_data[para.core_idx]->conf.size = header -
838 action_decap_data[para.core_idx]->data;
840 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
841 actions[actions_counter].conf = &action_decap_data[para.core_idx]->conf;
845 add_vxlan_encap(struct rte_flow_action *actions,
846 uint8_t actions_counter,
847 __rte_unused struct additional_para para)
849 static struct rte_flow_action_vxlan_encap vxlan_encap[RTE_MAX_LCORE] __rte_cache_aligned;
850 static struct rte_flow_item items[5];
851 static struct rte_flow_item_eth item_eth;
852 static struct rte_flow_item_ipv4 item_ipv4;
853 static struct rte_flow_item_udp item_udp;
854 static struct rte_flow_item_vxlan item_vxlan;
855 uint32_t ip_dst = para.counter;
861 items[0].spec = &item_eth;
862 items[0].mask = &item_eth;
863 items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
865 item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
866 item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
867 item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
868 items[1].spec = &item_ipv4;
869 items[1].mask = &item_ipv4;
870 items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
873 item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
874 items[2].spec = &item_udp;
875 items[2].mask = &item_udp;
876 items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
879 item_vxlan.vni[2] = 1;
880 items[3].spec = &item_vxlan;
881 items[3].mask = &item_vxlan;
882 items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
884 items[4].type = RTE_FLOW_ITEM_TYPE_END;
886 vxlan_encap[para.core_idx].definition = items;
888 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
889 actions[actions_counter].conf = &vxlan_encap[para.core_idx];
/* Generate a VXLAN_DECAP action; takes no configuration. */
893 add_vxlan_decap(struct rte_flow_action *actions,
894 uint8_t actions_counter,
895 __rte_unused struct additional_para para)
897 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
/* Build the actions[] array for one flow: for every requested action bitmask
 * in flow_actions[], look up the matching generator in actions_list[] and
 * invoke it; terminate the array with an END action. Hairpin masks override
 * the queue/RSS parameters to target the hairpin queue range. */
901 fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
902 uint32_t counter, uint16_t next_table, uint16_t hairpinq,
903 uint64_t encap_data, uint64_t decap_data, uint8_t core_idx)
905 struct additional_para additional_para_data;
906 uint8_t actions_counter = 0;
907 uint16_t hairpin_queues[hairpinq];
908 uint16_t queues[RXQ_NUM];
/* Hairpin queues are numbered right after the normal RX queues. */
911 for (i = 0; i < RXQ_NUM; i++)
914 for (i = 0; i < hairpinq; i++)
915 hairpin_queues[i] = i + RXQ_NUM;
917 additional_para_data = (struct additional_para){
918 .queue = counter % RXQ_NUM,
919 .next_table = next_table,
921 .queues_number = RXQ_NUM,
923 .encap_data = encap_data,
924 .decap_data = decap_data,
925 .core_idx = core_idx,
/* (truncated) hairpin mode: retarget queue/RSS parameters. */
929 additional_para_data.queues = hairpin_queues;
930 additional_para_data.queues_number = hairpinq;
931 additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
/* Dispatch table: one {mask, generator} pair per supported action. */
934 static const struct actions_dict {
937 struct rte_flow_action *actions,
938 uint8_t actions_counter,
939 struct additional_para para
943 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
947 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
951 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
952 .funct = add_set_meta,
955 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
956 .funct = add_set_tag,
959 .mask = FLOW_ACTION_MASK(
960 RTE_FLOW_ACTION_TYPE_FLAG
965 .mask = FLOW_ACTION_MASK(
966 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
968 .funct = add_set_src_mac,
971 .mask = FLOW_ACTION_MASK(
972 RTE_FLOW_ACTION_TYPE_SET_MAC_DST
974 .funct = add_set_dst_mac,
977 .mask = FLOW_ACTION_MASK(
978 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
980 .funct = add_set_src_ipv4,
983 .mask = FLOW_ACTION_MASK(
984 RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
986 .funct = add_set_dst_ipv4,
989 .mask = FLOW_ACTION_MASK(
990 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
992 .funct = add_set_src_ipv6,
995 .mask = FLOW_ACTION_MASK(
996 RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
998 .funct = add_set_dst_ipv6,
1001 .mask = FLOW_ACTION_MASK(
1002 RTE_FLOW_ACTION_TYPE_SET_TP_SRC
1004 .funct = add_set_src_tp,
1007 .mask = FLOW_ACTION_MASK(
1008 RTE_FLOW_ACTION_TYPE_SET_TP_DST
1010 .funct = add_set_dst_tp,
1013 .mask = FLOW_ACTION_MASK(
1014 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
1016 .funct = add_inc_tcp_ack,
1019 .mask = FLOW_ACTION_MASK(
1020 RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
1022 .funct = add_dec_tcp_ack,
1025 .mask = FLOW_ACTION_MASK(
1026 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
1028 .funct = add_inc_tcp_seq,
1031 .mask = FLOW_ACTION_MASK(
1032 RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
1034 .funct = add_dec_tcp_seq,
1037 .mask = FLOW_ACTION_MASK(
1038 RTE_FLOW_ACTION_TYPE_SET_TTL
1040 .funct = add_set_ttl,
1043 .mask = FLOW_ACTION_MASK(
1044 RTE_FLOW_ACTION_TYPE_DEC_TTL
1046 .funct = add_dec_ttl,
1049 .mask = FLOW_ACTION_MASK(
1050 RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
1052 .funct = add_set_ipv4_dscp,
1055 .mask = FLOW_ACTION_MASK(
1056 RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
1058 .funct = add_set_ipv6_dscp,
1061 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
1065 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
1069 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
1073 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
1074 .funct = add_port_id
1077 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
/* Hairpin entries reuse the queue/RSS generators with hairpin parameters. */
1081 .mask = HAIRPIN_QUEUE_ACTION,
1085 .mask = HAIRPIN_RSS_ACTION,
1089 .mask = FLOW_ACTION_MASK(
1090 RTE_FLOW_ACTION_TYPE_RAW_ENCAP
1092 .funct = add_raw_encap,
1095 .mask = FLOW_ACTION_MASK(
1096 RTE_FLOW_ACTION_TYPE_RAW_DECAP
1098 .funct = add_raw_decap,
1101 .mask = FLOW_ACTION_MASK(
1102 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
1104 .funct = add_vxlan_encap,
1107 .mask = FLOW_ACTION_MASK(
1108 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
1110 .funct = add_vxlan_decap,
/* For each requested action, find its generator and append the action. */
1114 for (j = 0; j < MAX_ACTIONS_NUM; j++) {
1115 if (flow_actions[j] == 0)
1117 for (i = 0; i < RTE_DIM(actions_list); i++) {
1118 if ((flow_actions[j] &
1119 actions_list[i].mask) == 0)
1121 actions_list[i].funct(
1122 actions, actions_counter++,
1123 additional_para_data
/* Terminate the action list as required by rte_flow. */
1128 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;