1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
4 * The file contains the implementations of actions generators.
5 * Each generator is responsible for preparing it's action instance
6 * and initializing it with needed data.
10 #include <rte_malloc.h>
12 #include <rte_ethdev.h>
13 #include <rte_vxlan.h>
16 #include <rte_geneve.h>
18 #include "actions_gen.h"
/* Storage for additional parameters for actions */
struct additional_para {
	/* NOTE(review): this excerpt is truncated — the generators below also
	 * read para.queue, para.next_table, para.counter, para.encap_data,
	 * para.decap_data, para.core_idx and para.queues, whose declarations
	 * are not visible here. */
	uint16_t queues_number;	/* entry count of the queues array (RSS fan-out) */
/* Storage for struct rte_flow_action_raw_encap including external data. */
struct action_raw_encap_data {
	struct rte_flow_action_raw_encap conf;	/* conf.data/conf.size point into this struct */
	/* NOTE(review): a data[] buffer member is referenced by add_raw_encap()
	 * but its declaration is not visible in this excerpt. */
	uint8_t preserve[128];	/* backing store for the raw_encap preserve pointer */
/* Storage for struct rte_flow_action_raw_decap including external data. */
struct action_raw_decap_data {
	struct rte_flow_action_raw_decap conf;	/* conf.data/conf.size point into this struct */
	/* NOTE(review): truncated — add_raw_decap() references a data[] member
	 * declared below this point. */
/* Storage for struct rte_flow_action_rss including external data. */
struct action_rss_data {
	struct rte_flow_action_rss conf;	/* conf.key/conf.queue point at the members below */
	/* NOTE(review): truncated — add_rss() references key[] and queue[]
	 * members declared below this point. */
add_mark(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
	/* Per-lcore storage: conf must outlive this call, one slot per core. */
	static struct rte_flow_action_mark mark_actions[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t counter = para.counter;
	/* Random values from 1 to 255 — (counter % 255) is 0..254, +1 shifts to 1..255 */
	mark_actions[para.core_idx].id = (counter % 255) + 1;
	/* Emit the MARK action at the requested slot. */
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
	actions[actions_counter].conf = &mark_actions[para.core_idx];
75 add_queue(struct rte_flow_action *actions,
76 uint8_t actions_counter,
77 struct additional_para para)
79 static struct rte_flow_action_queue queue_actions[RTE_MAX_LCORE] __rte_cache_aligned;
82 queue_actions[para.core_idx].index = para.queue;
85 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
86 actions[actions_counter].conf = &queue_actions[para.core_idx];
90 add_jump(struct rte_flow_action *actions,
91 uint8_t actions_counter,
92 struct additional_para para)
94 static struct rte_flow_action_jump jump_action;
97 jump_action.group = para.next_table;
100 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
101 actions[actions_counter].conf = &jump_action;
add_rss(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
	/* One lazily-allocated RSS config per lcore; never freed (lives for the app). */
	static struct action_rss_data *rss_data[RTE_MAX_LCORE] __rte_cache_aligned;
	/* Allocate once per core on first use. */
	if (rss_data[para.core_idx] == NULL)
		rss_data[para.core_idx] = rte_malloc("rss_data",
			sizeof(struct action_rss_data), 0);
	/* Abort the whole run on allocation failure. */
	if (rss_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");
	/* Re-initialize the whole struct; key/queue pointers reference the
	 * trailing storage inside the same allocation. */
	*rss_data[para.core_idx] = (struct action_rss_data){
		.conf = (struct rte_flow_action_rss){
			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
			.types = GET_RSS_HF(),
			.key_len = sizeof(rss_data[para.core_idx]->key),
			.queue_num = para.queues_number,
			.key = rss_data[para.core_idx]->key,
			.queue = rss_data[para.core_idx]->queue,
	/* Fan out over all queues supplied by the caller. */
	for (queue = 0; queue < para.queues_number; queue++)
		rss_data[para.core_idx]->queue[queue] = para.queues[queue];
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
	actions[actions_counter].conf = &rss_data[para.core_idx]->conf;
142 add_set_meta(struct rte_flow_action *actions,
143 uint8_t actions_counter,
144 __rte_unused struct additional_para para)
146 static struct rte_flow_action_set_meta meta_action = {
147 .data = RTE_BE32(META_DATA),
148 .mask = RTE_BE32(0xffffffff),
151 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
152 actions[actions_counter].conf = &meta_action;
add_set_tag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
	/* Static conf shared by all flows: write META_DATA with a full mask.
	 * NOTE(review): excerpt is truncated — rte_flow_action_set_tag also has
	 * an .index member; presumably it is initialized in the missing lines
	 * of this initializer — confirm against the full file. */
	static struct rte_flow_action_set_tag tag_action = {
		.data = RTE_BE32(META_DATA),
		.mask = RTE_BE32(0xffffffff),
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
	actions[actions_counter].conf = &tag_action;
add_port_id(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
	/* Static conf shared by all flows.
	 * NOTE(review): truncated — the .id member initializer is in the
	 * missing lines; verify the destination port against the full file. */
	static struct rte_flow_action_port_id port_id = {
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	actions[actions_counter].conf = &port_id;
184 add_drop(struct rte_flow_action *actions,
185 uint8_t actions_counter,
186 __rte_unused struct additional_para para)
188 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
192 add_count(struct rte_flow_action *actions,
193 uint8_t actions_counter,
194 __rte_unused struct additional_para para)
196 static struct rte_flow_action_count count_action;
198 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
199 actions[actions_counter].conf = &count_action;
203 add_set_src_mac(struct rte_flow_action *actions,
204 uint8_t actions_counter,
205 __rte_unused struct additional_para para)
207 static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
208 uint32_t mac = para.counter;
215 /* Mac address to be set is random each time */
216 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
217 set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
221 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
222 actions[actions_counter].conf = &set_macs[para.core_idx];
226 add_set_dst_mac(struct rte_flow_action *actions,
227 uint8_t actions_counter,
228 __rte_unused struct additional_para para)
230 static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
231 uint32_t mac = para.counter;
238 /* Mac address to be set is random each time */
239 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
240 set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
244 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
245 actions[actions_counter].conf = &set_macs[para.core_idx];
249 add_set_src_ipv4(struct rte_flow_action *actions,
250 uint8_t actions_counter,
251 __rte_unused struct additional_para para)
253 static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
254 uint32_t ip = para.counter;
260 /* IPv4 value to be set is random each time */
261 set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
263 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
264 actions[actions_counter].conf = &set_ipv4[para.core_idx];
268 add_set_dst_ipv4(struct rte_flow_action *actions,
269 uint8_t actions_counter,
270 __rte_unused struct additional_para para)
272 static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
273 uint32_t ip = para.counter;
279 /* IPv4 value to be set is random each time */
280 set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
282 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
283 actions[actions_counter].conf = &set_ipv4[para.core_idx];
287 add_set_src_ipv6(struct rte_flow_action *actions,
288 uint8_t actions_counter,
289 __rte_unused struct additional_para para)
291 static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
292 uint32_t ipv6 = para.counter;
299 /* IPv6 value to set is random each time */
300 for (i = 0; i < 16; i++) {
301 set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
305 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
306 actions[actions_counter].conf = &set_ipv6[para.core_idx];
310 add_set_dst_ipv6(struct rte_flow_action *actions,
311 uint8_t actions_counter,
312 __rte_unused struct additional_para para)
314 static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
315 uint32_t ipv6 = para.counter;
322 /* IPv6 value to set is random each time */
323 for (i = 0; i < 16; i++) {
324 set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
328 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
329 actions[actions_counter].conf = &set_ipv6[para.core_idx];
333 add_set_src_tp(struct rte_flow_action *actions,
334 uint8_t actions_counter,
335 __rte_unused struct additional_para para)
337 static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
338 uint32_t tp = para.counter;
344 /* TP src port is random each time */
347 set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
349 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
350 actions[actions_counter].conf = &set_tp[para.core_idx];
354 add_set_dst_tp(struct rte_flow_action *actions,
355 uint8_t actions_counter,
356 __rte_unused struct additional_para para)
358 static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
359 uint32_t tp = para.counter;
365 /* TP src port is random each time */
369 set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
371 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
372 actions[actions_counter].conf = &set_tp[para.core_idx];
376 add_inc_tcp_ack(struct rte_flow_action *actions,
377 uint8_t actions_counter,
378 __rte_unused struct additional_para para)
380 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
381 uint32_t ack_value = para.counter;
387 value[para.core_idx] = RTE_BE32(ack_value);
389 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
390 actions[actions_counter].conf = &value[para.core_idx];
394 add_dec_tcp_ack(struct rte_flow_action *actions,
395 uint8_t actions_counter,
396 __rte_unused struct additional_para para)
398 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
399 uint32_t ack_value = para.counter;
405 value[para.core_idx] = RTE_BE32(ack_value);
407 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
408 actions[actions_counter].conf = &value[para.core_idx];
412 add_inc_tcp_seq(struct rte_flow_action *actions,
413 uint8_t actions_counter,
414 __rte_unused struct additional_para para)
416 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
417 uint32_t seq_value = para.counter;
423 value[para.core_idx] = RTE_BE32(seq_value);
425 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
426 actions[actions_counter].conf = &value[para.core_idx];
430 add_dec_tcp_seq(struct rte_flow_action *actions,
431 uint8_t actions_counter,
432 __rte_unused struct additional_para para)
434 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
435 uint32_t seq_value = para.counter;
441 value[para.core_idx] = RTE_BE32(seq_value);
443 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
444 actions[actions_counter].conf = &value[para.core_idx];
448 add_set_ttl(struct rte_flow_action *actions,
449 uint8_t actions_counter,
450 __rte_unused struct additional_para para)
452 static struct rte_flow_action_set_ttl set_ttl[RTE_MAX_LCORE] __rte_cache_aligned;
453 uint32_t ttl_value = para.counter;
459 /* Set ttl to random value each time */
460 ttl_value = ttl_value % 0xff;
462 set_ttl[para.core_idx].ttl_value = ttl_value;
464 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
465 actions[actions_counter].conf = &set_ttl[para.core_idx];
469 add_dec_ttl(struct rte_flow_action *actions,
470 uint8_t actions_counter,
471 __rte_unused struct additional_para para)
473 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
477 add_set_ipv4_dscp(struct rte_flow_action *actions,
478 uint8_t actions_counter,
479 __rte_unused struct additional_para para)
481 static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
482 uint32_t dscp_value = para.counter;
488 /* Set dscp to random value each time */
489 dscp_value = dscp_value % 0xff;
491 set_dscp[para.core_idx].dscp = dscp_value;
493 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
494 actions[actions_counter].conf = &set_dscp[para.core_idx];
498 add_set_ipv6_dscp(struct rte_flow_action *actions,
499 uint8_t actions_counter,
500 __rte_unused struct additional_para para)
502 static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
503 uint32_t dscp_value = para.counter;
509 /* Set dscp to random value each time */
510 dscp_value = dscp_value % 0xff;
512 set_dscp[para.core_idx].dscp = dscp_value;
514 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
515 actions[actions_counter].conf = &set_dscp[para.core_idx];
519 add_flag(struct rte_flow_action *actions,
520 uint8_t actions_counter,
521 __rte_unused struct additional_para para)
523 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
527 add_ether_header(uint8_t **header, uint64_t data,
528 __rte_unused struct additional_para para)
530 struct rte_ether_hdr eth_hdr;
532 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
535 memset(ð_hdr, 0, sizeof(struct rte_ether_hdr));
536 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
537 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
538 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
539 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
540 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
541 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
542 memcpy(*header, ð_hdr, sizeof(eth_hdr));
543 *header += sizeof(eth_hdr);
547 add_vlan_header(uint8_t **header, uint64_t data,
548 __rte_unused struct additional_para para)
550 struct rte_vlan_hdr vlan_hdr;
553 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
556 vlan_value = VLAN_VALUE;
558 memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
559 vlan_hdr.vlan_tci = RTE_BE16(vlan_value);
561 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
562 vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
563 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
564 vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
565 memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
566 *header += sizeof(vlan_hdr);
570 add_ipv4_header(uint8_t **header, uint64_t data,
571 struct additional_para para)
573 struct rte_ipv4_hdr ipv4_hdr;
574 uint32_t ip_dst = para.counter;
576 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
583 memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
584 ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
585 ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
586 ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
587 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
588 ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
589 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
590 ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
591 memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
592 *header += sizeof(ipv4_hdr);
596 add_ipv6_header(uint8_t **header, uint64_t data,
597 __rte_unused struct additional_para para)
599 struct rte_ipv6_hdr ipv6_hdr;
601 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
604 memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
605 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
606 ipv6_hdr.proto = RTE_IP_TYPE_UDP;
607 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
608 ipv6_hdr.proto = RTE_IP_TYPE_GRE;
609 memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
610 *header += sizeof(ipv6_hdr);
614 add_udp_header(uint8_t **header, uint64_t data,
615 __rte_unused struct additional_para para)
617 struct rte_udp_hdr udp_hdr;
619 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
622 memset(&udp_hdr, 0, sizeof(struct rte_flow_item_udp));
623 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
624 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
625 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
626 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
627 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
628 udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
629 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
630 udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
631 memcpy(*header, &udp_hdr, sizeof(udp_hdr));
632 *header += sizeof(udp_hdr);
636 add_vxlan_header(uint8_t **header, uint64_t data,
637 struct additional_para para)
639 struct rte_vxlan_hdr vxlan_hdr;
640 uint32_t vni_value = para.counter;
642 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
649 memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));
651 vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
652 vxlan_hdr.vx_flags = 0x8;
654 memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
655 *header += sizeof(vxlan_hdr);
659 add_vxlan_gpe_header(uint8_t **header, uint64_t data,
660 struct additional_para para)
662 struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
663 uint32_t vni_value = para.counter;
665 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
672 memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));
674 vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
675 vxlan_gpe_hdr.vx_flags = 0x0c;
677 memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
678 *header += sizeof(vxlan_gpe_hdr);
682 add_gre_header(uint8_t **header, uint64_t data,
683 __rte_unused struct additional_para para)
685 struct rte_gre_hdr gre_hdr;
687 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
690 memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));
692 gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);
694 memcpy(*header, &gre_hdr, sizeof(gre_hdr));
695 *header += sizeof(gre_hdr);
699 add_geneve_header(uint8_t **header, uint64_t data,
700 struct additional_para para)
702 struct rte_geneve_hdr geneve_hdr;
703 uint32_t vni_value = para.counter;
706 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
713 memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));
715 for (i = 0; i < 3; i++)
716 geneve_hdr.vni[2 - i] = vni_value >> (i * 8);
718 memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
719 *header += sizeof(geneve_hdr);
723 add_gtp_header(uint8_t **header, uint64_t data,
724 struct additional_para para)
726 struct rte_gtp_hdr gtp_hdr;
727 uint32_t teid_value = para.counter;
729 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
736 memset(>p_hdr, 0, sizeof(struct rte_flow_item_gtp));
738 gtp_hdr.teid = RTE_BE32(teid_value);
739 gtp_hdr.msg_type = 255;
741 memcpy(*header, >p_hdr, sizeof(gtp_hdr));
742 *header += sizeof(gtp_hdr);
/* Ordered dispatch table of header builders used by add_raw_encap() and
 * add_raw_decap(); each entry appends its header to the raw buffer only
 * when the corresponding bit is set in the encap/decap items mask, so the
 * array order fixes the resulting header stacking (outer to inner). */
static const struct encap_decap_headers {
	struct additional_para para
	{.funct = add_ether_header},
	{.funct = add_vlan_header},
	{.funct = add_ipv4_header},
	{.funct = add_ipv6_header},
	{.funct = add_udp_header},
	{.funct = add_vxlan_header},
	{.funct = add_vxlan_gpe_header},
	{.funct = add_gre_header},
	{.funct = add_geneve_header},
	{.funct = add_gtp_header},
add_raw_encap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
	/* Lazily-allocated per-lcore buffer+conf; never freed (app lifetime). */
	static struct action_raw_encap_data *action_encap_data[RTE_MAX_LCORE] __rte_cache_aligned;
	uint64_t encap_data = para.encap_data;
	/* Avoid double allocation. */
	if (action_encap_data[para.core_idx] == NULL)
		action_encap_data[para.core_idx] = rte_malloc("encap_data",
			sizeof(struct action_raw_encap_data), 0);
	/* Check if allocation failed. */
	if (action_encap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");
	/* Reset conf; conf.data points at the embedded buffer. */
	*action_encap_data[para.core_idx] = (struct action_raw_encap_data) {
		.conf = (struct rte_flow_action_raw_encap) {
			.data = action_encap_data[para.core_idx]->data,
	/* Let every selected builder append its header and advance the cursor. */
	header = action_encap_data[para.core_idx]->data;
	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, encap_data, para);
	/* Final size = bytes actually written by the builders. */
	action_encap_data[para.core_idx]->conf.size = header -
		action_encap_data[para.core_idx]->data;
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
	actions[actions_counter].conf = &action_encap_data[para.core_idx]->conf;
add_raw_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
	/* Lazily-allocated per-lcore buffer+conf; never freed (app lifetime). */
	static struct action_raw_decap_data *action_decap_data[RTE_MAX_LCORE] __rte_cache_aligned;
	uint64_t decap_data = para.decap_data;
	/* Avoid double allocation. */
	if (action_decap_data[para.core_idx] == NULL)
		action_decap_data[para.core_idx] = rte_malloc("decap_data",
			sizeof(struct action_raw_decap_data), 0);
	/* Check if allocation failed. */
	if (action_decap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");
	/* Reset conf; conf.data points at the embedded buffer. */
	*action_decap_data[para.core_idx] = (struct action_raw_decap_data) {
		.conf = (struct rte_flow_action_raw_decap) {
			.data = action_decap_data[para.core_idx]->data,
	/* Build the header stack to strip, same builders as raw encap. */
	header = action_decap_data[para.core_idx]->data;
	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, decap_data, para);
	/* Final size = bytes actually written by the builders. */
	action_decap_data[para.core_idx]->conf.size = header -
		action_decap_data[para.core_idx]->data;
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
	actions[actions_counter].conf = &action_decap_data[para.core_idx]->conf;
add_vxlan_encap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
	/* NOTE(review): para is annotated __rte_unused but para.counter and
	 * para.core_idx are read below — the attribute is misleading and
	 * should be dropped. */
	static struct rte_flow_action_vxlan_encap vxlan_encap[RTE_MAX_LCORE] __rte_cache_aligned;
	/* Shared item templates: all lcores point their definition at the
	 * same static items/spec storage. */
	static struct rte_flow_item items[5];
	static struct rte_flow_item_eth item_eth;
	static struct rte_flow_item_ipv4 item_ipv4;
	static struct rte_flow_item_udp item_udp;
	static struct rte_flow_item_vxlan item_vxlan;
	uint32_t ip_dst = para.counter;
	/* Outer L2. */
	items[0].spec = &item_eth;
	items[0].mask = &item_eth;
	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	/* Outer L3: dst address varies with the flow counter. */
	item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
	item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
	item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
	items[1].spec = &item_ipv4;
	items[1].mask = &item_ipv4;
	items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	/* Outer L4: standard VXLAN destination port. */
	item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
	items[2].spec = &item_udp;
	items[2].mask = &item_udp;
	items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	/* Tunnel: constant VNI = 1 (lowest VNI byte). */
	item_vxlan.vni[2] = 1;
	items[3].spec = &item_vxlan;
	items[3].mask = &item_vxlan;
	items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
	items[4].type = RTE_FLOW_ITEM_TYPE_END;
	vxlan_encap[para.core_idx].definition = items;
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
	actions[actions_counter].conf = &vxlan_encap[para.core_idx];
887 add_vxlan_decap(struct rte_flow_action *actions,
888 uint8_t actions_counter,
889 __rte_unused struct additional_para para)
891 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
895 add_meter(struct rte_flow_action *actions,
896 uint8_t actions_counter,
897 __rte_unused struct additional_para para)
899 static struct rte_flow_action_meter
900 meters[RTE_MAX_LCORE] __rte_cache_aligned;
902 meters[para.core_idx].mtr_id = para.counter;
903 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_METER;
904 actions[actions_counter].conf = &meters[para.core_idx];
/* Build the rte_flow action array for one flow: walk the caller-provided
 * flow_actions bitmask list and, for every requested action, invoke the
 * matching add_* generator in actions_list order, then terminate with
 * ACTION_TYPE_END. Queue/RSS parameters switch to the hairpin queue range
 * when hairpinq is non-zero. */
fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
	uint32_t counter, uint16_t next_table, uint16_t hairpinq,
	uint64_t encap_data, uint64_t decap_data, uint8_t core_idx)
	struct additional_para additional_para_data;
	uint8_t actions_counter = 0;
	uint16_t hairpin_queues[hairpinq];
	uint16_t queues[RXQ_NUM];
	/* Regular Rx queues 0..RXQ_NUM-1. */
	for (i = 0; i < RXQ_NUM; i++)
	/* Hairpin queues are numbered after the regular Rx queues. */
	for (i = 0; i < hairpinq; i++)
		hairpin_queues[i] = i + RXQ_NUM;
	/* Default (non-hairpin) parameter set handed to every generator. */
	additional_para_data = (struct additional_para){
		.queue = counter % RXQ_NUM,
		.next_table = next_table,
		.queues_number = RXQ_NUM,
		.encap_data = encap_data,
		.decap_data = decap_data,
		.core_idx = core_idx,
		/* Hairpin mode: redirect queue/RSS targets to the hairpin range. */
		additional_para_data.queues = hairpin_queues;
		additional_para_data.queues_number = hairpinq;
		additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
	/* Mask -> generator dispatch table; scanned once per requested action. */
	static const struct actions_dict {
		struct rte_flow_action *actions,
		uint8_t actions_counter,
		struct additional_para para
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
		.funct = add_set_meta,
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
		.funct = add_set_tag,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_FLAG
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
		.funct = add_set_src_mac,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_MAC_DST
		.funct = add_set_dst_mac,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
		.funct = add_set_src_ipv4,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
		.funct = add_set_dst_ipv4,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
		.funct = add_set_src_ipv6,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
		.funct = add_set_dst_ipv6,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_TP_SRC
		.funct = add_set_src_tp,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_TP_DST
		.funct = add_set_dst_tp,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
		.funct = add_inc_tcp_ack,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
		.funct = add_dec_tcp_ack,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
		.funct = add_inc_tcp_seq,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
		.funct = add_dec_tcp_seq,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_TTL
		.funct = add_set_ttl,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_DEC_TTL
		.funct = add_dec_ttl,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
		.funct = add_set_ipv4_dscp,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
		.funct = add_set_ipv6_dscp,
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
		.funct = add_port_id
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
		/* Hairpin variants reuse the queue/RSS generators with the
		 * hairpin parameter set selected above. */
		.mask = HAIRPIN_QUEUE_ACTION,
		.mask = HAIRPIN_RSS_ACTION,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_RAW_ENCAP
		.funct = add_raw_encap,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_RAW_DECAP
		.funct = add_raw_decap,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
		.funct = add_vxlan_encap,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
		.funct = add_vxlan_decap,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_METER
	/* For each requested action mask (0-terminated list), find its
	 * generator and append the action; actions_counter advances once
	 * per emitted action. */
	for (j = 0; j < MAX_ACTIONS_NUM; j++) {
		if (flow_actions[j] == 0)
		for (i = 0; i < RTE_DIM(actions_list); i++) {
			if ((flow_actions[j] &
				actions_list[i].mask) == 0)
			actions_list[i].funct(
				actions, actions_counter++,
				additional_para_data
	/* Terminate the action array as rte_flow requires. */
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;