1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
4 * This file contains the implementations of the action generators.
5 * Each generator is responsible for preparing its action instance
6 * and initializing it with needed data.
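 *
 * Every generator follows the same pattern: it fills in
 * actions[actions_counter].type and, when the action takes a
 * configuration, points actions[actions_counter].conf at storage that
 * outlives the call. Mutable configurations live in per-lcore static
 * arrays indexed by para.core_idx so cores never share state.
 *
 * A minimal generator sketch (illustrative only; the "foo" action and
 * its fields are hypothetical):
 *
 *	static void
 *	add_foo(struct rte_flow_action *actions, uint8_t actions_counter,
 *		struct additional_para para)
 *	{
 *		static struct rte_flow_action_foo conf[RTE_MAX_LCORE] __rte_cache_aligned;
 *
 *		conf[para.core_idx].value = para.counter;
 *		actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FOO;
 *		actions[actions_counter].conf = &conf[para.core_idx];
 *	}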
10 #include <rte_malloc.h>
12 #include <rte_ethdev.h>
13 #include <rte_vxlan.h>
16 #include <rte_geneve.h>
18 #include "actions_gen.h"
23 /* Storage for additional parameters for actions */
24 struct additional_para {
28 uint16_t queues_number;
37 /* Storage for struct rte_flow_action_raw_encap including external data. */
38 struct action_raw_encap_data {
39 struct rte_flow_action_raw_encap conf;
41 uint8_t preserve[128];
45 /* Storage for struct rte_flow_action_raw_decap including external data. */
46 struct action_raw_decap_data {
47 struct rte_flow_action_raw_decap conf;
52 /* Storage for struct rte_flow_action_rss including external data. */
53 struct action_rss_data {
54 struct rte_flow_action_rss conf;
60 add_mark(struct rte_flow_action *actions,
61 uint8_t actions_counter,
62 struct additional_para para)
64 static struct rte_flow_action_mark mark_actions[RTE_MAX_LCORE] __rte_cache_aligned;
65 uint32_t counter = para.counter;
68 /* Mark ID in the range 1 to 255, derived from the flow counter */
69 mark_actions[para.core_idx].id = (counter % 255) + 1;
72 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
73 actions[actions_counter].conf = &mark_actions[para.core_idx];
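/* Send the flow to one queue; para.queue is computed in fill_actions() from the flow counter and the number of configured queues. */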
77 add_queue(struct rte_flow_action *actions,
78 uint8_t actions_counter,
79 struct additional_para para)
81 static struct rte_flow_action_queue queue_actions[RTE_MAX_LCORE] __rte_cache_aligned;
84 queue_actions[para.core_idx].index = para.queue;
87 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
88 actions[actions_counter].conf = &queue_actions[para.core_idx];
92 add_jump(struct rte_flow_action *actions,
93 uint8_t actions_counter,
94 struct additional_para para)
96 static struct rte_flow_action_jump jump_action;
99 jump_action.group = para.next_table;
102 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
103 actions[actions_counter].conf = &jump_action;
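/*
 * The RSS configuration, hash key and queue list are kept together in a
 * single per-lcore heap allocation (struct action_rss_data), so the
 * pointers stored in conf stay valid after this generator returns.
 */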
107 add_rss(struct rte_flow_action *actions,
108 uint8_t actions_counter,
109 struct additional_para para)
111 static struct action_rss_data *rss_data[RTE_MAX_LCORE] __rte_cache_aligned;
115 if (rss_data[para.core_idx] == NULL)
116 rss_data[para.core_idx] = rte_malloc("rss_data",
117 sizeof(struct action_rss_data), 0);
119 if (rss_data[para.core_idx] == NULL)
120 rte_exit(EXIT_FAILURE, "No memory available!");
122 *rss_data[para.core_idx] = (struct action_rss_data){
123 .conf = (struct rte_flow_action_rss){
124 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
126 .types = GET_RSS_HF(),
127 .key_len = sizeof(rss_data[para.core_idx]->key),
128 .queue_num = para.queues_number,
129 .key = rss_data[para.core_idx]->key,
130 .queue = rss_data[para.core_idx]->queue,
136 for (queue = 0; queue < para.queues_number; queue++)
137 rss_data[para.core_idx]->queue[queue] = para.queues[queue];
139 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
140 actions[actions_counter].conf = &rss_data[para.core_idx]->conf;
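/* SET_META and SET_TAG write the fixed META_DATA value with a full 32-bit mask; since the data never varies, a single static configuration is shared by all cores. */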
144 add_set_meta(struct rte_flow_action *actions,
145 uint8_t actions_counter,
146 __rte_unused struct additional_para para)
148 static struct rte_flow_action_set_meta meta_action = {
149 .data = RTE_BE32(META_DATA),
150 .mask = RTE_BE32(0xffffffff),
153 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
154 actions[actions_counter].conf = &meta_action;
158 add_set_tag(struct rte_flow_action *actions,
159 uint8_t actions_counter,
160 __rte_unused struct additional_para para)
162 static struct rte_flow_action_set_tag tag_action = {
163 .data = RTE_BE32(META_DATA),
164 .mask = RTE_BE32(0xffffffff),
168 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
169 actions[actions_counter].conf = &tag_action;
173 add_port_id(struct rte_flow_action *actions,
174 uint8_t actions_counter,
175 struct additional_para para)
177 static struct rte_flow_action_port_id port_id = {
181 port_id.id = para.dst_port;
182 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
183 actions[actions_counter].conf = &port_id;
187 add_drop(struct rte_flow_action *actions,
188 uint8_t actions_counter,
189 __rte_unused struct additional_para para)
191 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
195 add_count(struct rte_flow_action *actions,
196 uint8_t actions_counter,
197 __rte_unused struct additional_para para)
199 static struct rte_flow_action_count count_action;
201 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
202 actions[actions_counter].conf = &count_action;
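/*
 * The modify-field generators below derive the value to set from the
 * per-flow counter, so each rule carries different data; when unique
 * data is not requested (the !para.unique_data checks) a fixed value is
 * used instead, shared by all flows.
 */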
206 add_set_src_mac(struct rte_flow_action *actions,
207 uint8_t actions_counter,
208 struct additional_para para)
210 static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
211 uint32_t mac = para.counter;
215 if (!para.unique_data)
218 /* MAC address is derived from the flow counter */
219 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
220 set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
224 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
225 actions[actions_counter].conf = &set_macs[para.core_idx];
229 add_set_dst_mac(struct rte_flow_action *actions,
230 uint8_t actions_counter,
231 struct additional_para para)
233 static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
234 uint32_t mac = para.counter;
238 if (!para.unique_data)
241 /* MAC address is derived from the flow counter */
242 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
243 set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
247 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
248 actions[actions_counter].conf = &set_macs[para.core_idx];
252 add_set_src_ipv4(struct rte_flow_action *actions,
253 uint8_t actions_counter,
254 struct additional_para para)
256 static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
257 uint32_t ip = para.counter;
260 if (!para.unique_data)
263 /* IPv4 address is derived from the flow counter */
264 set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
266 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
267 actions[actions_counter].conf = &set_ipv4[para.core_idx];
271 add_set_dst_ipv4(struct rte_flow_action *actions,
272 uint8_t actions_counter,
273 struct additional_para para)
275 static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
276 uint32_t ip = para.counter;
279 if (!para.unique_data)
282 /* IPv4 address is derived from the flow counter */
283 set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
285 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
286 actions[actions_counter].conf = &set_ipv4[para.core_idx];
290 add_set_src_ipv6(struct rte_flow_action *actions,
291 uint8_t actions_counter,
292 struct additional_para para)
294 static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
295 uint32_t ipv6 = para.counter;
299 if (!para.unique_data)
302 /* IPv6 address is derived from the flow counter */
303 for (i = 0; i < 16; i++) {
304 set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
308 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
309 actions[actions_counter].conf = &set_ipv6[para.core_idx];
313 add_set_dst_ipv6(struct rte_flow_action *actions,
314 uint8_t actions_counter,
315 struct additional_para para)
317 static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
318 uint32_t ipv6 = para.counter;
322 if (!para.unique_data)
325 /* IPv6 address is derived from the flow counter */
326 for (i = 0; i < 16; i++) {
327 set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
331 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
332 actions[actions_counter].conf = &set_ipv6[para.core_idx];
336 add_set_src_tp(struct rte_flow_action *actions,
337 uint8_t actions_counter,
338 struct additional_para para)
340 static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
341 uint32_t tp = para.counter;
344 if (!para.unique_data)
347 /* Transport source port is derived from the flow counter */
350 set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
352 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
353 actions[actions_counter].conf = &set_tp[para.core_idx];
357 add_set_dst_tp(struct rte_flow_action *actions,
358 uint8_t actions_counter,
359 struct additional_para para)
361 static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
362 uint32_t tp = para.counter;
365 if (!para.unique_data)
368 /* Transport destination port is derived from the flow counter */
372 set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
374 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
375 actions[actions_counter].conf = &set_tp[para.core_idx];
379 add_inc_tcp_ack(struct rte_flow_action *actions,
380 uint8_t actions_counter,
381 struct additional_para para)
383 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
384 uint32_t ack_value = para.counter;
387 if (!para.unique_data)
390 value[para.core_idx] = RTE_BE32(ack_value);
392 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
393 actions[actions_counter].conf = &value[para.core_idx];
397 add_dec_tcp_ack(struct rte_flow_action *actions,
398 uint8_t actions_counter,
399 struct additional_para para)
401 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
402 uint32_t ack_value = para.counter;
405 if (!para.unique_data)
408 value[para.core_idx] = RTE_BE32(ack_value);
410 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
411 actions[actions_counter].conf = &value[para.core_idx];
415 add_inc_tcp_seq(struct rte_flow_action *actions,
416 uint8_t actions_counter,
417 struct additional_para para)
419 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
420 uint32_t seq_value = para.counter;
423 if (!para.unique_data)
426 value[para.core_idx] = RTE_BE32(seq_value);
428 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
429 actions[actions_counter].conf = &value[para.core_idx];
433 add_dec_tcp_seq(struct rte_flow_action *actions,
434 uint8_t actions_counter,
435 struct additional_para para)
437 static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
438 uint32_t seq_value = para.counter;
441 if (!para.unique_data)
444 value[para.core_idx] = RTE_BE32(seq_value);
446 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
447 actions[actions_counter].conf = &value[para.core_idx];
451 add_set_ttl(struct rte_flow_action *actions,
452 uint8_t actions_counter,
453 struct additional_para para)
455 static struct rte_flow_action_set_ttl set_ttl[RTE_MAX_LCORE] __rte_cache_aligned;
456 uint32_t ttl_value = para.counter;
459 if (!para.unique_data)
462 /* TTL is derived from the flow counter */
463 ttl_value = ttl_value % 0xff;
465 set_ttl[para.core_idx].ttl_value = ttl_value;
467 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
468 actions[actions_counter].conf = &set_ttl[para.core_idx];
472 add_dec_ttl(struct rte_flow_action *actions,
473 uint8_t actions_counter,
474 __rte_unused struct additional_para para)
476 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
480 add_set_ipv4_dscp(struct rte_flow_action *actions,
481 uint8_t actions_counter,
482 struct additional_para para)
484 static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
485 uint32_t dscp_value = para.counter;
488 if (!para.unique_data)
491 /* DSCP is derived from the flow counter */
492 dscp_value = dscp_value % 0xff;
494 set_dscp[para.core_idx].dscp = dscp_value;
496 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
497 actions[actions_counter].conf = &set_dscp[para.core_idx];
501 add_set_ipv6_dscp(struct rte_flow_action *actions,
502 uint8_t actions_counter,
503 struct additional_para para)
505 static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
506 uint32_t dscp_value = para.counter;
509 if (!para.unique_data)
512 /* DSCP is derived from the flow counter */
513 dscp_value = dscp_value % 0xff;
515 set_dscp[para.core_idx].dscp = dscp_value;
517 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
518 actions[actions_counter].conf = &set_dscp[para.core_idx];
522 add_flag(struct rte_flow_action *actions,
523 uint8_t actions_counter,
524 __rte_unused struct additional_para para)
526 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
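/*
 * Helpers for RAW_ENCAP/RAW_DECAP: each add_*_header() appends its
 * header at *header and advances the pointer, but only when the
 * corresponding item bit is set in the encap/decap bitmask passed in
 * 'data'.
 */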
530 add_ether_header(uint8_t **header, uint64_t data,
531 __rte_unused struct additional_para para)
533 struct rte_ether_hdr eth_hdr;
535 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
538 memset(&eth_hdr, 0, sizeof(struct rte_ether_hdr));
539 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
540 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
541 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
542 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
543 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
544 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
545 memcpy(*header, &eth_hdr, sizeof(eth_hdr));
546 *header += sizeof(eth_hdr);
550 add_vlan_header(uint8_t **header, uint64_t data,
551 __rte_unused struct additional_para para)
553 struct rte_vlan_hdr vlan_hdr;
556 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
559 vlan_value = VLAN_VALUE;
561 memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
562 vlan_hdr.vlan_tci = RTE_BE16(vlan_value);
564 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
565 vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
566 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
567 vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
568 memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
569 *header += sizeof(vlan_hdr);
573 add_ipv4_header(uint8_t **header, uint64_t data,
574 struct additional_para para)
576 struct rte_ipv4_hdr ipv4_hdr;
577 uint32_t ip_dst = para.counter;
579 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
583 if (!para.unique_data)
586 memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
587 ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
588 ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
589 ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
590 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
591 ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
592 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
593 ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
594 memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
595 *header += sizeof(ipv4_hdr);
599 add_ipv6_header(uint8_t **header, uint64_t data,
600 __rte_unused struct additional_para para)
602 struct rte_ipv6_hdr ipv6_hdr;
604 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
607 memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
608 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
609 ipv6_hdr.proto = RTE_IP_TYPE_UDP;
610 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
611 ipv6_hdr.proto = RTE_IP_TYPE_GRE;
612 memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
613 *header += sizeof(ipv6_hdr);
617 add_udp_header(uint8_t **header, uint64_t data,
618 __rte_unused struct additional_para para)
620 struct rte_udp_hdr udp_hdr;
622 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
625 memset(&udp_hdr, 0, sizeof(struct rte_udp_hdr));
626 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
627 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
628 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
629 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
630 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
631 udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
632 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
633 udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
634 memcpy(*header, &udp_hdr, sizeof(udp_hdr));
635 *header += sizeof(udp_hdr);
639 add_vxlan_header(uint8_t **header, uint64_t data,
640 struct additional_para para)
642 struct rte_vxlan_hdr vxlan_hdr;
643 uint32_t vni_value = para.counter;
645 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
649 if (!para.unique_data)
652 memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));
654 vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
655 vxlan_hdr.vx_flags = 0x8;
657 memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
658 *header += sizeof(vxlan_hdr);
662 add_vxlan_gpe_header(uint8_t **header, uint64_t data,
663 struct additional_para para)
665 struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
666 uint32_t vni_value = para.counter;
668 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
672 if (!para.unique_data)
675 memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));
677 vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
678 vxlan_gpe_hdr.vx_flags = 0x0c;
680 memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
681 *header += sizeof(vxlan_gpe_hdr);
685 add_gre_header(uint8_t **header, uint64_t data,
686 __rte_unused struct additional_para para)
688 struct rte_gre_hdr gre_hdr;
690 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
693 memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));
695 gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);
697 memcpy(*header, &gre_hdr, sizeof(gre_hdr));
698 *header += sizeof(gre_hdr);
702 add_geneve_header(uint8_t **header, uint64_t data,
703 struct additional_para para)
705 struct rte_geneve_hdr geneve_hdr;
706 uint32_t vni_value = para.counter;
709 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
713 if (!para.unique_data)
716 memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));
718 for (i = 0; i < 3; i++)
719 geneve_hdr.vni[2 - i] = vni_value >> (i * 8);
721 memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
722 *header += sizeof(geneve_hdr);
726 add_gtp_header(uint8_t **header, uint64_t data,
727 struct additional_para para)
729 struct rte_gtp_hdr gtp_hdr;
730 uint32_t teid_value = para.counter;
732 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
736 if (!para.unique_data)
739 memset(&gtp_hdr, 0, sizeof(struct rte_gtp_hdr));
741 gtp_hdr.teid = RTE_BE32(teid_value);
742 gtp_hdr.msg_type = 255;
744 memcpy(*header, &gtp_hdr, sizeof(gtp_hdr));
745 *header += sizeof(gtp_hdr);
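/* The builders are listed from outermost to innermost header, so the raw buffer is assembled in protocol order. */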
748 static const struct encap_decap_headers {
752 struct additional_para para
755 {.funct = add_ether_header},
756 {.funct = add_vlan_header},
757 {.funct = add_ipv4_header},
758 {.funct = add_ipv6_header},
759 {.funct = add_udp_header},
760 {.funct = add_vxlan_header},
761 {.funct = add_vxlan_gpe_header},
762 {.funct = add_gre_header},
763 {.funct = add_geneve_header},
764 {.funct = add_gtp_header},
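/* add_raw_encap() runs every header builder against para.encap_data and records the number of bytes written in conf.size. */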
768 add_raw_encap(struct rte_flow_action *actions,
769 uint8_t actions_counter,
770 struct additional_para para)
772 static struct action_raw_encap_data *action_encap_data[RTE_MAX_LCORE] __rte_cache_aligned;
773 uint64_t encap_data = para.encap_data;
777 /* Avoid double allocation. */
778 if (action_encap_data[para.core_idx] == NULL)
779 action_encap_data[para.core_idx] = rte_malloc("encap_data",
780 sizeof(struct action_raw_encap_data), 0);
782 /* Check if allocation failed. */
783 if (action_encap_data[para.core_idx] == NULL)
784 rte_exit(EXIT_FAILURE, "No memory available!");
786 *action_encap_data[para.core_idx] = (struct action_raw_encap_data) {
787 .conf = (struct rte_flow_action_raw_encap) {
788 .data = action_encap_data[para.core_idx]->data,
792 header = action_encap_data[para.core_idx]->data;
794 for (i = 0; i < RTE_DIM(headers); i++)
795 headers[i].funct(&header, encap_data, para);
797 action_encap_data[para.core_idx]->conf.size = header -
798 action_encap_data[para.core_idx]->data;
800 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
801 actions[actions_counter].conf = &action_encap_data[para.core_idx]->conf;
805 add_raw_decap(struct rte_flow_action *actions,
806 uint8_t actions_counter,
807 struct additional_para para)
809 static struct action_raw_decap_data *action_decap_data[RTE_MAX_LCORE] __rte_cache_aligned;
810 uint64_t decap_data = para.decap_data;
814 /* Avoid double allocation. */
815 if (action_decap_data[para.core_idx] == NULL)
816 action_decap_data[para.core_idx] = rte_malloc("decap_data",
817 sizeof(struct action_raw_decap_data), 0);
819 /* Check if allocation failed. */
820 if (action_decap_data[para.core_idx] == NULL)
821 rte_exit(EXIT_FAILURE, "No memory available!");
823 *action_decap_data[para.core_idx] = (struct action_raw_decap_data) {
824 .conf = (struct rte_flow_action_raw_decap) {
825 .data = action_decap_data[para.core_idx]->data,
829 header = action_decap_data[para.core_idx]->data;
831 for (i = 0; i < RTE_DIM(headers); i++)
832 headers[i].funct(&header, decap_data, para);
834 action_decap_data[para.core_idx]->conf.size = header -
835 action_decap_data[para.core_idx]->data;
837 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
838 actions[actions_counter].conf = &action_decap_data[para.core_idx]->conf;
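/* VXLAN encapsulation is described with a flow item list (ETH / IPV4 / UDP / VXLAN / END) rather than a raw byte buffer. */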
842 add_vxlan_encap(struct rte_flow_action *actions,
843 uint8_t actions_counter,
844 struct additional_para para)
846 static struct rte_flow_action_vxlan_encap vxlan_encap[RTE_MAX_LCORE] __rte_cache_aligned;
847 static struct rte_flow_item items[5];
848 static struct rte_flow_item_eth item_eth;
849 static struct rte_flow_item_ipv4 item_ipv4;
850 static struct rte_flow_item_udp item_udp;
851 static struct rte_flow_item_vxlan item_vxlan;
852 uint32_t ip_dst = para.counter;
855 if (!para.unique_data)
858 items[0].spec = &item_eth;
859 items[0].mask = &item_eth;
860 items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
862 item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
863 item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
864 item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
865 items[1].spec = &item_ipv4;
866 items[1].mask = &item_ipv4;
867 items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
870 item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
871 items[2].spec = &item_udp;
872 items[2].mask = &item_udp;
873 items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
876 item_vxlan.vni[2] = 1;
877 items[3].spec = &item_vxlan;
878 items[3].mask = &item_vxlan;
879 items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
881 items[4].type = RTE_FLOW_ITEM_TYPE_END;
883 vxlan_encap[para.core_idx].definition = items;
885 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
886 actions[actions_counter].conf = &vxlan_encap[para.core_idx];
890 add_vxlan_decap(struct rte_flow_action *actions,
891 uint8_t actions_counter,
892 __rte_unused struct additional_para para)
894 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
898 add_meter(struct rte_flow_action *actions,
899 uint8_t actions_counter,
900 struct additional_para para)
902 static struct rte_flow_action_meter
903 meters[RTE_MAX_LCORE] __rte_cache_aligned;
905 meters[para.core_idx].mtr_id = para.counter;
906 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_METER;
907 actions[actions_counter].conf = &meters[para.core_idx];
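/*
 * fill_actions() walks the flow_actions[] masks in order, dispatches
 * each one to its generator through actions_list[], and terminates the
 * array with an END action.
 *
 * Illustrative call (values and variables hypothetical):
 *
 *	struct rte_flow_action actions[MAX_ACTIONS_NUM];
 *	uint64_t flow_actions[MAX_ACTIONS_NUM] = {
 *		FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
 *		FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
 *	};
 *
 *	fill_actions(actions, flow_actions, counter, 0, 0, 0, 0, core_idx,
 *		     false, rx_queues_count, dst_port);
 */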
911 fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
912 uint32_t counter, uint16_t next_table, uint16_t hairpinq,
913 uint64_t encap_data, uint64_t decap_data, uint8_t core_idx,
914 bool unique_data, uint8_t rx_queues_count, uint16_t dst_port)
916 struct additional_para additional_para_data;
917 uint8_t actions_counter = 0;
918 uint16_t hairpin_queues[hairpinq];
919 uint16_t queues[rx_queues_count];
922 for (i = 0; i < rx_queues_count; i++)
925 for (i = 0; i < hairpinq; i++)
926 hairpin_queues[i] = i + rx_queues_count;
928 additional_para_data = (struct additional_para){
929 .queue = counter % rx_queues_count,
930 .next_table = next_table,
932 .queues_number = rx_queues_count,
934 .encap_data = encap_data,
935 .decap_data = decap_data,
936 .core_idx = core_idx,
937 .unique_data = unique_data,
938 .dst_port = dst_port,
942 additional_para_data.queues = hairpin_queues;
943 additional_para_data.queues_number = hairpinq;
944 additional_para_data.queue = (counter % hairpinq) + rx_queues_count;
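/* Dispatch table: maps each action mask bit to the generator that builds it. */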
947 static const struct actions_dict {
950 struct rte_flow_action *actions,
951 uint8_t actions_counter,
952 struct additional_para para
956 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
960 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
964 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
965 .funct = add_set_meta,
968 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
969 .funct = add_set_tag,
972 .mask = FLOW_ACTION_MASK(
973 RTE_FLOW_ACTION_TYPE_FLAG
978 .mask = FLOW_ACTION_MASK(
979 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
981 .funct = add_set_src_mac,
984 .mask = FLOW_ACTION_MASK(
985 RTE_FLOW_ACTION_TYPE_SET_MAC_DST
987 .funct = add_set_dst_mac,
990 .mask = FLOW_ACTION_MASK(
991 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
993 .funct = add_set_src_ipv4,
996 .mask = FLOW_ACTION_MASK(
997 RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
999 .funct = add_set_dst_ipv4,
1002 .mask = FLOW_ACTION_MASK(
1003 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
1005 .funct = add_set_src_ipv6,
1008 .mask = FLOW_ACTION_MASK(
1009 RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
1011 .funct = add_set_dst_ipv6,
1014 .mask = FLOW_ACTION_MASK(
1015 RTE_FLOW_ACTION_TYPE_SET_TP_SRC
1017 .funct = add_set_src_tp,
1020 .mask = FLOW_ACTION_MASK(
1021 RTE_FLOW_ACTION_TYPE_SET_TP_DST
1023 .funct = add_set_dst_tp,
1026 .mask = FLOW_ACTION_MASK(
1027 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
1029 .funct = add_inc_tcp_ack,
1032 .mask = FLOW_ACTION_MASK(
1033 RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
1035 .funct = add_dec_tcp_ack,
1038 .mask = FLOW_ACTION_MASK(
1039 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
1041 .funct = add_inc_tcp_seq,
1044 .mask = FLOW_ACTION_MASK(
1045 RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
1047 .funct = add_dec_tcp_seq,
1050 .mask = FLOW_ACTION_MASK(
1051 RTE_FLOW_ACTION_TYPE_SET_TTL
1053 .funct = add_set_ttl,
1056 .mask = FLOW_ACTION_MASK(
1057 RTE_FLOW_ACTION_TYPE_DEC_TTL
1059 .funct = add_dec_ttl,
1062 .mask = FLOW_ACTION_MASK(
1063 RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
1065 .funct = add_set_ipv4_dscp,
1068 .mask = FLOW_ACTION_MASK(
1069 RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
1071 .funct = add_set_ipv6_dscp,
1074 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
1078 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
1082 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
1086 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
1087 .funct = add_port_id
1090 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
1094 .mask = HAIRPIN_QUEUE_ACTION,
1098 .mask = HAIRPIN_RSS_ACTION,
1102 .mask = FLOW_ACTION_MASK(
1103 RTE_FLOW_ACTION_TYPE_RAW_ENCAP
1105 .funct = add_raw_encap,
1108 .mask = FLOW_ACTION_MASK(
1109 RTE_FLOW_ACTION_TYPE_RAW_DECAP
1111 .funct = add_raw_decap,
1114 .mask = FLOW_ACTION_MASK(
1115 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
1117 .funct = add_vxlan_encap,
1120 .mask = FLOW_ACTION_MASK(
1121 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
1123 .funct = add_vxlan_decap,
1126 .mask = FLOW_ACTION_MASK(
1127 RTE_FLOW_ACTION_TYPE_METER
1133 for (j = 0; j < MAX_ACTIONS_NUM; j++) {
1134 if (flow_actions[j] == 0)
1136 for (i = 0; i < RTE_DIM(actions_list); i++) {
1137 if ((flow_actions[j] &
1138 actions_list[i].mask) == 0)
1140 actions_list[i].funct(
1141 actions, actions_counter++,
1142 additional_para_data
1147 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;