/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 *
 * The file contains the implementations of actions generators.
 * Each generator is responsible for preparing its action instance
 * and initializing it with needed data.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#include <rte_malloc.h>
#include <rte_debug.h>
#include <rte_flow.h>
#include <rte_ethdev.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_gre.h>
#include <rte_geneve.h>

#include "actions_gen.h"
/* App-local headers, assumed to provide FLOW_ITEM_MASK, FLOW_ACTION_MASK,
 * GET_RSS_HF, RXQ_NUM, MAX_ACTIONS_NUM, META_DATA, VLAN_VALUE and friends.
 */
#include "flow_gen.h"
#include "config.h"
/* Storage for additional parameters for actions */
struct additional_para {
	uint16_t queue;
	uint16_t next_table;
	uint16_t *queues;
	uint16_t queues_number;
	uint32_t counter;
	uint64_t encap_data;
	uint64_t decap_data;
	uint8_t core_idx;
	bool unique_data;
};
/* Storage for struct rte_flow_action_raw_encap including external data. */
struct action_raw_encap_data {
	struct rte_flow_action_raw_encap conf;
	uint8_t data[128];
	uint8_t preserve[128];
};
/* Storage for struct rte_flow_action_raw_decap including external data. */
struct action_raw_decap_data {
	struct rte_flow_action_raw_decap conf;
	uint8_t data[128];
};
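/*
 * In both raw encap/decap wrappers above, conf.data is pointed at the
 * trailing data[] buffer of the same allocation, so a single rte_malloc()
 * yields a self-contained action configuration.
 */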
/* Storage for struct rte_flow_action_rss including external data. */
struct action_rss_data {
	struct rte_flow_action_rss conf;
	uint8_t key[40];	/* 40-byte hash key, a common NIC default */
	uint16_t queue[128];	/* sized to hold RXQ_NUM or hairpin queues */
};
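/*
 * Each generator below fills one slot of a static per-lcore array
 * (indexed by para.core_idx) and points the flow action at that slot.
 * The configuration stays alive after the generator returns, and cores
 * never touch each other's slots, so no locking is needed.
 */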
static void
add_mark(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_mark mark_actions[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t counter = para.counter;

	/* Mark ID is the counter folded into the range 1..255 */
	mark_actions[para.core_idx].id = (counter % 255) + 1;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
	actions[actions_counter].conf = &mark_actions[para.core_idx];
}
static void
add_queue(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_queue queue_actions[RTE_MAX_LCORE] __rte_cache_aligned;

	queue_actions[para.core_idx].index = para.queue;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[actions_counter].conf = &queue_actions[para.core_idx];
}
static void
add_jump(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_jump jump_action;

	jump_action.group = para.next_table;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
	actions[actions_counter].conf = &jump_action;
}
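/*
 * RSS needs more state than the fixed-size generators above: the key and
 * queue list live in a heap-allocated action_rss_data that is created on
 * first use per lcore, then reused (and overwritten) on every later call.
 */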
static void
add_rss(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_rss_data *rss_data[RTE_MAX_LCORE] __rte_cache_aligned;
	uint16_t queue;

	/* Allocate once per lcore, then reuse. */
	if (rss_data[para.core_idx] == NULL)
		rss_data[para.core_idx] = rte_malloc("rss_data",
			sizeof(struct action_rss_data), 0);

	if (rss_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*rss_data[para.core_idx] = (struct action_rss_data){
		.conf = (struct rte_flow_action_rss){
			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
			.level = 0,
			.types = GET_RSS_HF(),
			.key_len = sizeof(rss_data[para.core_idx]->key),
			.queue_num = para.queues_number,
			.key = rss_data[para.core_idx]->key,
			.queue = rss_data[para.core_idx]->queue,
		},
	};

	for (queue = 0; queue < para.queues_number; queue++)
		rss_data[para.core_idx]->queue[queue] = para.queues[queue];

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
	actions[actions_counter].conf = &rss_data[para.core_idx]->conf;
}
static void
add_set_meta(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_set_meta meta_action = {
		.data = RTE_BE32(META_DATA),
		.mask = RTE_BE32(0xffffffff),
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
	actions[actions_counter].conf = &meta_action;
}
static void
add_set_tag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_set_tag tag_action = {
		.data = RTE_BE32(META_DATA),
		.mask = RTE_BE32(0xffffffff),
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
	actions[actions_counter].conf = &tag_action;
}
static void
add_port_id(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_port_id port_id = {
		.id = PORT_ID_DST,	/* destination port id from config.h */
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	actions[actions_counter].conf = &port_id;
}
static void
add_drop(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
}
static void
add_count(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_count count_action;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
	actions[actions_counter].conf = &count_action;
}
static void
add_set_src_mac(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t mac = para.counter;
	uint16_t i;

	/* Fixed value */
	if (!para.unique_data)
		mac = 1;

	/* Mac address to be set is derived from the counter each time */
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
		set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
		mac = mac >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
	actions[actions_counter].conf = &set_macs[para.core_idx];
}
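/*
 * The loop above (and its mirror in add_set_dst_mac() below) spreads the
 * 32-bit counter over the low address bytes, least significant first:
 * e.g. para.counter == 0x1a2b3c gives 3c:2b:1a:00:00:00.
 */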
static void
add_set_dst_mac(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t mac = para.counter;
	uint16_t i;

	/* Fixed value */
	if (!para.unique_data)
		mac = 1;

	/* Mac address to be set is derived from the counter each time */
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
		set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
		mac = mac >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
	actions[actions_counter].conf = &set_macs[para.core_idx];
}
static void
add_set_src_ipv4(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ip = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ip = 1;

	/* IPv4 value to be set is derived from the counter each time */
	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
	actions[actions_counter].conf = &set_ipv4[para.core_idx];
}
static void
add_set_dst_ipv4(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ip = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ip = 1;

	/* IPv4 value to be set is derived from the counter each time */
	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
	actions[actions_counter].conf = &set_ipv4[para.core_idx];
}
static void
add_set_src_ipv6(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ipv6 = para.counter;
	uint8_t i;

	/* Fixed value */
	if (!para.unique_data)
		ipv6 = 1;

	/* IPv6 value to set is derived from the counter each time */
	for (i = 0; i < 16; i++) {
		set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
		ipv6 = ipv6 >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
	actions[actions_counter].conf = &set_ipv6[para.core_idx];
}
static void
add_set_dst_ipv6(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ipv6 = para.counter;
	uint8_t i;

	/* Fixed value */
	if (!para.unique_data)
		ipv6 = 1;

	/* IPv6 value to set is derived from the counter each time */
	for (i = 0; i < 16; i++) {
		set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
		ipv6 = ipv6 >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
	actions[actions_counter].conf = &set_ipv6[para.core_idx];
}
static void
add_set_src_tp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t tp = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		tp = 100;

	/* TP src port is derived from the counter each time */
	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
	actions[actions_counter].conf = &set_tp[para.core_idx];
}
static void
add_set_dst_tp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t tp = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		tp = 100;

	/* TP dst port is derived from the counter each time */
	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
	actions[actions_counter].conf = &set_tp[para.core_idx];
}
static void
add_inc_tcp_ack(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ack_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ack_value = 1;

	value[para.core_idx] = RTE_BE32(ack_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
	actions[actions_counter].conf = &value[para.core_idx];
}
static void
add_dec_tcp_ack(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ack_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ack_value = 1;

	value[para.core_idx] = RTE_BE32(ack_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
	actions[actions_counter].conf = &value[para.core_idx];
}
static void
add_inc_tcp_seq(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t seq_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		seq_value = 1;

	value[para.core_idx] = RTE_BE32(seq_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
	actions[actions_counter].conf = &value[para.core_idx];
}
static void
add_dec_tcp_seq(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t seq_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		seq_value = 1;

	value[para.core_idx] = RTE_BE32(seq_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
	actions[actions_counter].conf = &value[para.core_idx];
}
static void
add_set_ttl(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ttl set_ttl[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ttl_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ttl_value = 1;

	/* Fold the counter into the valid TTL range [0, 254] */
	ttl_value = ttl_value % 0xff;

	set_ttl[para.core_idx].ttl_value = ttl_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
	actions[actions_counter].conf = &set_ttl[para.core_idx];
}
static void
add_dec_ttl(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
}
static void
add_set_ipv4_dscp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t dscp_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		dscp_value = 1;

	/* Set dscp to a counter-derived value each time */
	dscp_value = dscp_value % 0xff;

	set_dscp[para.core_idx].dscp = dscp_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
	actions[actions_counter].conf = &set_dscp[para.core_idx];
}
static void
add_set_ipv6_dscp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t dscp_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		dscp_value = 1;

	/* Set dscp to a counter-derived value each time */
	dscp_value = dscp_value % 0xff;

	set_dscp[para.core_idx].dscp = dscp_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
	actions[actions_counter].conf = &set_dscp[para.core_idx];
}
static void
add_flag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
}
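/*
 * The add_*_header() helpers below build the raw buffer consumed by the
 * RAW_ENCAP/RAW_DECAP actions. Each helper appends its protocol header
 * at *header and advances the cursor, but only when the matching
 * FLOW_ITEM_MASK() bit is set in data; it also patches the next-protocol
 * field for the layer that follows, so chaining them in order yields a
 * well-formed packet prefix.
 */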
static void
add_ether_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_ether_hdr eth_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
		return;

	memset(&eth_hdr, 0, sizeof(struct rte_ether_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	memcpy(*header, &eth_hdr, sizeof(eth_hdr));
	*header += sizeof(eth_hdr);
}
static void
add_vlan_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_vlan_hdr vlan_hdr;
	uint16_t vlan_value;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
		return;

	vlan_value = VLAN_VALUE;

	memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
	vlan_hdr.vlan_tci = RTE_BE16(vlan_value);

	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
	*header += sizeof(vlan_hdr);
}
static void
add_ipv4_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_ipv4_hdr ipv4_hdr;
	uint32_t ip_dst = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		ip_dst = 1;

	memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
	ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
	ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
	ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
		ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
		ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
	memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
	*header += sizeof(ipv4_hdr);
}
static void
add_ipv6_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_ipv6_hdr ipv6_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
		return;

	memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
		ipv6_hdr.proto = RTE_IP_TYPE_UDP;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
		ipv6_hdr.proto = RTE_IP_TYPE_GRE;
	memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
	*header += sizeof(ipv6_hdr);
}
static void
add_udp_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_udp_hdr udp_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
		return;

	memset(&udp_hdr, 0, sizeof(struct rte_udp_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
		udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
		udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
	memcpy(*header, &udp_hdr, sizeof(udp_hdr));
	*header += sizeof(udp_hdr);
}
static void
add_vxlan_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_hdr vxlan_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		vni_value = 1;

	memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));

	vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	vxlan_hdr.vx_flags = 0x8;

	memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
	*header += sizeof(vxlan_hdr);
}
static void
add_vxlan_gpe_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		vni_value = 1;

	memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));

	vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	vxlan_gpe_hdr.vx_flags = 0x0c;

	memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
	*header += sizeof(vxlan_gpe_hdr);
}
static void
add_gre_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_gre_hdr gre_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
		return;

	memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));

	gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);

	memcpy(*header, &gre_hdr, sizeof(gre_hdr));
	*header += sizeof(gre_hdr);
}
static void
add_geneve_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_geneve_hdr geneve_hdr;
	uint32_t vni_value = para.counter;
	uint8_t i;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		vni_value = 1;

	memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));

	for (i = 0; i < 3; i++)
		geneve_hdr.vni[2 - i] = vni_value >> (i * 8);

	memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
	*header += sizeof(geneve_hdr);
}
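/*
 * The loop above stores the 24-bit VNI big-endian, most significant byte
 * first: e.g. vni_value == 0x123456 gives vni[0] = 0x12, vni[1] = 0x34,
 * vni[2] = 0x56.
 */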
static void
add_gtp_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_gtp_hdr gtp_hdr;
	uint32_t teid_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		teid_value = 1;

	memset(&gtp_hdr, 0, sizeof(struct rte_gtp_hdr));

	gtp_hdr.teid = RTE_BE32(teid_value);
	gtp_hdr.msg_type = 255;

	memcpy(*header, &gtp_hdr, sizeof(gtp_hdr));
	*header += sizeof(gtp_hdr);
}
static const struct encap_decap_headers {
	void (*funct)(
		uint8_t **header,
		uint64_t data,
		struct additional_para para
		);
} headers[] = {
	{.funct = add_ether_header},
	{.funct = add_vlan_header},
	{.funct = add_ipv4_header},
	{.funct = add_ipv6_header},
	{.funct = add_udp_header},
	{.funct = add_vxlan_header},
	{.funct = add_vxlan_gpe_header},
	{.funct = add_gre_header},
	{.funct = add_geneve_header},
	{.funct = add_gtp_header},
};
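/*
 * Example (illustrative): an encap_data mask of
 *	FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH) |
 *	FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4) |
 *	FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP) |
 *	FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)
 * makes the loop in add_raw_encap() emit an ETH/IPv4/UDP/VXLAN prefix,
 * since the helpers are listed outermost first and each checks its own
 * bit before appending.
 */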
static void
add_raw_encap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_raw_encap_data *action_encap_data[RTE_MAX_LCORE] __rte_cache_aligned;
	uint64_t encap_data = para.encap_data;
	uint8_t *header;
	uint16_t i;

	/* Avoid double allocation. */
	if (action_encap_data[para.core_idx] == NULL)
		action_encap_data[para.core_idx] = rte_malloc("encap_data",
			sizeof(struct action_raw_encap_data), 0);

	/* Check if allocation failed. */
	if (action_encap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*action_encap_data[para.core_idx] = (struct action_raw_encap_data) {
		.conf = (struct rte_flow_action_raw_encap) {
			.data = action_encap_data[para.core_idx]->data,
		},
	};

	header = action_encap_data[para.core_idx]->data;

	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, encap_data, para);

	/* The encap size is however far the header cursor advanced. */
	action_encap_data[para.core_idx]->conf.size = header -
		action_encap_data[para.core_idx]->data;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
	actions[actions_counter].conf = &action_encap_data[para.core_idx]->conf;
}
static void
add_raw_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_raw_decap_data *action_decap_data[RTE_MAX_LCORE] __rte_cache_aligned;
	uint64_t decap_data = para.decap_data;
	uint8_t *header;
	uint16_t i;

	/* Avoid double allocation. */
	if (action_decap_data[para.core_idx] == NULL)
		action_decap_data[para.core_idx] = rte_malloc("decap_data",
			sizeof(struct action_raw_decap_data), 0);

	/* Check if allocation failed. */
	if (action_decap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*action_decap_data[para.core_idx] = (struct action_raw_decap_data) {
		.conf = (struct rte_flow_action_raw_decap) {
			.data = action_decap_data[para.core_idx]->data,
		},
	};

	header = action_decap_data[para.core_idx]->data;

	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, decap_data, para);

	action_decap_data[para.core_idx]->conf.size = header -
		action_decap_data[para.core_idx]->data;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
	actions[actions_counter].conf = &action_decap_data[para.core_idx]->conf;
}
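/*
 * For RAW_DECAP the PMD mainly needs conf.size, i.e. how many bytes to
 * strip; reusing the same header builders as encap is a convenient way
 * to compute that length from the decap_data item mask.
 */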
static void
add_vxlan_encap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_vxlan_encap vxlan_encap[RTE_MAX_LCORE] __rte_cache_aligned;
	static struct rte_flow_item items[5];
	static struct rte_flow_item_eth item_eth;
	static struct rte_flow_item_ipv4 item_ipv4;
	static struct rte_flow_item_udp item_udp;
	static struct rte_flow_item_vxlan item_vxlan;
	uint32_t ip_dst = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ip_dst = 1;

	items[0].spec = &item_eth;
	items[0].mask = &item_eth;
	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;

	item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
	item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
	item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
	items[1].spec = &item_ipv4;
	items[1].mask = &item_ipv4;
	items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;

	item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
	items[2].spec = &item_udp;
	items[2].mask = &item_udp;
	items[2].type = RTE_FLOW_ITEM_TYPE_UDP;

	item_vxlan.vni[2] = 1;
	items[3].spec = &item_vxlan;
	items[3].mask = &item_vxlan;
	items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;

	items[4].type = RTE_FLOW_ITEM_TYPE_END;

	vxlan_encap[para.core_idx].definition = items;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
	actions[actions_counter].conf = &vxlan_encap[para.core_idx];
}
static void
add_vxlan_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
}
static void
add_meter(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_meter
		meters[RTE_MAX_LCORE] __rte_cache_aligned;

	meters[para.core_idx].mtr_id = para.counter;
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_METER;
	actions[actions_counter].conf = &meters[para.core_idx];
}
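/*
 * fill_actions() expands flow_actions[], an array of per-slot action
 * bitmasks terminated by a zero entry, into a concrete rte_flow_action
 * array ending with RTE_FLOW_ACTION_TYPE_END. When hairpinq is nonzero,
 * the queue parameters are redirected to the hairpin queue range.
 */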
void
fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
	uint32_t counter, uint16_t next_table, uint16_t hairpinq,
	uint64_t encap_data, uint64_t decap_data, uint8_t core_idx,
	bool unique_data)
{
	struct additional_para additional_para_data;
	uint8_t actions_counter = 0;
	uint16_t hairpin_queues[hairpinq];
	uint16_t queues[RXQ_NUM];
	uint16_t i, j;

	for (i = 0; i < RXQ_NUM; i++)
		queues[i] = i;

	for (i = 0; i < hairpinq; i++)
		hairpin_queues[i] = i + RXQ_NUM;

	additional_para_data = (struct additional_para){
		.queue = counter % RXQ_NUM,
		.next_table = next_table,
		.queues = queues,
		.queues_number = RXQ_NUM,
		.counter = counter,
		.encap_data = encap_data,
		.decap_data = decap_data,
		.core_idx = core_idx,
		.unique_data = unique_data,
	};

	if (hairpinq != 0) {
		additional_para_data.queues = hairpin_queues;
		additional_para_data.queues_number = hairpinq;
		additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
	}
	static const struct actions_dict {
		uint64_t mask;
		void (*funct)(
			struct rte_flow_action *actions,
			uint8_t actions_counter,
			struct additional_para para
			);
	} actions_list[] = {
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
			.funct = add_mark,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
			.funct = add_count,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
			.funct = add_set_meta,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
			.funct = add_set_tag,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_FLAG),
			.funct = add_flag,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_MAC_SRC),
			.funct = add_set_src_mac,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_MAC_DST),
			.funct = add_set_dst_mac,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC),
			.funct = add_set_src_ipv4,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_IPV4_DST),
			.funct = add_set_dst_ipv4,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC),
			.funct = add_set_src_ipv6,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_IPV6_DST),
			.funct = add_set_dst_ipv6,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TP_SRC),
			.funct = add_set_src_tp,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TP_DST),
			.funct = add_set_dst_tp,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_INC_TCP_ACK),
			.funct = add_inc_tcp_ack,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK),
			.funct = add_dec_tcp_ack,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ),
			.funct = add_inc_tcp_seq,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ),
			.funct = add_dec_tcp_seq,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TTL),
			.funct = add_set_ttl,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DEC_TTL),
			.funct = add_dec_ttl,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP),
			.funct = add_set_ipv4_dscp,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP),
			.funct = add_set_ipv6_dscp,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
			.funct = add_queue,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
			.funct = add_rss,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
			.funct = add_jump,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
			.funct = add_port_id,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
			.funct = add_drop,
		},
		{
			.mask = HAIRPIN_QUEUE_ACTION,
			.funct = add_queue,
		},
		{
			.mask = HAIRPIN_RSS_ACTION,
			.funct = add_rss,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RAW_ENCAP),
			.funct = add_raw_encap,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RAW_DECAP),
			.funct = add_raw_decap,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP),
			.funct = add_vxlan_encap,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP),
			.funct = add_vxlan_decap,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_METER),
			.funct = add_meter,
		},
	};
	for (j = 0; j < MAX_ACTIONS_NUM; j++) {
		if (flow_actions[j] == 0)
			break;
		for (i = 0; i < RTE_DIM(actions_list); i++) {
			if ((flow_actions[j] &
				actions_list[i].mask) == 0)
				continue;
			actions_list[i].funct(
				actions, actions_counter++,
				additional_para_data
			);
			break;
		}
	}
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;
}
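/*
 * Usage sketch (illustrative only; caller code is not part of this file).
 * Here "counter" and "core_idx" stand for the caller's flow counter and
 * lcore index. The flow_actions[] mask list is zero-terminated, and
 * actions[] must leave room for the END entry:
 *
 *	struct rte_flow_action actions[MAX_ACTIONS_NUM];
 *	uint64_t flow_actions[MAX_ACTIONS_NUM] = {
 *		FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
 *		FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
 *	};
 *
 *	fill_actions(actions, flow_actions, counter, 0, 0,
 *		     0, 0, core_idx, false);
 *
 * The filled actions[] can then be passed to rte_flow_create() together
 * with the attributes and pattern items built elsewhere in the app.
 */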