1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
4 * The file contains the implementations of actions generators.
5 * Each generator is responsible for preparing it's action instance
6 * and initializing it with needed data.
10 #include <rte_malloc.h>
12 #include <rte_ethdev.h>
13 #include <rte_vxlan.h>
16 #include <rte_geneve.h>
18 #include "actions_gen.h"
23 /* Storage for additional parameters for actions */
24 struct additional_para {
28 uint16_t queues_number;
/* NOTE(review): listing elided here — other members read below
 * (queue, counter, queues, next_table, encap_data, decap_data)
 * are not visible in this dump; confirm against the full file. */
34 /* Storage for struct rte_flow_action_raw_encap including external data. */
35 struct action_raw_encap_data {
36 struct rte_flow_action_raw_encap conf;
/* preserve[] backs conf.preserve so the action struct and its
 * out-of-line buffers live in one allocation. */
38 uint8_t preserve[128];
42 /* Storage for struct rte_flow_action_raw_decap including external data. */
43 struct action_raw_decap_data {
44 struct rte_flow_action_raw_decap conf;
49 /* Storage for struct rte_flow_action_rss including external data. */
50 struct action_rss_data {
51 struct rte_flow_action_rss conf;
/* Fill a MARK action; the id cycles deterministically with the
 * per-flow counter. conf points at a function-static object, so all
 * generated flows share one instance (last writer wins). */
57 add_mark(struct rte_flow_action *actions,
58 uint8_t actions_counter,
59 struct additional_para para)
61 static struct rte_flow_action_mark mark_action;
62 uint32_t counter = para.counter;
65 /* Values cycle from 1 to 255: (counter % 255) + 1 */
66 mark_action.id = (counter % 255) + 1;
69 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
70 actions[actions_counter].conf = &mark_action;
/* Fill a QUEUE action targeting the queue chosen by the caller
 * (para.queue). Same shared-static-conf pattern as above. */
74 add_queue(struct rte_flow_action *actions,
75 uint8_t actions_counter,
76 struct additional_para para)
78 static struct rte_flow_action_queue queue_action;
81 queue_action.index = para.queue;
84 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
85 actions[actions_counter].conf = &queue_action;
/* Fill a JUMP action redirecting matched packets to para.next_table. */
89 add_jump(struct rte_flow_action *actions,
90 uint8_t actions_counter,
91 struct additional_para para)
93 static struct rte_flow_action_jump jump_action;
96 jump_action.group = para.next_table;
99 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
100 actions[actions_counter].conf = &jump_action;
/* Fill an RSS action. The backing action_rss_data is rte_malloc'ed
 * once on first use (cached in a static pointer) and then rebuilt on
 * every call; allocation failure aborts the application. */
104 add_rss(struct rte_flow_action *actions,
105 uint8_t actions_counter,
106 struct additional_para para)
108 static struct rte_flow_action_rss *rss_action;
109 static struct action_rss_data *rss_data;
/* Avoid reallocating on subsequent calls. */
113 if (rss_data == NULL)
114 rss_data = rte_malloc("rss_data",
115 sizeof(struct action_rss_data), 0);
117 if (rss_data == NULL)
118 rte_exit(EXIT_FAILURE, "No Memory available!");
/* key/queue point back into the same allocation, so conf and its
 * out-of-line arrays stay valid together. */
120 *rss_data = (struct action_rss_data){
121 .conf = (struct rte_flow_action_rss){
122 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
124 .types = GET_RSS_HF(),
125 .key_len = sizeof(rss_data->key),
126 .queue_num = para.queues_number,
127 .key = rss_data->key,
128 .queue = rss_data->queue,
/* Copy the caller-provided queue list (normal or hairpin queues). */
134 for (queue = 0; queue < para.queues_number; queue++)
135 rss_data->queue[queue] = para.queues[queue];
137 rss_action = &rss_data->conf;
139 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
140 actions[actions_counter].conf = rss_action;
/* Fill a SET_META action with the fixed META_DATA value and a
 * full 32-bit mask (both in big-endian as the API requires). */
144 add_set_meta(struct rte_flow_action *actions,
145 uint8_t actions_counter,
146 __rte_unused struct additional_para para)
148 static struct rte_flow_action_set_meta meta_action;
151 meta_action.data = RTE_BE32(META_DATA);
152 meta_action.mask = RTE_BE32(0xffffffff);
155 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
156 actions[actions_counter].conf = &meta_action;
/* Fill a SET_TAG action writing META_DATA to tag register TAG_INDEX. */
160 add_set_tag(struct rte_flow_action *actions,
161 uint8_t actions_counter,
162 __rte_unused struct additional_para para)
164 static struct rte_flow_action_set_tag tag_action;
167 tag_action.data = RTE_BE32(META_DATA);
168 tag_action.mask = RTE_BE32(0xffffffff);
169 tag_action.index = TAG_INDEX;
172 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
173 actions[actions_counter].conf = &tag_action;
/* Fill a PORT_ID action forwarding to the fixed PORT_ID_DST port. */
177 add_port_id(struct rte_flow_action *actions,
178 uint8_t actions_counter,
179 __rte_unused struct additional_para para)
181 static struct rte_flow_action_port_id port_id;
184 port_id.id = PORT_ID_DST;
187 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
188 actions[actions_counter].conf = &port_id;
/* Fill a DROP action; DROP takes no configuration. */
192 add_drop(struct rte_flow_action *actions,
193 uint8_t actions_counter,
194 __rte_unused struct additional_para para)
196 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
/* Fill a COUNT action; the zeroed static conf uses default counters. */
200 add_count(struct rte_flow_action *actions,
201 uint8_t actions_counter,
202 __rte_unused struct additional_para para)
204 static struct rte_flow_action_count count_action;
206 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
207 actions[actions_counter].conf = &count_action;
/* Fill a SET_MAC_SRC action; the MAC bytes are derived from the flow
 * counter so each flow gets a distinct address.
 * NOTE(review): para is marked __rte_unused but para.counter IS read
 * below — the attribute is stale (harmless hint, but misleading). */
211 add_set_src_mac(struct rte_flow_action *actions,
212 uint8_t actions_counter,
213 __rte_unused struct additional_para para)
215 static struct rte_flow_action_set_mac set_mac;
216 uint32_t mac = para.counter;
/* NOTE(review): the per-byte shift of `mac` between iterations is
 * elided from this dump — confirm against the full file. */
223 /* Mac address to be set is random each time */
224 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
225 set_mac.mac_addr[i] = mac & 0xff;
229 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
230 actions[actions_counter].conf = &set_mac;
/* Same as above, for the destination MAC (SET_MAC_DST). */
234 add_set_dst_mac(struct rte_flow_action *actions,
235 uint8_t actions_counter,
236 __rte_unused struct additional_para para)
238 static struct rte_flow_action_set_mac set_mac;
239 uint32_t mac = para.counter;
246 /* Mac address to be set is random each time */
247 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
248 set_mac.mac_addr[i] = mac & 0xff;
252 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
253 actions[actions_counter].conf = &set_mac;
/* Fill a SET_IPV4_SRC action; the address is counter+1 (big-endian),
 * so it varies per flow rather than being truly random. */
257 add_set_src_ipv4(struct rte_flow_action *actions,
258 uint8_t actions_counter,
259 __rte_unused struct additional_para para)
261 static struct rte_flow_action_set_ipv4 set_ipv4;
262 uint32_t ip = para.counter;
268 /* IPv4 value to be set is random each time */
269 set_ipv4.ipv4_addr = RTE_BE32(ip + 1);
271 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
272 actions[actions_counter].conf = &set_ipv4;
/* Same as above for the destination address (SET_IPV4_DST). */
276 add_set_dst_ipv4(struct rte_flow_action *actions,
277 uint8_t actions_counter,
278 __rte_unused struct additional_para para)
280 static struct rte_flow_action_set_ipv4 set_ipv4;
281 uint32_t ip = para.counter;
287 /* IPv4 value to be set is random each time */
288 set_ipv4.ipv4_addr = RTE_BE32(ip + 1);
290 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
291 actions[actions_counter].conf = &set_ipv4;
/* Fill a SET_IPV6_SRC action; each of the 16 address bytes is derived
 * from the flow counter.
 * NOTE(review): the shift of `ipv6` between iterations is elided from
 * this dump — confirm against the full file. */
295 add_set_src_ipv6(struct rte_flow_action *actions,
296 uint8_t actions_counter,
297 __rte_unused struct additional_para para)
299 static struct rte_flow_action_set_ipv6 set_ipv6;
300 uint32_t ipv6 = para.counter;
307 /* IPv6 value to set is random each time */
308 for (i = 0; i < 16; i++) {
309 set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
313 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
314 actions[actions_counter].conf = &set_ipv6;
/* Same as above for the destination address (SET_IPV6_DST). */
318 add_set_dst_ipv6(struct rte_flow_action *actions,
319 uint8_t actions_counter,
320 __rte_unused struct additional_para para)
322 static struct rte_flow_action_set_ipv6 set_ipv6;
323 uint32_t ipv6 = para.counter;
330 /* IPv6 value to set is random each time */
331 for (i = 0; i < 16; i++) {
332 set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
336 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
337 actions[actions_counter].conf = &set_ipv6;
/* Fill a SET_TP_SRC action; the port is the low 16 bits of the flow
 * counter, converted to network byte order. */
341 add_set_src_tp(struct rte_flow_action *actions,
342 uint8_t actions_counter,
343 __rte_unused struct additional_para para)
345 static struct rte_flow_action_set_tp set_tp;
346 uint32_t tp = para.counter;
352 /* TP src port is random each time */
355 set_tp.port = RTE_BE16(tp & 0xffff);
357 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
358 actions[actions_counter].conf = &set_tp;
/* Same as above for the destination port (SET_TP_DST). */
362 add_set_dst_tp(struct rte_flow_action *actions,
363 uint8_t actions_counter,
364 __rte_unused struct additional_para para)
366 static struct rte_flow_action_set_tp set_tp;
367 uint32_t tp = para.counter;
373 /* TP dst port is random each time */
377 set_tp.port = RTE_BE16(tp & 0xffff);
379 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
380 actions[actions_counter].conf = &set_tp;
/* Fill an INC_TCP_ACK action; the increment is the big-endian flow
 * counter, held in a function-static shared by all flows. */
384 add_inc_tcp_ack(struct rte_flow_action *actions,
385 uint8_t actions_counter,
386 __rte_unused struct additional_para para)
388 static rte_be32_t value;
389 uint32_t ack_value = para.counter;
395 value = RTE_BE32(ack_value);
397 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
398 actions[actions_counter].conf = &value;
/* Same pattern, decrementing the TCP ack number (DEC_TCP_ACK). */
402 add_dec_tcp_ack(struct rte_flow_action *actions,
403 uint8_t actions_counter,
404 __rte_unused struct additional_para para)
406 static rte_be32_t value;
407 uint32_t ack_value = para.counter;
413 value = RTE_BE32(ack_value);
415 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
416 actions[actions_counter].conf = &value;
/* Same pattern, incrementing the TCP sequence number (INC_TCP_SEQ). */
420 add_inc_tcp_seq(struct rte_flow_action *actions,
421 uint8_t actions_counter,
422 __rte_unused struct additional_para para)
424 static rte_be32_t value;
425 uint32_t seq_value = para.counter;
431 value = RTE_BE32(seq_value);
433 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
434 actions[actions_counter].conf = &value;
/* Same pattern, decrementing the TCP sequence number (DEC_TCP_SEQ). */
438 add_dec_tcp_seq(struct rte_flow_action *actions,
439 uint8_t actions_counter,
440 __rte_unused struct additional_para para)
442 static rte_be32_t value;
443 uint32_t seq_value = para.counter;
449 value = RTE_BE32(seq_value);
451 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
452 actions[actions_counter].conf = &value;
/* Fill a SET_TTL action; TTL is counter mod 255 (0..254). */
456 add_set_ttl(struct rte_flow_action *actions,
457 uint8_t actions_counter,
458 __rte_unused struct additional_para para)
460 static struct rte_flow_action_set_ttl set_ttl;
461 uint32_t ttl_value = para.counter;
467 /* Set ttl to random value each time */
468 ttl_value = ttl_value % 0xff;
470 set_ttl.ttl_value = ttl_value;
472 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
473 actions[actions_counter].conf = &set_ttl;
/* Fill a DEC_TTL action; it takes no configuration. */
477 add_dec_ttl(struct rte_flow_action *actions,
478 uint8_t actions_counter,
479 __rte_unused struct additional_para para)
481 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
/* Fill a SET_IPV4_DSCP action.
 * NOTE(review): DSCP is a 6-bit field (0..63) but the value here is
 * reduced mod 0xff, so values 64..254 can be produced — verify the
 * PMD tolerates out-of-range DSCP or reduce mod 0x40 instead. */
485 add_set_ipv4_dscp(struct rte_flow_action *actions,
486 uint8_t actions_counter,
487 __rte_unused struct additional_para para)
489 static struct rte_flow_action_set_dscp set_dscp;
490 uint32_t dscp_value = para.counter;
496 /* Set dscp to random value each time */
497 dscp_value = dscp_value % 0xff;
499 set_dscp.dscp = dscp_value;
501 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
502 actions[actions_counter].conf = &set_dscp;
/* Same as above for IPv6 (SET_IPV6_DSCP); same range caveat. */
506 add_set_ipv6_dscp(struct rte_flow_action *actions,
507 uint8_t actions_counter,
508 __rte_unused struct additional_para para)
510 static struct rte_flow_action_set_dscp set_dscp;
511 uint32_t dscp_value = para.counter;
517 /* Set dscp to random value each time */
518 dscp_value = dscp_value % 0xff;
520 set_dscp.dscp = dscp_value;
522 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
523 actions[actions_counter].conf = &set_dscp;
/* Fill a FLAG action; it takes no configuration. */
527 add_flag(struct rte_flow_action *actions,
528 uint8_t actions_counter,
529 __rte_unused struct additional_para para)
531 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
535 add_ether_header(uint8_t **header, uint64_t data,
536 __rte_unused struct additional_para para)
538 struct rte_ether_hdr eth_hdr;
540 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
543 memset(ð_hdr, 0, sizeof(struct rte_ether_hdr));
544 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
545 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
546 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
547 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
548 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
549 eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
550 memcpy(*header, ð_hdr, sizeof(eth_hdr));
551 *header += sizeof(eth_hdr);
/* Append a VLAN header (fixed VLAN_VALUE tci) to the raw encap/decap
 * buffer if VLAN is requested; eth_proto reflects the next requested
 * layer (IPv4 or IPv6). Advances *header past the header. */
555 add_vlan_header(uint8_t **header, uint64_t data,
556 __rte_unused struct additional_para para)
558 struct rte_vlan_hdr vlan_hdr;
/* Early-return when VLAN is not in the requested items mask. */
561 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
564 vlan_value = VLAN_VALUE;
566 memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
567 vlan_hdr.vlan_tci = RTE_BE16(vlan_value);
569 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
570 vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
571 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
572 vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
573 memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
574 *header += sizeof(vlan_hdr);
/* Append an IPv4 header to the raw encap/decap buffer if IPv4 is
 * requested; dst address varies with the flow counter, next proto is
 * chosen from the requested inner layer (UDP or GRE). */
578 add_ipv4_header(uint8_t **header, uint64_t data,
579 struct additional_para para)
581 struct rte_ipv4_hdr ipv4_hdr;
582 uint32_t ip_dst = para.counter;
584 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
591 memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
/* NOTE(review): src_addr is written without RTE_BE32 while dst_addr
 * is byte-swapped — confirm this asymmetry is intended. */
592 ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
593 ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
594 ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
595 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
596 ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
597 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
598 ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
599 memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
600 *header += sizeof(ipv4_hdr);
/* Append an IPv6 header (zeroed addresses) if IPv6 is requested;
 * proto is chosen from the requested inner layer (UDP or GRE). */
604 add_ipv6_header(uint8_t **header, uint64_t data,
605 __rte_unused struct additional_para para)
607 struct rte_ipv6_hdr ipv6_hdr;
609 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
612 memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
613 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
614 ipv6_hdr.proto = RTE_IP_TYPE_UDP;
615 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
616 ipv6_hdr.proto = RTE_IP_TYPE_GRE;
617 memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
618 *header += sizeof(ipv6_hdr);
622 add_udp_header(uint8_t **header, uint64_t data,
623 __rte_unused struct additional_para para)
625 struct rte_udp_hdr udp_hdr;
627 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
630 memset(&udp_hdr, 0, sizeof(struct rte_flow_item_udp));
631 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
632 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
633 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
634 udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
635 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
636 udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
637 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
638 udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
639 memcpy(*header, &udp_hdr, sizeof(udp_hdr));
640 *header += sizeof(udp_hdr);
/* Append a VXLAN header if VXLAN is requested; the 24-bit VNI is the
 * big-endian counter shifted into the top three bytes of vx_vni. */
644 add_vxlan_header(uint8_t **header, uint64_t data,
645 struct additional_para para)
647 struct rte_vxlan_hdr vxlan_hdr;
648 uint32_t vni_value = para.counter;
650 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
657 memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));
659 vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
/* 0x8 = VXLAN "I" flag (valid VNI present). */
660 vxlan_hdr.vx_flags = 0x8;
662 memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
663 *header += sizeof(vxlan_hdr);
/* Same as above for VXLAN-GPE; flags 0x0c per the GPE draft. */
667 add_vxlan_gpe_header(uint8_t **header, uint64_t data,
668 struct additional_para para)
670 struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
671 uint32_t vni_value = para.counter;
673 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
680 memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));
682 vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
683 vxlan_gpe_hdr.vx_flags = 0x0c;
685 memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
686 *header += sizeof(vxlan_gpe_hdr);
/* Append a GRE header if GRE is requested; the payload protocol is
 * fixed to transparent Ethernet bridging (TEB). */
690 add_gre_header(uint8_t **header, uint64_t data,
691 __rte_unused struct additional_para para)
693 struct rte_gre_hdr gre_hdr;
695 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
698 memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));
700 gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);
702 memcpy(*header, &gre_hdr, sizeof(gre_hdr));
703 *header += sizeof(gre_hdr);
/* Append a GENEVE header if requested; the 24-bit VNI is stored
 * byte-by-byte, most-significant byte first. */
707 add_geneve_header(uint8_t **header, uint64_t data,
708 struct additional_para para)
710 struct rte_geneve_hdr geneve_hdr;
711 uint32_t vni_value = para.counter;
714 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
721 memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));
723 for (i = 0; i < 3; i++)
724 geneve_hdr.vni[2 - i] = vni_value >> (i * 8);
726 memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
727 *header += sizeof(geneve_hdr);
731 add_gtp_header(uint8_t **header, uint64_t data,
732 struct additional_para para)
734 struct rte_gtp_hdr gtp_hdr;
735 uint32_t teid_value = para.counter;
737 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
744 memset(>p_hdr, 0, sizeof(struct rte_flow_item_gtp));
746 gtp_hdr.teid = RTE_BE32(teid_value);
747 gtp_hdr.msg_type = 255;
749 memcpy(*header, >p_hdr, sizeof(gtp_hdr));
750 *header += sizeof(gtp_hdr);
/* Dispatch table of header generators for RAW_ENCAP/RAW_DECAP.
 * Order matters: it is the wire order of the resulting packet
 * (outer L2 first, tunnel header last). Each funct appends its header
 * only when the corresponding bit is set in the items mask. */
753 static const struct encap_decap_headers {
757 struct additional_para para
760 {.funct = add_ether_header},
761 {.funct = add_vlan_header},
762 {.funct = add_ipv4_header},
763 {.funct = add_ipv6_header},
764 {.funct = add_udp_header},
765 {.funct = add_vxlan_header},
766 {.funct = add_vxlan_gpe_header},
767 {.funct = add_gre_header},
768 {.funct = add_geneve_header},
769 {.funct = add_gtp_header},
/* Fill a RAW_ENCAP action: build the encap bytes by running every
 * header generator against para.encap_data, then record the resulting
 * length. Backing storage is rte_malloc'ed once and cached in a
 * static pointer; allocation failure aborts the application. */
773 add_raw_encap(struct rte_flow_action *actions,
774 uint8_t actions_counter,
775 struct additional_para para)
777 static struct action_raw_encap_data *action_encap_data;
778 uint64_t encap_data = para.encap_data;
782 /* Avoid double allocation. */
783 if (action_encap_data == NULL)
784 action_encap_data = rte_malloc("encap_data",
785 sizeof(struct action_raw_encap_data), 0);
787 /* Check if allocation failed. */
788 if (action_encap_data == NULL)
789 rte_exit(EXIT_FAILURE, "No Memory available!");
791 *action_encap_data = (struct action_raw_encap_data) {
792 .conf = (struct rte_flow_action_raw_encap) {
793 .data = action_encap_data->data,
/* header walks through the data buffer as generators append. */
797 header = action_encap_data->data;
799 for (i = 0; i < RTE_DIM(headers); i++)
800 headers[i].funct(&header, encap_data, para);
/* Size is how far the generators advanced the cursor. */
802 action_encap_data->conf.size = header -
803 action_encap_data->data;
805 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
806 actions[actions_counter].conf = &action_encap_data->conf;
/* Fill a RAW_DECAP action; identical structure to add_raw_encap but
 * driven by para.decap_data and a separate cached allocation. */
810 add_raw_decap(struct rte_flow_action *actions,
811 uint8_t actions_counter,
812 struct additional_para para)
814 static struct action_raw_decap_data *action_decap_data;
815 uint64_t decap_data = para.decap_data;
819 /* Avoid double allocation. */
820 if (action_decap_data == NULL)
821 action_decap_data = rte_malloc("decap_data",
822 sizeof(struct action_raw_decap_data), 0);
824 /* Check if allocation failed. */
825 if (action_decap_data == NULL)
826 rte_exit(EXIT_FAILURE, "No Memory available!");
828 *action_decap_data = (struct action_raw_decap_data) {
829 .conf = (struct rte_flow_action_raw_decap) {
830 .data = action_decap_data->data,
834 header = action_decap_data->data;
836 for (i = 0; i < RTE_DIM(headers); i++)
837 headers[i].funct(&header, decap_data, para);
839 action_decap_data->conf.size = header -
840 action_decap_data->data;
842 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
843 actions[actions_counter].conf = &action_decap_data->conf;
/* Fill a VXLAN_ENCAP action described by an item list:
 * ETH / IPv4 / UDP(vxlan port) / VXLAN(vni=1) / END. All item objects
 * are function-statics, shared by every generated flow.
 * NOTE(review): para is marked __rte_unused but para.counter IS read
 * below for the IPv4 destination — the attribute is stale. */
847 add_vxlan_encap(struct rte_flow_action *actions,
848 uint8_t actions_counter,
849 __rte_unused struct additional_para para)
851 static struct rte_flow_action_vxlan_encap vxlan_encap;
852 static struct rte_flow_item items[5];
853 static struct rte_flow_item_eth item_eth;
854 static struct rte_flow_item_ipv4 item_ipv4;
855 static struct rte_flow_item_udp item_udp;
856 static struct rte_flow_item_vxlan item_vxlan;
857 uint32_t ip_dst = para.counter;
/* spec and mask share the same object for each layer. */
863 items[0].spec = &item_eth;
864 items[0].mask = &item_eth;
865 items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
867 item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
868 item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
869 item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
870 items[1].spec = &item_ipv4;
871 items[1].mask = &item_ipv4;
872 items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
875 item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
876 items[2].spec = &item_udp;
877 items[2].mask = &item_udp;
878 items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
/* Fixed VNI of 1 (low byte of the 24-bit big-endian VNI). */
881 item_vxlan.vni[2] = 1;
882 items[3].spec = &item_vxlan;
883 items[3].mask = &item_vxlan;
884 items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
886 items[4].type = RTE_FLOW_ITEM_TYPE_END;
888 vxlan_encap.definition = items;
890 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
891 actions[actions_counter].conf = &vxlan_encap;
/* Fill a VXLAN_DECAP action; it takes no configuration. */
895 add_vxlan_decap(struct rte_flow_action *actions,
896 uint8_t actions_counter,
897 __rte_unused struct additional_para para)
899 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
/* Build the rte_flow action array for one flow: for every requested
 * action bit in flow_actions[], look up the matching generator in
 * actions_list and let it append its action, then terminate the array
 * with an END action. counter individualizes per-flow values; hairpinq
 * switches queue/RSS targets to the hairpin queue range. */
903 fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
904 uint32_t counter, uint16_t next_table, uint16_t hairpinq,
905 uint64_t encap_data, uint64_t decap_data)
907 struct additional_para additional_para_data;
908 uint8_t actions_counter = 0;
/* VLA sized by hairpinq (may be 0 — only indexed when hairpinq > 0). */
909 uint16_t hairpin_queues[hairpinq];
910 uint16_t queues[RXQ_NUM];
/* NOTE(review): loop bodies at original lines 914/917 are partly
 * elided; hairpin queue ids start right after the normal RX queues. */
913 for (i = 0; i < RXQ_NUM; i++)
916 for (i = 0; i < hairpinq; i++)
917 hairpin_queues[i] = i + RXQ_NUM;
919 additional_para_data = (struct additional_para){
920 .queue = counter % RXQ_NUM,
921 .next_table = next_table,
923 .queues_number = RXQ_NUM,
925 .encap_data = encap_data,
926 .decap_data = decap_data,
/* Hairpin mode overrides the queue targets set above. */
930 additional_para_data.queues = hairpin_queues;
931 additional_para_data.queues_number = hairpinq;
932 additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
/* Static dispatch table mapping an action mask bit to its generator. */
935 static const struct actions_dict {
938 struct rte_flow_action *actions,
939 uint8_t actions_counter,
940 struct additional_para para
944 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
948 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
952 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
953 .funct = add_set_meta,
956 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
957 .funct = add_set_tag,
960 .mask = FLOW_ACTION_MASK(
961 RTE_FLOW_ACTION_TYPE_FLAG
966 .mask = FLOW_ACTION_MASK(
967 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
969 .funct = add_set_src_mac,
972 .mask = FLOW_ACTION_MASK(
973 RTE_FLOW_ACTION_TYPE_SET_MAC_DST
975 .funct = add_set_dst_mac,
978 .mask = FLOW_ACTION_MASK(
979 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
981 .funct = add_set_src_ipv4,
984 .mask = FLOW_ACTION_MASK(
985 RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
987 .funct = add_set_dst_ipv4,
990 .mask = FLOW_ACTION_MASK(
991 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
993 .funct = add_set_src_ipv6,
996 .mask = FLOW_ACTION_MASK(
997 RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
999 .funct = add_set_dst_ipv6,
1002 .mask = FLOW_ACTION_MASK(
1003 RTE_FLOW_ACTION_TYPE_SET_TP_SRC
1005 .funct = add_set_src_tp,
1008 .mask = FLOW_ACTION_MASK(
1009 RTE_FLOW_ACTION_TYPE_SET_TP_DST
1011 .funct = add_set_dst_tp,
1014 .mask = FLOW_ACTION_MASK(
1015 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
1017 .funct = add_inc_tcp_ack,
1020 .mask = FLOW_ACTION_MASK(
1021 RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
1023 .funct = add_dec_tcp_ack,
1026 .mask = FLOW_ACTION_MASK(
1027 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
1029 .funct = add_inc_tcp_seq,
1032 .mask = FLOW_ACTION_MASK(
1033 RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
1035 .funct = add_dec_tcp_seq,
1038 .mask = FLOW_ACTION_MASK(
1039 RTE_FLOW_ACTION_TYPE_SET_TTL
1041 .funct = add_set_ttl,
1044 .mask = FLOW_ACTION_MASK(
1045 RTE_FLOW_ACTION_TYPE_DEC_TTL
1047 .funct = add_dec_ttl,
1050 .mask = FLOW_ACTION_MASK(
1051 RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
1053 .funct = add_set_ipv4_dscp,
1056 .mask = FLOW_ACTION_MASK(
1057 RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
1059 .funct = add_set_ipv6_dscp,
1062 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
1066 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
1070 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
1074 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
1075 .funct = add_port_id
1078 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
/* Hairpin variants reuse the queue/RSS generators with the hairpin
 * queue parameters installed above. */
1082 .mask = HAIRPIN_QUEUE_ACTION,
1086 .mask = HAIRPIN_RSS_ACTION,
1090 .mask = FLOW_ACTION_MASK(
1091 RTE_FLOW_ACTION_TYPE_RAW_ENCAP
1093 .funct = add_raw_encap,
1096 .mask = FLOW_ACTION_MASK(
1097 RTE_FLOW_ACTION_TYPE_RAW_DECAP
1099 .funct = add_raw_decap,
1102 .mask = FLOW_ACTION_MASK(
1103 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
1105 .funct = add_vxlan_encap,
1108 .mask = FLOW_ACTION_MASK(
1109 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
1111 .funct = add_vxlan_decap,
/* Outer loop: each requested-actions word; inner loop: match it
 * against every dictionary entry and emit the matching actions. */
1115 for (j = 0; j < MAX_ACTIONS_NUM; j++) {
/* A zero word terminates the caller's requested-actions list. */
1116 if (flow_actions[j] == 0)
1118 for (i = 0; i < RTE_DIM(actions_list); i++) {
1119 if ((flow_actions[j] &
1120 actions_list[i].mask) == 0)
1122 actions_list[i].funct(
1123 actions, actions_counter++,
1124 additional_para_data
/* Terminate the rte_flow action array. */
1129 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;