1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
4 * The file contains the implementations of actions generators.
5 * Each generator is responsible for preparing it's action instance
6 * and initializing it with needed data.
10 #include <rte_malloc.h>
12 #include <rte_ethdev.h>
13 #include <rte_vxlan.h>
16 #include "actions_gen.h"
/* Storage for additional parameters for actions */
/*
 * NOTE(review): this view of the struct is truncated — only queues_number
 * is visible, but the generators below also read para.queue,
 * para.next_table, para.counter, para.queues, para.encap_data and
 * para.decap_data; the full definition must declare those fields too.
 */
struct additional_para {
	uint16_t queues_number;
32 /* Storage for struct rte_flow_action_raw_encap including external data. */
33 struct action_raw_encap_data {
34 struct rte_flow_action_raw_encap conf;
36 uint8_t preserve[128];
40 /* Storage for struct rte_flow_action_raw_decap including external data. */
41 struct action_raw_decap_data {
42 struct rte_flow_action_raw_decap conf;
47 /* Storage for struct rte_flow_action_rss including external data. */
48 struct action_rss_data {
49 struct rte_flow_action_rss conf;
55 add_mark(struct rte_flow_action *actions,
56 uint8_t actions_counter,
57 struct additional_para para)
59 static struct rte_flow_action_mark mark_action;
60 uint32_t counter = para.counter;
63 /* Random values from 1 to 256 */
64 mark_action.id = (counter % 255) + 1;
67 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
68 actions[actions_counter].conf = &mark_action;
72 add_queue(struct rte_flow_action *actions,
73 uint8_t actions_counter,
74 struct additional_para para)
76 static struct rte_flow_action_queue queue_action;
79 queue_action.index = para.queue;
82 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
83 actions[actions_counter].conf = &queue_action;
87 add_jump(struct rte_flow_action *actions,
88 uint8_t actions_counter,
89 struct additional_para para)
91 static struct rte_flow_action_jump jump_action;
94 jump_action.group = para.next_table;
97 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
98 actions[actions_counter].conf = &jump_action;
102 add_rss(struct rte_flow_action *actions,
103 uint8_t actions_counter,
104 struct additional_para para)
106 static struct rte_flow_action_rss *rss_action;
107 static struct action_rss_data *rss_data;
111 if (rss_data == NULL)
112 rss_data = rte_malloc("rss_data",
113 sizeof(struct action_rss_data), 0);
115 if (rss_data == NULL)
116 rte_exit(EXIT_FAILURE, "No Memory available!");
118 *rss_data = (struct action_rss_data){
119 .conf = (struct rte_flow_action_rss){
120 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
122 .types = GET_RSS_HF(),
123 .key_len = sizeof(rss_data->key),
124 .queue_num = para.queues_number,
125 .key = rss_data->key,
126 .queue = rss_data->queue,
132 for (queue = 0; queue < para.queues_number; queue++)
133 rss_data->queue[queue] = para.queues[queue];
135 rss_action = &rss_data->conf;
137 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
138 actions[actions_counter].conf = rss_action;
142 add_set_meta(struct rte_flow_action *actions,
143 uint8_t actions_counter,
144 __rte_unused struct additional_para para)
146 static struct rte_flow_action_set_meta meta_action;
149 meta_action.data = RTE_BE32(META_DATA);
150 meta_action.mask = RTE_BE32(0xffffffff);
153 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
154 actions[actions_counter].conf = &meta_action;
158 add_set_tag(struct rte_flow_action *actions,
159 uint8_t actions_counter,
160 __rte_unused struct additional_para para)
162 static struct rte_flow_action_set_tag tag_action;
165 tag_action.data = RTE_BE32(META_DATA);
166 tag_action.mask = RTE_BE32(0xffffffff);
167 tag_action.index = TAG_INDEX;
170 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
171 actions[actions_counter].conf = &tag_action;
175 add_port_id(struct rte_flow_action *actions,
176 uint8_t actions_counter,
177 __rte_unused struct additional_para para)
179 static struct rte_flow_action_port_id port_id;
182 port_id.id = PORT_ID_DST;
185 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
186 actions[actions_counter].conf = &port_id;
190 add_drop(struct rte_flow_action *actions,
191 uint8_t actions_counter,
192 __rte_unused struct additional_para para)
194 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
198 add_count(struct rte_flow_action *actions,
199 uint8_t actions_counter,
200 __rte_unused struct additional_para para)
202 static struct rte_flow_action_count count_action;
204 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
205 actions[actions_counter].conf = &count_action;
209 add_set_src_mac(struct rte_flow_action *actions,
210 uint8_t actions_counter,
211 __rte_unused struct additional_para para)
213 static struct rte_flow_action_set_mac set_mac;
214 uint32_t mac = para.counter;
217 /* Mac address to be set is random each time */
218 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
219 set_mac.mac_addr[i] = mac & 0xff;
223 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
224 actions[actions_counter].conf = &set_mac;
228 add_set_dst_mac(struct rte_flow_action *actions,
229 uint8_t actions_counter,
230 __rte_unused struct additional_para para)
232 static struct rte_flow_action_set_mac set_mac;
233 uint32_t mac = para.counter;
236 /* Mac address to be set is random each time */
237 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
238 set_mac.mac_addr[i] = mac & 0xff;
242 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
243 actions[actions_counter].conf = &set_mac;
247 add_set_src_ipv4(struct rte_flow_action *actions,
248 uint8_t actions_counter,
249 __rte_unused struct additional_para para)
251 static struct rte_flow_action_set_ipv4 set_ipv4;
253 /* IPv4 value to be set is random each time */
254 set_ipv4.ipv4_addr = RTE_BE32(para.counter + 1);
256 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
257 actions[actions_counter].conf = &set_ipv4;
261 add_set_dst_ipv4(struct rte_flow_action *actions,
262 uint8_t actions_counter,
263 __rte_unused struct additional_para para)
265 static struct rte_flow_action_set_ipv4 set_ipv4;
267 /* IPv4 value to be set is random each time */
268 set_ipv4.ipv4_addr = RTE_BE32(para.counter + 1);
270 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
271 actions[actions_counter].conf = &set_ipv4;
275 add_set_src_ipv6(struct rte_flow_action *actions,
276 uint8_t actions_counter,
277 __rte_unused struct additional_para para)
279 static struct rte_flow_action_set_ipv6 set_ipv6;
280 uint32_t ipv6 = para.counter;
283 /* IPv6 value to set is random each time */
284 for (i = 0; i < 16; i++) {
285 set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
289 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
290 actions[actions_counter].conf = &set_ipv6;
294 add_set_dst_ipv6(struct rte_flow_action *actions,
295 uint8_t actions_counter,
296 __rte_unused struct additional_para para)
298 static struct rte_flow_action_set_ipv6 set_ipv6;
299 uint32_t ipv6 = para.counter;
302 /* IPv6 value to set is random each time */
303 for (i = 0; i < 16; i++) {
304 set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
308 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
309 actions[actions_counter].conf = &set_ipv6;
313 add_set_src_tp(struct rte_flow_action *actions,
314 uint8_t actions_counter,
315 __rte_unused struct additional_para para)
317 static struct rte_flow_action_set_tp set_tp;
318 uint32_t tp = para.counter;
320 /* TP src port is random each time */
324 set_tp.port = RTE_BE16(tp & 0xffff);
326 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
327 actions[actions_counter].conf = &set_tp;
331 add_set_dst_tp(struct rte_flow_action *actions,
332 uint8_t actions_counter,
333 __rte_unused struct additional_para para)
335 static struct rte_flow_action_set_tp set_tp;
336 uint32_t tp = para.counter;
338 /* TP src port is random each time */
342 set_tp.port = RTE_BE16(tp & 0xffff);
344 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
345 actions[actions_counter].conf = &set_tp;
349 add_inc_tcp_ack(struct rte_flow_action *actions,
350 uint8_t actions_counter,
351 __rte_unused struct additional_para para)
353 static rte_be32_t value = RTE_BE32(1);
355 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
356 actions[actions_counter].conf = &value;
360 add_dec_tcp_ack(struct rte_flow_action *actions,
361 uint8_t actions_counter,
362 __rte_unused struct additional_para para)
364 static rte_be32_t value = RTE_BE32(1);
366 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
367 actions[actions_counter].conf = &value;
371 add_inc_tcp_seq(struct rte_flow_action *actions,
372 uint8_t actions_counter,
373 __rte_unused struct additional_para para)
375 static rte_be32_t value = RTE_BE32(1);
377 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
378 actions[actions_counter].conf = &value;
382 add_dec_tcp_seq(struct rte_flow_action *actions,
383 uint8_t actions_counter,
384 __rte_unused struct additional_para para)
386 static rte_be32_t value = RTE_BE32(1);
388 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
389 actions[actions_counter].conf = &value;
393 add_set_ttl(struct rte_flow_action *actions,
394 uint8_t actions_counter,
395 __rte_unused struct additional_para para)
397 static struct rte_flow_action_set_ttl set_ttl;
398 uint32_t ttl_value = para.counter;
400 /* Set ttl to random value each time */
401 while (ttl_value > 0xff)
402 ttl_value = ttl_value >> 8;
404 set_ttl.ttl_value = ttl_value;
406 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
407 actions[actions_counter].conf = &set_ttl;
411 add_dec_ttl(struct rte_flow_action *actions,
412 uint8_t actions_counter,
413 __rte_unused struct additional_para para)
415 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
419 add_set_ipv4_dscp(struct rte_flow_action *actions,
420 uint8_t actions_counter,
421 __rte_unused struct additional_para para)
423 static struct rte_flow_action_set_dscp set_dscp;
424 uint32_t dscp_value = para.counter;
426 /* Set dscp to random value each time */
427 while (dscp_value > 0xff)
428 dscp_value = dscp_value >> 8;
430 set_dscp.dscp = dscp_value;
432 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
433 actions[actions_counter].conf = &set_dscp;
437 add_set_ipv6_dscp(struct rte_flow_action *actions,
438 uint8_t actions_counter,
439 __rte_unused struct additional_para para)
441 static struct rte_flow_action_set_dscp set_dscp;
442 uint32_t dscp_value = para.counter;
444 /* Set dscp to random value each time */
445 while (dscp_value > 0xff)
446 dscp_value = dscp_value >> 8;
448 set_dscp.dscp = dscp_value;
450 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
451 actions[actions_counter].conf = &set_dscp;
455 add_flag(struct rte_flow_action *actions,
456 uint8_t actions_counter,
457 __rte_unused struct additional_para para)
459 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
463 add_ether_header(uint8_t **header, uint64_t data,
464 __rte_unused struct additional_para para)
466 struct rte_flow_item_eth eth_item;
468 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
471 memset(ð_item, 0, sizeof(struct rte_flow_item_eth));
472 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
473 eth_item.type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
474 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
475 eth_item.type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
476 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
477 eth_item.type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
478 memcpy(*header, ð_item, sizeof(eth_item));
479 *header += sizeof(eth_item);
483 add_vlan_header(uint8_t **header, uint64_t data,
484 __rte_unused struct additional_para para)
486 struct rte_flow_item_vlan vlan_item;
489 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
492 vlan_value = VLAN_VALUE;
494 memset(&vlan_item, 0, sizeof(struct rte_flow_item_vlan));
495 vlan_item.tci = RTE_BE16(vlan_value);
497 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
498 vlan_item.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
499 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
500 vlan_item.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
501 memcpy(*header, &vlan_item, sizeof(vlan_item));
502 *header += sizeof(vlan_item);
506 add_ipv4_header(uint8_t **header, uint64_t data,
507 struct additional_para para)
509 struct rte_flow_item_ipv4 ipv4_item;
511 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
514 memset(&ipv4_item, 0, sizeof(struct rte_flow_item_ipv4));
515 ipv4_item.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
516 ipv4_item.hdr.dst_addr = RTE_BE32(para.counter);
517 ipv4_item.hdr.version_ihl = RTE_IPV4_VHL_DEF;
518 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
519 ipv4_item.hdr.next_proto_id = RTE_IP_TYPE_UDP;
520 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
521 ipv4_item.hdr.next_proto_id = RTE_IP_TYPE_GRE;
522 memcpy(*header, &ipv4_item, sizeof(ipv4_item));
523 *header += sizeof(ipv4_item);
527 add_ipv6_header(uint8_t **header, uint64_t data,
528 __rte_unused struct additional_para para)
530 struct rte_flow_item_ipv6 ipv6_item;
532 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
535 memset(&ipv6_item, 0, sizeof(struct rte_flow_item_ipv6));
536 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
537 ipv6_item.hdr.proto = RTE_IP_TYPE_UDP;
538 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
539 ipv6_item.hdr.proto = RTE_IP_TYPE_GRE;
540 memcpy(*header, &ipv6_item, sizeof(ipv6_item));
541 *header += sizeof(ipv6_item);
545 add_udp_header(uint8_t **header, uint64_t data,
546 __rte_unused struct additional_para para)
548 struct rte_flow_item_udp udp_item;
550 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
553 memset(&udp_item, 0, sizeof(struct rte_flow_item_udp));
554 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
555 udp_item.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
556 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
557 udp_item.hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
558 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
559 udp_item.hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
560 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
561 udp_item.hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
562 memcpy(*header, &udp_item, sizeof(udp_item));
563 *header += sizeof(udp_item);
567 add_vxlan_header(uint8_t **header, uint64_t data,
568 struct additional_para para)
570 struct rte_flow_item_vxlan vxlan_item;
571 uint32_t vni_value = para.counter;
574 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
577 memset(&vxlan_item, 0, sizeof(struct rte_flow_item_vxlan));
579 for (i = 0; i < 3; i++)
580 vxlan_item.vni[2 - i] = vni_value >> (i * 8);
581 vxlan_item.flags = 0x8;
583 memcpy(*header, &vxlan_item, sizeof(vxlan_item));
584 *header += sizeof(vxlan_item);
588 add_vxlan_gpe_header(uint8_t **header, uint64_t data,
589 struct additional_para para)
591 struct rte_flow_item_vxlan_gpe vxlan_gpe_item;
592 uint32_t vni_value = para.counter;
595 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
598 memset(&vxlan_gpe_item, 0, sizeof(struct rte_flow_item_vxlan_gpe));
600 for (i = 0; i < 3; i++)
601 vxlan_gpe_item.vni[2 - i] = vni_value >> (i * 8);
602 vxlan_gpe_item.flags = 0x0c;
604 memcpy(*header, &vxlan_gpe_item, sizeof(vxlan_gpe_item));
605 *header += sizeof(vxlan_gpe_item);
609 add_gre_header(uint8_t **header, uint64_t data,
610 __rte_unused struct additional_para para)
612 struct rte_flow_item_gre gre_item;
614 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
617 memset(&gre_item, 0, sizeof(struct rte_flow_item_gre));
619 gre_item.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB);
621 memcpy(*header, &gre_item, sizeof(gre_item));
622 *header += sizeof(gre_item);
626 add_geneve_header(uint8_t **header, uint64_t data,
627 struct additional_para para)
629 struct rte_flow_item_geneve geneve_item;
630 uint32_t vni_value = para.counter;
633 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
636 memset(&geneve_item, 0, sizeof(struct rte_flow_item_geneve));
638 for (i = 0; i < 3; i++)
639 geneve_item.vni[2 - i] = vni_value >> (i * 8);
641 memcpy(*header, &geneve_item, sizeof(geneve_item));
642 *header += sizeof(geneve_item);
646 add_gtp_header(uint8_t **header, uint64_t data,
647 struct additional_para para)
649 struct rte_flow_item_gtp gtp_item;
651 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
654 memset(>p_item, 0, sizeof(struct rte_flow_item_gtp));
656 gtp_item.teid = RTE_BE32(para.counter);
657 gtp_item.msg_type = 255;
659 memcpy(*header, >p_item, sizeof(gtp_item));
660 *header += sizeof(gtp_item);
/* Ordered outer-to-inner dispatch table: each generator appends its
 * protocol header to the raw encap/decap buffer only when the matching
 * bit is set in the `data` mask it receives. */
static const struct encap_decap_headers {
	/* NOTE(review): the function-pointer declaration is truncated in
	 * this view; per the generators above the signature must be
	 * void (*funct)(uint8_t **header, uint64_t data,
	 *	struct additional_para para). */
		struct additional_para para
	{.funct = add_ether_header},
	{.funct = add_vlan_header},
	{.funct = add_ipv4_header},
	{.funct = add_ipv6_header},
	{.funct = add_udp_header},
	{.funct = add_vxlan_header},
	{.funct = add_vxlan_gpe_header},
	{.funct = add_gre_header},
	{.funct = add_geneve_header},
	{.funct = add_gtp_header},
683 add_raw_encap(struct rte_flow_action *actions,
684 uint8_t actions_counter,
685 struct additional_para para)
687 static struct action_raw_encap_data *action_encap_data;
688 uint64_t encap_data = para.encap_data;
692 /* Avoid double allocation. */
693 if (action_encap_data == NULL)
694 action_encap_data = rte_malloc("encap_data",
695 sizeof(struct action_raw_encap_data), 0);
697 /* Check if allocation failed. */
698 if (action_encap_data == NULL)
699 rte_exit(EXIT_FAILURE, "No Memory available!");
701 *action_encap_data = (struct action_raw_encap_data) {
702 .conf = (struct rte_flow_action_raw_encap) {
703 .data = action_encap_data->data,
707 header = action_encap_data->data;
709 for (i = 0; i < RTE_DIM(headers); i++)
710 headers[i].funct(&header, encap_data, para);
712 action_encap_data->conf.size = header -
713 action_encap_data->data;
715 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
716 actions[actions_counter].conf = &action_encap_data->conf;
720 add_raw_decap(struct rte_flow_action *actions,
721 uint8_t actions_counter,
722 struct additional_para para)
724 static struct action_raw_decap_data *action_decap_data;
725 uint64_t decap_data = para.decap_data;
729 /* Avoid double allocation. */
730 if (action_decap_data == NULL)
731 action_decap_data = rte_malloc("decap_data",
732 sizeof(struct action_raw_decap_data), 0);
734 /* Check if allocation failed. */
735 if (action_decap_data == NULL)
736 rte_exit(EXIT_FAILURE, "No Memory available!");
738 *action_decap_data = (struct action_raw_decap_data) {
739 .conf = (struct rte_flow_action_raw_decap) {
740 .data = action_decap_data->data,
744 header = action_decap_data->data;
746 for (i = 0; i < RTE_DIM(headers); i++)
747 headers[i].funct(&header, decap_data, para);
749 action_decap_data->conf.size = header -
750 action_decap_data->data;
752 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
753 actions[actions_counter].conf = &action_decap_data->conf;
757 add_vxlan_encap(struct rte_flow_action *actions,
758 uint8_t actions_counter,
759 __rte_unused struct additional_para para)
761 static struct rte_flow_action_vxlan_encap vxlan_encap;
762 static struct rte_flow_item items[5];
763 static struct rte_flow_item_eth item_eth;
764 static struct rte_flow_item_ipv4 item_ipv4;
765 static struct rte_flow_item_udp item_udp;
766 static struct rte_flow_item_vxlan item_vxlan;
768 items[0].spec = &item_eth;
769 items[0].mask = &item_eth;
770 items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
772 item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
773 item_ipv4.hdr.dst_addr = RTE_IPV4(255, 255, 255, 255);
774 item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
775 items[1].spec = &item_ipv4;
776 items[1].mask = &item_ipv4;
777 items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
780 item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
781 items[2].spec = &item_udp;
782 items[2].mask = &item_udp;
783 items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
786 item_vxlan.vni[2] = 1;
787 items[3].spec = &item_vxlan;
788 items[3].mask = &item_vxlan;
789 items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
791 items[4].type = RTE_FLOW_ITEM_TYPE_END;
793 vxlan_encap.definition = items;
795 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
796 actions[actions_counter].conf = &vxlan_encap;
800 add_vxlan_decap(struct rte_flow_action *actions,
801 uint8_t actions_counter,
802 __rte_unused struct additional_para para)
804 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
/*
 * Build the rte_flow actions array for one rule: walk the per-rule
 * bit-masks in flow_actions[] and let every matching generator append
 * its action, then terminate with RTE_FLOW_ACTION_TYPE_END.
 *
 * NOTE(review): this view of the function is truncated — the opening
 * brace, the loop-index declarations, the body of the first queue-fill
 * loop, the guard around the hairpin override (presumably conditional
 * on hairpinq != 0), some designated initializers (.queues, .counter)
 * and many `.funct =` entries of actions_list are not visible here.
 */
fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
	uint32_t counter, uint16_t next_table, uint16_t hairpinq,
	uint64_t encap_data, uint64_t decap_data)
	struct additional_para additional_para_data;
	uint8_t actions_counter = 0;
	/* VLA: one slot per requested hairpin queue. */
	uint16_t hairpin_queues[hairpinq];
	uint16_t queues[RXQ_NUM];
	/* Regular Rx queues are 0..RXQ_NUM-1 (loop body truncated). */
	for (i = 0; i < RXQ_NUM; i++)
	/* Hairpin queues are numbered after the regular ones. */
	for (i = 0; i < hairpinq; i++)
		hairpin_queues[i] = i + RXQ_NUM;
	/* Default parameters target the regular Rx queues. */
	additional_para_data = (struct additional_para){
		.queue = counter % RXQ_NUM,
		.next_table = next_table,
		.queues_number = RXQ_NUM,
		.encap_data = encap_data,
		.decap_data = decap_data,
	/* Hairpin override: steer into the hairpin queue range instead. */
	additional_para_data.queues = hairpin_queues;
	additional_para_data.queues_number = hairpinq;
	additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
	/* Mask -> generator dispatch; table order fixes action order. */
	static const struct actions_dict {
		struct rte_flow_action *actions,
		uint8_t actions_counter,
		struct additional_para para
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
		.funct = add_set_meta,
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
		.funct = add_set_tag,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_FLAG
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
		.funct = add_set_src_mac,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_MAC_DST
		.funct = add_set_dst_mac,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
		.funct = add_set_src_ipv4,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
		.funct = add_set_dst_ipv4,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
		.funct = add_set_src_ipv6,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
		.funct = add_set_dst_ipv6,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_TP_SRC
		.funct = add_set_src_tp,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_TP_DST
		.funct = add_set_dst_tp,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
		.funct = add_inc_tcp_ack,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
		.funct = add_dec_tcp_ack,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
		.funct = add_inc_tcp_seq,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
		.funct = add_dec_tcp_seq,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_TTL
		.funct = add_set_ttl,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_DEC_TTL
		.funct = add_dec_ttl,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
		.funct = add_set_ipv4_dscp,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
		.funct = add_set_ipv6_dscp,
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
		/* Hairpin variants use app-local composite masks. */
		.mask = HAIRPIN_QUEUE_ACTION,
		.mask = HAIRPIN_RSS_ACTION,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_RAW_ENCAP
		.funct = add_raw_encap,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_RAW_DECAP
		.funct = add_raw_decap,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
		.funct = add_vxlan_encap,
		.mask = FLOW_ACTION_MASK(
			RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
		.funct = add_vxlan_decap,
	/* Outer loop: one pass per requested action-mask slot. */
	for (j = 0; j < MAX_ACTIONS_NUM; j++) {
		/* Empty slot: no more actions requested. */
		if (flow_actions[j] == 0)
		/* Inner loop: find the generator matching this mask. */
		for (i = 0; i < RTE_DIM(actions_list); i++) {
			if ((flow_actions[j] &
				actions_list[i].mask) == 0)
			actions_list[i].funct(
				actions, actions_counter++,
				additional_para_data
	/* Mandatory terminator for the actions array. */
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;