/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 *
 * The file contains the implementations of actions generators.
 * Each generator is responsible for preparing its action instance
 * and initializing it with needed data.
 */
10 #include <rte_malloc.h>
12 #include <rte_ethdev.h>
13 #include <rte_vxlan.h>
16 #include "actions_gen.h"
/* Storage for additional parameters for actions.
 * Filled once per flow by fill_actions() and passed by value to every
 * generator so each can pick the fields it needs.
 */
struct additional_para {
	uint16_t queue;          /* Destination Rx queue index. */
	uint16_t next_table;     /* Target group for JUMP actions. */
	uint16_t *queues;        /* Queue list used by RSS. */
	uint16_t queues_number;  /* Number of entries in @queues. */
	uint32_t counter;        /* Per-flow counter used to vary field values. */
	uint64_t encap_data;     /* Bitmask of items to build for raw encap. */
	uint64_t decap_data;     /* Bitmask of items to build for raw decap. */
};
32 /* Storage for struct rte_flow_action_raw_encap including external data. */
33 struct action_raw_encap_data {
34 struct rte_flow_action_raw_encap conf;
36 uint8_t preserve[128];
40 /* Storage for struct rte_flow_action_raw_decap including external data. */
41 struct action_raw_decap_data {
42 struct rte_flow_action_raw_decap conf;
47 /* Storage for struct rte_flow_action_rss including external data. */
48 struct action_rss_data {
49 struct rte_flow_action_rss conf;
55 add_mark(struct rte_flow_action *actions,
56 uint8_t actions_counter,
57 __rte_unused struct additional_para para)
59 static struct rte_flow_action_mark mark_action;
62 mark_action.id = MARK_ID;
65 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
66 actions[actions_counter].conf = &mark_action;
70 add_queue(struct rte_flow_action *actions,
71 uint8_t actions_counter,
72 struct additional_para para)
74 static struct rte_flow_action_queue queue_action;
77 queue_action.index = para.queue;
80 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
81 actions[actions_counter].conf = &queue_action;
85 add_jump(struct rte_flow_action *actions,
86 uint8_t actions_counter,
87 struct additional_para para)
89 static struct rte_flow_action_jump jump_action;
92 jump_action.group = para.next_table;
95 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
96 actions[actions_counter].conf = &jump_action;
100 add_rss(struct rte_flow_action *actions,
101 uint8_t actions_counter,
102 struct additional_para para)
104 static struct rte_flow_action_rss *rss_action;
105 static struct action_rss_data *rss_data;
109 if (rss_data == NULL)
110 rss_data = rte_malloc("rss_data",
111 sizeof(struct action_rss_data), 0);
113 if (rss_data == NULL)
114 rte_exit(EXIT_FAILURE, "No Memory available!");
116 *rss_data = (struct action_rss_data){
117 .conf = (struct rte_flow_action_rss){
118 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
120 .types = GET_RSS_HF(),
121 .key_len = sizeof(rss_data->key),
122 .queue_num = para.queues_number,
123 .key = rss_data->key,
124 .queue = rss_data->queue,
130 for (queue = 0; queue < para.queues_number; queue++)
131 rss_data->queue[queue] = para.queues[queue];
133 rss_action = &rss_data->conf;
135 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
136 actions[actions_counter].conf = rss_action;
140 add_set_meta(struct rte_flow_action *actions,
141 uint8_t actions_counter,
142 __rte_unused struct additional_para para)
144 static struct rte_flow_action_set_meta meta_action;
147 meta_action.data = RTE_BE32(META_DATA);
148 meta_action.mask = RTE_BE32(0xffffffff);
151 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
152 actions[actions_counter].conf = &meta_action;
156 add_set_tag(struct rte_flow_action *actions,
157 uint8_t actions_counter,
158 __rte_unused struct additional_para para)
160 static struct rte_flow_action_set_tag tag_action;
163 tag_action.data = RTE_BE32(META_DATA);
164 tag_action.mask = RTE_BE32(0xffffffff);
165 tag_action.index = TAG_INDEX;
168 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
169 actions[actions_counter].conf = &tag_action;
173 add_port_id(struct rte_flow_action *actions,
174 uint8_t actions_counter,
175 __rte_unused struct additional_para para)
177 static struct rte_flow_action_port_id port_id;
180 port_id.id = PORT_ID_DST;
183 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
184 actions[actions_counter].conf = &port_id;
188 add_drop(struct rte_flow_action *actions,
189 uint8_t actions_counter,
190 __rte_unused struct additional_para para)
192 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
196 add_count(struct rte_flow_action *actions,
197 uint8_t actions_counter,
198 __rte_unused struct additional_para para)
200 static struct rte_flow_action_count count_action;
202 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
203 actions[actions_counter].conf = &count_action;
207 add_set_src_mac(struct rte_flow_action *actions,
208 uint8_t actions_counter,
209 __rte_unused struct additional_para para)
211 static struct rte_flow_action_set_mac set_mac;
212 uint32_t mac = para.counter;
215 /* Mac address to be set is random each time */
216 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
217 set_mac.mac_addr[i] = mac & 0xff;
221 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
222 actions[actions_counter].conf = &set_mac;
226 add_set_dst_mac(struct rte_flow_action *actions,
227 uint8_t actions_counter,
228 __rte_unused struct additional_para para)
230 static struct rte_flow_action_set_mac set_mac;
231 uint32_t mac = para.counter;
234 /* Mac address to be set is random each time */
235 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
236 set_mac.mac_addr[i] = mac & 0xff;
240 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
241 actions[actions_counter].conf = &set_mac;
245 add_set_src_ipv4(struct rte_flow_action *actions,
246 uint8_t actions_counter,
247 __rte_unused struct additional_para para)
249 static struct rte_flow_action_set_ipv4 set_ipv4;
251 /* IPv4 value to be set is random each time */
252 set_ipv4.ipv4_addr = RTE_BE32(para.counter + 1);
254 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
255 actions[actions_counter].conf = &set_ipv4;
259 add_set_dst_ipv4(struct rte_flow_action *actions,
260 uint8_t actions_counter,
261 __rte_unused struct additional_para para)
263 static struct rte_flow_action_set_ipv4 set_ipv4;
265 /* IPv4 value to be set is random each time */
266 set_ipv4.ipv4_addr = RTE_BE32(para.counter + 1);
268 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
269 actions[actions_counter].conf = &set_ipv4;
273 add_set_src_ipv6(struct rte_flow_action *actions,
274 uint8_t actions_counter,
275 __rte_unused struct additional_para para)
277 static struct rte_flow_action_set_ipv6 set_ipv6;
278 uint32_t ipv6 = para.counter;
281 /* IPv6 value to set is random each time */
282 for (i = 0; i < 16; i++) {
283 set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
287 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
288 actions[actions_counter].conf = &set_ipv6;
292 add_set_dst_ipv6(struct rte_flow_action *actions,
293 uint8_t actions_counter,
294 __rte_unused struct additional_para para)
296 static struct rte_flow_action_set_ipv6 set_ipv6;
297 uint32_t ipv6 = para.counter;
300 /* IPv6 value to set is random each time */
301 for (i = 0; i < 16; i++) {
302 set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
306 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
307 actions[actions_counter].conf = &set_ipv6;
311 add_set_src_tp(struct rte_flow_action *actions,
312 uint8_t actions_counter,
313 __rte_unused struct additional_para para)
315 static struct rte_flow_action_set_tp set_tp;
316 uint32_t tp = para.counter;
318 /* TP src port is random each time */
322 set_tp.port = RTE_BE16(tp & 0xffff);
324 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
325 actions[actions_counter].conf = &set_tp;
329 add_set_dst_tp(struct rte_flow_action *actions,
330 uint8_t actions_counter,
331 __rte_unused struct additional_para para)
333 static struct rte_flow_action_set_tp set_tp;
334 uint32_t tp = para.counter;
336 /* TP src port is random each time */
340 set_tp.port = RTE_BE16(tp & 0xffff);
342 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
343 actions[actions_counter].conf = &set_tp;
347 add_inc_tcp_ack(struct rte_flow_action *actions,
348 uint8_t actions_counter,
349 __rte_unused struct additional_para para)
351 static rte_be32_t value = RTE_BE32(1);
353 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
354 actions[actions_counter].conf = &value;
358 add_dec_tcp_ack(struct rte_flow_action *actions,
359 uint8_t actions_counter,
360 __rte_unused struct additional_para para)
362 static rte_be32_t value = RTE_BE32(1);
364 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
365 actions[actions_counter].conf = &value;
369 add_inc_tcp_seq(struct rte_flow_action *actions,
370 uint8_t actions_counter,
371 __rte_unused struct additional_para para)
373 static rte_be32_t value = RTE_BE32(1);
375 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
376 actions[actions_counter].conf = &value;
380 add_dec_tcp_seq(struct rte_flow_action *actions,
381 uint8_t actions_counter,
382 __rte_unused struct additional_para para)
384 static rte_be32_t value = RTE_BE32(1);
386 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
387 actions[actions_counter].conf = &value;
391 add_set_ttl(struct rte_flow_action *actions,
392 uint8_t actions_counter,
393 __rte_unused struct additional_para para)
395 static struct rte_flow_action_set_ttl set_ttl;
396 uint32_t ttl_value = para.counter;
398 /* Set ttl to random value each time */
399 while (ttl_value > 0xff)
400 ttl_value = ttl_value >> 8;
402 set_ttl.ttl_value = ttl_value;
404 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
405 actions[actions_counter].conf = &set_ttl;
409 add_dec_ttl(struct rte_flow_action *actions,
410 uint8_t actions_counter,
411 __rte_unused struct additional_para para)
413 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
417 add_set_ipv4_dscp(struct rte_flow_action *actions,
418 uint8_t actions_counter,
419 __rte_unused struct additional_para para)
421 static struct rte_flow_action_set_dscp set_dscp;
422 uint32_t dscp_value = para.counter;
424 /* Set dscp to random value each time */
425 while (dscp_value > 0xff)
426 dscp_value = dscp_value >> 8;
428 set_dscp.dscp = dscp_value;
430 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
431 actions[actions_counter].conf = &set_dscp;
435 add_set_ipv6_dscp(struct rte_flow_action *actions,
436 uint8_t actions_counter,
437 __rte_unused struct additional_para para)
439 static struct rte_flow_action_set_dscp set_dscp;
440 uint32_t dscp_value = para.counter;
442 /* Set dscp to random value each time */
443 while (dscp_value > 0xff)
444 dscp_value = dscp_value >> 8;
446 set_dscp.dscp = dscp_value;
448 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
449 actions[actions_counter].conf = &set_dscp;
453 add_flag(struct rte_flow_action *actions,
454 uint8_t actions_counter,
455 __rte_unused struct additional_para para)
457 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
461 add_ether_header(uint8_t **header, uint64_t data,
462 __rte_unused struct additional_para para)
464 struct rte_flow_item_eth eth_item;
466 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
469 memset(ð_item, 0, sizeof(struct rte_flow_item_eth));
470 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
471 eth_item.type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
472 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
473 eth_item.type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
474 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
475 eth_item.type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
476 memcpy(*header, ð_item, sizeof(eth_item));
477 *header += sizeof(eth_item);
481 add_vlan_header(uint8_t **header, uint64_t data,
482 __rte_unused struct additional_para para)
484 struct rte_flow_item_vlan vlan_item;
487 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
490 vlan_value = VLAN_VALUE;
492 memset(&vlan_item, 0, sizeof(struct rte_flow_item_vlan));
493 vlan_item.tci = RTE_BE16(vlan_value);
495 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
496 vlan_item.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
497 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
498 vlan_item.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
499 memcpy(*header, &vlan_item, sizeof(vlan_item));
500 *header += sizeof(vlan_item);
504 add_ipv4_header(uint8_t **header, uint64_t data,
505 struct additional_para para)
507 struct rte_flow_item_ipv4 ipv4_item;
509 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
512 memset(&ipv4_item, 0, sizeof(struct rte_flow_item_ipv4));
513 ipv4_item.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
514 ipv4_item.hdr.dst_addr = RTE_BE32(para.counter);
515 ipv4_item.hdr.version_ihl = RTE_IPV4_VHL_DEF;
516 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
517 ipv4_item.hdr.next_proto_id = RTE_IP_TYPE_UDP;
518 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
519 ipv4_item.hdr.next_proto_id = RTE_IP_TYPE_GRE;
520 memcpy(*header, &ipv4_item, sizeof(ipv4_item));
521 *header += sizeof(ipv4_item);
525 add_ipv6_header(uint8_t **header, uint64_t data,
526 __rte_unused struct additional_para para)
528 struct rte_flow_item_ipv6 ipv6_item;
530 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
533 memset(&ipv6_item, 0, sizeof(struct rte_flow_item_ipv6));
534 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
535 ipv6_item.hdr.proto = RTE_IP_TYPE_UDP;
536 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
537 ipv6_item.hdr.proto = RTE_IP_TYPE_GRE;
538 memcpy(*header, &ipv6_item, sizeof(ipv6_item));
539 *header += sizeof(ipv6_item);
543 add_udp_header(uint8_t **header, uint64_t data,
544 __rte_unused struct additional_para para)
546 struct rte_flow_item_udp udp_item;
548 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
551 memset(&udp_item, 0, sizeof(struct rte_flow_item_udp));
552 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
553 udp_item.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
554 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
555 udp_item.hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
556 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
557 udp_item.hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
558 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
559 udp_item.hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
560 memcpy(*header, &udp_item, sizeof(udp_item));
561 *header += sizeof(udp_item);
565 add_vxlan_header(uint8_t **header, uint64_t data,
566 struct additional_para para)
568 struct rte_flow_item_vxlan vxlan_item;
569 uint32_t vni_value = para.counter;
572 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
575 memset(&vxlan_item, 0, sizeof(struct rte_flow_item_vxlan));
577 for (i = 0; i < 3; i++)
578 vxlan_item.vni[2 - i] = vni_value >> (i * 8);
579 vxlan_item.flags = 0x8;
581 memcpy(*header, &vxlan_item, sizeof(vxlan_item));
582 *header += sizeof(vxlan_item);
586 add_vxlan_gpe_header(uint8_t **header, uint64_t data,
587 struct additional_para para)
589 struct rte_flow_item_vxlan_gpe vxlan_gpe_item;
590 uint32_t vni_value = para.counter;
593 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
596 memset(&vxlan_gpe_item, 0, sizeof(struct rte_flow_item_vxlan_gpe));
598 for (i = 0; i < 3; i++)
599 vxlan_gpe_item.vni[2 - i] = vni_value >> (i * 8);
600 vxlan_gpe_item.flags = 0x0c;
602 memcpy(*header, &vxlan_gpe_item, sizeof(vxlan_gpe_item));
603 *header += sizeof(vxlan_gpe_item);
607 add_gre_header(uint8_t **header, uint64_t data,
608 __rte_unused struct additional_para para)
610 struct rte_flow_item_gre gre_item;
612 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
615 memset(&gre_item, 0, sizeof(struct rte_flow_item_gre));
617 gre_item.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB);
619 memcpy(*header, &gre_item, sizeof(gre_item));
620 *header += sizeof(gre_item);
624 add_geneve_header(uint8_t **header, uint64_t data,
625 struct additional_para para)
627 struct rte_flow_item_geneve geneve_item;
628 uint32_t vni_value = para.counter;
631 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
634 memset(&geneve_item, 0, sizeof(struct rte_flow_item_geneve));
636 for (i = 0; i < 3; i++)
637 geneve_item.vni[2 - i] = vni_value >> (i * 8);
639 memcpy(*header, &geneve_item, sizeof(geneve_item));
640 *header += sizeof(geneve_item);
644 add_gtp_header(uint8_t **header, uint64_t data,
645 struct additional_para para)
647 struct rte_flow_item_gtp gtp_item;
649 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
652 memset(>p_item, 0, sizeof(struct rte_flow_item_gtp));
654 gtp_item.teid = RTE_BE32(para.counter);
655 gtp_item.msg_type = 255;
657 memcpy(*header, >p_item, sizeof(gtp_item));
658 *header += sizeof(gtp_item);
661 static const struct encap_decap_headers {
665 struct additional_para para
668 {.funct = add_ether_header},
669 {.funct = add_vlan_header},
670 {.funct = add_ipv4_header},
671 {.funct = add_ipv6_header},
672 {.funct = add_udp_header},
673 {.funct = add_vxlan_header},
674 {.funct = add_vxlan_gpe_header},
675 {.funct = add_gre_header},
676 {.funct = add_geneve_header},
677 {.funct = add_gtp_header},
681 add_raw_encap(struct rte_flow_action *actions,
682 uint8_t actions_counter,
683 struct additional_para para)
685 static struct action_raw_encap_data *action_encap_data;
686 uint64_t encap_data = para.encap_data;
690 /* Avoid double allocation. */
691 if (action_encap_data == NULL)
692 action_encap_data = rte_malloc("encap_data",
693 sizeof(struct action_raw_encap_data), 0);
695 /* Check if allocation failed. */
696 if (action_encap_data == NULL)
697 rte_exit(EXIT_FAILURE, "No Memory available!");
699 *action_encap_data = (struct action_raw_encap_data) {
700 .conf = (struct rte_flow_action_raw_encap) {
701 .data = action_encap_data->data,
705 header = action_encap_data->data;
707 for (i = 0; i < RTE_DIM(headers); i++)
708 headers[i].funct(&header, encap_data, para);
710 action_encap_data->conf.size = header -
711 action_encap_data->data;
713 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
714 actions[actions_counter].conf = &action_encap_data->conf;
718 add_raw_decap(struct rte_flow_action *actions,
719 uint8_t actions_counter,
720 struct additional_para para)
722 static struct action_raw_decap_data *action_decap_data;
723 uint64_t decap_data = para.decap_data;
727 /* Avoid double allocation. */
728 if (action_decap_data == NULL)
729 action_decap_data = rte_malloc("decap_data",
730 sizeof(struct action_raw_decap_data), 0);
732 /* Check if allocation failed. */
733 if (action_decap_data == NULL)
734 rte_exit(EXIT_FAILURE, "No Memory available!");
736 *action_decap_data = (struct action_raw_decap_data) {
737 .conf = (struct rte_flow_action_raw_decap) {
738 .data = action_decap_data->data,
742 header = action_decap_data->data;
744 for (i = 0; i < RTE_DIM(headers); i++)
745 headers[i].funct(&header, decap_data, para);
747 action_decap_data->conf.size = header -
748 action_decap_data->data;
750 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
751 actions[actions_counter].conf = &action_decap_data->conf;
755 add_vxlan_encap(struct rte_flow_action *actions,
756 uint8_t actions_counter,
757 __rte_unused struct additional_para para)
759 static struct rte_flow_action_vxlan_encap vxlan_encap;
760 static struct rte_flow_item items[5];
761 static struct rte_flow_item_eth item_eth;
762 static struct rte_flow_item_ipv4 item_ipv4;
763 static struct rte_flow_item_udp item_udp;
764 static struct rte_flow_item_vxlan item_vxlan;
766 items[0].spec = &item_eth;
767 items[0].mask = &item_eth;
768 items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
770 item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
771 item_ipv4.hdr.dst_addr = RTE_IPV4(255, 255, 255, 255);
772 item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
773 items[1].spec = &item_ipv4;
774 items[1].mask = &item_ipv4;
775 items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
778 item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
779 items[2].spec = &item_udp;
780 items[2].mask = &item_udp;
781 items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
784 item_vxlan.vni[2] = 1;
785 items[3].spec = &item_vxlan;
786 items[3].mask = &item_vxlan;
787 items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
789 items[4].type = RTE_FLOW_ITEM_TYPE_END;
791 vxlan_encap.definition = items;
793 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
794 actions[actions_counter].conf = &vxlan_encap;
798 add_vxlan_decap(struct rte_flow_action *actions,
799 uint8_t actions_counter,
800 __rte_unused struct additional_para para)
802 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
806 fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
807 uint32_t counter, uint16_t next_table, uint16_t hairpinq,
808 uint64_t encap_data, uint64_t decap_data)
810 struct additional_para additional_para_data;
811 uint8_t actions_counter = 0;
812 uint16_t hairpin_queues[hairpinq];
813 uint16_t queues[RXQ_NUM];
816 for (i = 0; i < RXQ_NUM; i++)
819 for (i = 0; i < hairpinq; i++)
820 hairpin_queues[i] = i + RXQ_NUM;
822 additional_para_data = (struct additional_para){
823 .queue = counter % RXQ_NUM,
824 .next_table = next_table,
826 .queues_number = RXQ_NUM,
828 .encap_data = encap_data,
829 .decap_data = decap_data,
833 additional_para_data.queues = hairpin_queues;
834 additional_para_data.queues_number = hairpinq;
835 additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
838 static const struct actions_dict {
841 struct rte_flow_action *actions,
842 uint8_t actions_counter,
843 struct additional_para para
847 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
851 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
855 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
856 .funct = add_set_meta,
859 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
860 .funct = add_set_tag,
863 .mask = FLOW_ACTION_MASK(
864 RTE_FLOW_ACTION_TYPE_FLAG
869 .mask = FLOW_ACTION_MASK(
870 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
872 .funct = add_set_src_mac,
875 .mask = FLOW_ACTION_MASK(
876 RTE_FLOW_ACTION_TYPE_SET_MAC_DST
878 .funct = add_set_dst_mac,
881 .mask = FLOW_ACTION_MASK(
882 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
884 .funct = add_set_src_ipv4,
887 .mask = FLOW_ACTION_MASK(
888 RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
890 .funct = add_set_dst_ipv4,
893 .mask = FLOW_ACTION_MASK(
894 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
896 .funct = add_set_src_ipv6,
899 .mask = FLOW_ACTION_MASK(
900 RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
902 .funct = add_set_dst_ipv6,
905 .mask = FLOW_ACTION_MASK(
906 RTE_FLOW_ACTION_TYPE_SET_TP_SRC
908 .funct = add_set_src_tp,
911 .mask = FLOW_ACTION_MASK(
912 RTE_FLOW_ACTION_TYPE_SET_TP_DST
914 .funct = add_set_dst_tp,
917 .mask = FLOW_ACTION_MASK(
918 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
920 .funct = add_inc_tcp_ack,
923 .mask = FLOW_ACTION_MASK(
924 RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
926 .funct = add_dec_tcp_ack,
929 .mask = FLOW_ACTION_MASK(
930 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
932 .funct = add_inc_tcp_seq,
935 .mask = FLOW_ACTION_MASK(
936 RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
938 .funct = add_dec_tcp_seq,
941 .mask = FLOW_ACTION_MASK(
942 RTE_FLOW_ACTION_TYPE_SET_TTL
944 .funct = add_set_ttl,
947 .mask = FLOW_ACTION_MASK(
948 RTE_FLOW_ACTION_TYPE_DEC_TTL
950 .funct = add_dec_ttl,
953 .mask = FLOW_ACTION_MASK(
954 RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
956 .funct = add_set_ipv4_dscp,
959 .mask = FLOW_ACTION_MASK(
960 RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
962 .funct = add_set_ipv6_dscp,
965 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
969 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
973 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
977 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
981 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
985 .mask = HAIRPIN_QUEUE_ACTION,
989 .mask = HAIRPIN_RSS_ACTION,
993 .mask = FLOW_ACTION_MASK(
994 RTE_FLOW_ACTION_TYPE_RAW_ENCAP
996 .funct = add_raw_encap,
999 .mask = FLOW_ACTION_MASK(
1000 RTE_FLOW_ACTION_TYPE_RAW_DECAP
1002 .funct = add_raw_decap,
1005 .mask = FLOW_ACTION_MASK(
1006 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
1008 .funct = add_vxlan_encap,
1011 .mask = FLOW_ACTION_MASK(
1012 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
1014 .funct = add_vxlan_decap,
1018 for (j = 0; j < MAX_ACTIONS_NUM; j++) {
1019 if (flow_actions[j] == 0)
1021 for (i = 0; i < RTE_DIM(actions_list); i++) {
1022 if ((flow_actions[j] &
1023 actions_list[i].mask) == 0)
1025 actions_list[i].funct(
1026 actions, actions_counter++,
1027 additional_para_data
1032 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;