/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 *
 * The file contains the implementations of actions generators.
 * Each generator is responsible for preparing its action instance
 * and initializing it with needed data.
 */
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_vxlan.h>

#include "actions_gen.h"
20 /* Storage for additional parameters for actions */
21 struct additional_para {
25 uint16_t queues_number;
31 /* Storage for struct rte_flow_action_raw_encap including external data. */
32 struct action_raw_encap_data {
33 struct rte_flow_action_raw_encap conf;
35 uint8_t preserve[128];
39 /* Storage for struct rte_flow_action_raw_decap including external data. */
40 struct action_raw_decap_data {
41 struct rte_flow_action_raw_decap conf;
46 /* Storage for struct rte_flow_action_rss including external data. */
47 struct action_rss_data {
48 struct rte_flow_action_rss conf;
54 add_mark(struct rte_flow_action *actions,
55 uint8_t actions_counter,
56 __rte_unused struct additional_para para)
58 static struct rte_flow_action_mark mark_action;
61 mark_action.id = MARK_ID;
64 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
65 actions[actions_counter].conf = &mark_action;
69 add_queue(struct rte_flow_action *actions,
70 uint8_t actions_counter,
71 struct additional_para para)
73 static struct rte_flow_action_queue queue_action;
76 queue_action.index = para.queue;
79 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
80 actions[actions_counter].conf = &queue_action;
84 add_jump(struct rte_flow_action *actions,
85 uint8_t actions_counter,
86 struct additional_para para)
88 static struct rte_flow_action_jump jump_action;
91 jump_action.group = para.next_table;
94 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
95 actions[actions_counter].conf = &jump_action;
99 add_rss(struct rte_flow_action *actions,
100 uint8_t actions_counter,
101 struct additional_para para)
103 static struct rte_flow_action_rss *rss_action;
104 static struct action_rss_data *rss_data;
108 if (rss_data == NULL)
109 rss_data = rte_malloc("rss_data",
110 sizeof(struct action_rss_data), 0);
112 if (rss_data == NULL)
113 rte_exit(EXIT_FAILURE, "No Memory available!");
115 *rss_data = (struct action_rss_data){
116 .conf = (struct rte_flow_action_rss){
117 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
119 .types = GET_RSS_HF(),
120 .key_len = sizeof(rss_data->key),
121 .queue_num = para.queues_number,
122 .key = rss_data->key,
123 .queue = rss_data->queue,
129 for (queue = 0; queue < para.queues_number; queue++)
130 rss_data->queue[queue] = para.queues[queue];
132 rss_action = &rss_data->conf;
134 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
135 actions[actions_counter].conf = rss_action;
139 add_set_meta(struct rte_flow_action *actions,
140 uint8_t actions_counter,
141 __rte_unused struct additional_para para)
143 static struct rte_flow_action_set_meta meta_action;
146 meta_action.data = RTE_BE32(META_DATA);
147 meta_action.mask = RTE_BE32(0xffffffff);
150 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
151 actions[actions_counter].conf = &meta_action;
155 add_set_tag(struct rte_flow_action *actions,
156 uint8_t actions_counter,
157 __rte_unused struct additional_para para)
159 static struct rte_flow_action_set_tag tag_action;
162 tag_action.data = RTE_BE32(META_DATA);
163 tag_action.mask = RTE_BE32(0xffffffff);
164 tag_action.index = TAG_INDEX;
167 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
168 actions[actions_counter].conf = &tag_action;
172 add_port_id(struct rte_flow_action *actions,
173 uint8_t actions_counter,
174 __rte_unused struct additional_para para)
176 static struct rte_flow_action_port_id port_id;
179 port_id.id = PORT_ID_DST;
182 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
183 actions[actions_counter].conf = &port_id;
187 add_drop(struct rte_flow_action *actions,
188 uint8_t actions_counter,
189 __rte_unused struct additional_para para)
191 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
195 add_count(struct rte_flow_action *actions,
196 uint8_t actions_counter,
197 __rte_unused struct additional_para para)
199 static struct rte_flow_action_count count_action;
201 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
202 actions[actions_counter].conf = &count_action;
206 add_set_src_mac(struct rte_flow_action *actions,
207 uint8_t actions_counter,
208 __rte_unused struct additional_para para)
210 static struct rte_flow_action_set_mac set_mac;
211 uint32_t mac = para.counter;
214 /* Mac address to be set is random each time */
215 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
216 set_mac.mac_addr[i] = mac & 0xff;
220 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
221 actions[actions_counter].conf = &set_mac;
225 add_set_dst_mac(struct rte_flow_action *actions,
226 uint8_t actions_counter,
227 __rte_unused struct additional_para para)
229 static struct rte_flow_action_set_mac set_mac;
230 uint32_t mac = para.counter;
233 /* Mac address to be set is random each time */
234 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
235 set_mac.mac_addr[i] = mac & 0xff;
239 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
240 actions[actions_counter].conf = &set_mac;
244 add_set_src_ipv4(struct rte_flow_action *actions,
245 uint8_t actions_counter,
246 __rte_unused struct additional_para para)
248 static struct rte_flow_action_set_ipv4 set_ipv4;
250 /* IPv4 value to be set is random each time */
251 set_ipv4.ipv4_addr = RTE_BE32(para.counter + 1);
253 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
254 actions[actions_counter].conf = &set_ipv4;
258 add_set_dst_ipv4(struct rte_flow_action *actions,
259 uint8_t actions_counter,
260 __rte_unused struct additional_para para)
262 static struct rte_flow_action_set_ipv4 set_ipv4;
264 /* IPv4 value to be set is random each time */
265 set_ipv4.ipv4_addr = RTE_BE32(para.counter + 1);
267 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
268 actions[actions_counter].conf = &set_ipv4;
272 add_set_src_ipv6(struct rte_flow_action *actions,
273 uint8_t actions_counter,
274 __rte_unused struct additional_para para)
276 static struct rte_flow_action_set_ipv6 set_ipv6;
277 uint32_t ipv6 = para.counter;
280 /* IPv6 value to set is random each time */
281 for (i = 0; i < 16; i++) {
282 set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
286 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
287 actions[actions_counter].conf = &set_ipv6;
291 add_set_dst_ipv6(struct rte_flow_action *actions,
292 uint8_t actions_counter,
293 __rte_unused struct additional_para para)
295 static struct rte_flow_action_set_ipv6 set_ipv6;
296 uint32_t ipv6 = para.counter;
299 /* IPv6 value to set is random each time */
300 for (i = 0; i < 16; i++) {
301 set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
305 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
306 actions[actions_counter].conf = &set_ipv6;
310 add_set_src_tp(struct rte_flow_action *actions,
311 uint8_t actions_counter,
312 __rte_unused struct additional_para para)
314 static struct rte_flow_action_set_tp set_tp;
315 uint32_t tp = para.counter;
317 /* TP src port is random each time */
321 set_tp.port = RTE_BE16(tp & 0xffff);
323 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
324 actions[actions_counter].conf = &set_tp;
328 add_set_dst_tp(struct rte_flow_action *actions,
329 uint8_t actions_counter,
330 __rte_unused struct additional_para para)
332 static struct rte_flow_action_set_tp set_tp;
333 uint32_t tp = para.counter;
335 /* TP src port is random each time */
339 set_tp.port = RTE_BE16(tp & 0xffff);
341 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
342 actions[actions_counter].conf = &set_tp;
346 add_inc_tcp_ack(struct rte_flow_action *actions,
347 uint8_t actions_counter,
348 __rte_unused struct additional_para para)
350 static rte_be32_t value = RTE_BE32(1);
352 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
353 actions[actions_counter].conf = &value;
357 add_dec_tcp_ack(struct rte_flow_action *actions,
358 uint8_t actions_counter,
359 __rte_unused struct additional_para para)
361 static rte_be32_t value = RTE_BE32(1);
363 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
364 actions[actions_counter].conf = &value;
368 add_inc_tcp_seq(struct rte_flow_action *actions,
369 uint8_t actions_counter,
370 __rte_unused struct additional_para para)
372 static rte_be32_t value = RTE_BE32(1);
374 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
375 actions[actions_counter].conf = &value;
379 add_dec_tcp_seq(struct rte_flow_action *actions,
380 uint8_t actions_counter,
381 __rte_unused struct additional_para para)
383 static rte_be32_t value = RTE_BE32(1);
385 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
386 actions[actions_counter].conf = &value;
390 add_set_ttl(struct rte_flow_action *actions,
391 uint8_t actions_counter,
392 __rte_unused struct additional_para para)
394 static struct rte_flow_action_set_ttl set_ttl;
395 uint32_t ttl_value = para.counter;
397 /* Set ttl to random value each time */
398 while (ttl_value > 0xff)
399 ttl_value = ttl_value >> 8;
401 set_ttl.ttl_value = ttl_value;
403 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
404 actions[actions_counter].conf = &set_ttl;
408 add_dec_ttl(struct rte_flow_action *actions,
409 uint8_t actions_counter,
410 __rte_unused struct additional_para para)
412 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
416 add_set_ipv4_dscp(struct rte_flow_action *actions,
417 uint8_t actions_counter,
418 __rte_unused struct additional_para para)
420 static struct rte_flow_action_set_dscp set_dscp;
421 uint32_t dscp_value = para.counter;
423 /* Set dscp to random value each time */
424 while (dscp_value > 0xff)
425 dscp_value = dscp_value >> 8;
427 set_dscp.dscp = dscp_value;
429 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
430 actions[actions_counter].conf = &set_dscp;
434 add_set_ipv6_dscp(struct rte_flow_action *actions,
435 uint8_t actions_counter,
436 __rte_unused struct additional_para para)
438 static struct rte_flow_action_set_dscp set_dscp;
439 uint32_t dscp_value = para.counter;
441 /* Set dscp to random value each time */
442 while (dscp_value > 0xff)
443 dscp_value = dscp_value >> 8;
445 set_dscp.dscp = dscp_value;
447 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
448 actions[actions_counter].conf = &set_dscp;
452 add_flag(struct rte_flow_action *actions,
453 uint8_t actions_counter,
454 __rte_unused struct additional_para para)
456 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
460 add_ether_header(uint8_t **header, uint64_t data,
461 __rte_unused struct additional_para para)
463 struct rte_flow_item_eth eth_item;
465 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
468 memset(ð_item, 0, sizeof(struct rte_flow_item_eth));
469 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
470 eth_item.type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
471 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
472 eth_item.type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
473 else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
474 eth_item.type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
475 memcpy(*header, ð_item, sizeof(eth_item));
476 *header += sizeof(eth_item);
480 add_vlan_header(uint8_t **header, uint64_t data,
481 __rte_unused struct additional_para para)
483 struct rte_flow_item_vlan vlan_item;
486 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
489 vlan_value = VLAN_VALUE;
491 memset(&vlan_item, 0, sizeof(struct rte_flow_item_vlan));
492 vlan_item.tci = RTE_BE16(vlan_value);
494 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
495 vlan_item.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
496 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
497 vlan_item.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
498 memcpy(*header, &vlan_item, sizeof(vlan_item));
499 *header += sizeof(vlan_item);
503 add_ipv4_header(uint8_t **header, uint64_t data,
504 struct additional_para para)
506 struct rte_flow_item_ipv4 ipv4_item;
508 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
511 memset(&ipv4_item, 0, sizeof(struct rte_flow_item_ipv4));
512 ipv4_item.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
513 ipv4_item.hdr.dst_addr = RTE_BE32(para.counter);
514 ipv4_item.hdr.version_ihl = RTE_IPV4_VHL_DEF;
515 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
516 ipv4_item.hdr.next_proto_id = RTE_IP_TYPE_UDP;
517 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
518 ipv4_item.hdr.next_proto_id = RTE_IP_TYPE_GRE;
519 memcpy(*header, &ipv4_item, sizeof(ipv4_item));
520 *header += sizeof(ipv4_item);
524 add_ipv6_header(uint8_t **header, uint64_t data,
525 __rte_unused struct additional_para para)
527 struct rte_flow_item_ipv6 ipv6_item;
529 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
532 memset(&ipv6_item, 0, sizeof(struct rte_flow_item_ipv6));
533 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
534 ipv6_item.hdr.proto = RTE_IP_TYPE_UDP;
535 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
536 ipv6_item.hdr.proto = RTE_IP_TYPE_GRE;
537 memcpy(*header, &ipv6_item, sizeof(ipv6_item));
538 *header += sizeof(ipv6_item);
542 add_udp_header(uint8_t **header, uint64_t data,
543 __rte_unused struct additional_para para)
545 struct rte_flow_item_udp udp_item;
547 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
550 memset(&udp_item, 0, sizeof(struct rte_flow_item_udp));
551 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
552 udp_item.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
553 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
554 udp_item.hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
555 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
556 udp_item.hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
557 if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
558 udp_item.hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
559 memcpy(*header, &udp_item, sizeof(udp_item));
560 *header += sizeof(udp_item);
564 add_vxlan_header(uint8_t **header, uint64_t data,
565 struct additional_para para)
567 struct rte_flow_item_vxlan vxlan_item;
568 uint32_t vni_value = para.counter;
571 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
574 memset(&vxlan_item, 0, sizeof(struct rte_flow_item_vxlan));
576 for (i = 0; i < 3; i++)
577 vxlan_item.vni[2 - i] = vni_value >> (i * 8);
578 vxlan_item.flags = 0x8;
580 memcpy(*header, &vxlan_item, sizeof(vxlan_item));
581 *header += sizeof(vxlan_item);
585 add_vxlan_gpe_header(uint8_t **header, uint64_t data,
586 struct additional_para para)
588 struct rte_flow_item_vxlan_gpe vxlan_gpe_item;
589 uint32_t vni_value = para.counter;
592 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
595 memset(&vxlan_gpe_item, 0, sizeof(struct rte_flow_item_vxlan_gpe));
597 for (i = 0; i < 3; i++)
598 vxlan_gpe_item.vni[2 - i] = vni_value >> (i * 8);
599 vxlan_gpe_item.flags = 0x0c;
601 memcpy(*header, &vxlan_gpe_item, sizeof(vxlan_gpe_item));
602 *header += sizeof(vxlan_gpe_item);
606 add_gre_header(uint8_t **header, uint64_t data,
607 __rte_unused struct additional_para para)
609 struct rte_flow_item_gre gre_item;
611 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
614 memset(&gre_item, 0, sizeof(struct rte_flow_item_gre));
616 gre_item.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB);
618 memcpy(*header, &gre_item, sizeof(gre_item));
619 *header += sizeof(gre_item);
623 add_geneve_header(uint8_t **header, uint64_t data,
624 struct additional_para para)
626 struct rte_flow_item_geneve geneve_item;
627 uint32_t vni_value = para.counter;
630 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
633 memset(&geneve_item, 0, sizeof(struct rte_flow_item_geneve));
635 for (i = 0; i < 3; i++)
636 geneve_item.vni[2 - i] = vni_value >> (i * 8);
638 memcpy(*header, &geneve_item, sizeof(geneve_item));
639 *header += sizeof(geneve_item);
643 add_gtp_header(uint8_t **header, uint64_t data,
644 struct additional_para para)
646 struct rte_flow_item_gtp gtp_item;
648 if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
651 memset(>p_item, 0, sizeof(struct rte_flow_item_gtp));
653 gtp_item.teid = RTE_BE32(para.counter);
654 gtp_item.msg_type = 255;
656 memcpy(*header, >p_item, sizeof(gtp_item));
657 *header += sizeof(gtp_item);
660 static const struct encap_decap_headers {
664 struct additional_para para
667 {.funct = add_ether_header},
668 {.funct = add_vlan_header},
669 {.funct = add_ipv4_header},
670 {.funct = add_ipv6_header},
671 {.funct = add_udp_header},
672 {.funct = add_vxlan_header},
673 {.funct = add_vxlan_gpe_header},
674 {.funct = add_gre_header},
675 {.funct = add_geneve_header},
676 {.funct = add_gtp_header},
680 add_raw_encap(struct rte_flow_action *actions,
681 uint8_t actions_counter,
682 struct additional_para para)
684 static struct action_raw_encap_data *action_encap_data;
685 uint64_t encap_data = para.encap_data;
689 /* Avoid double allocation. */
690 if (action_encap_data == NULL)
691 action_encap_data = rte_malloc("encap_data",
692 sizeof(struct action_raw_encap_data), 0);
694 /* Check if allocation failed. */
695 if (action_encap_data == NULL)
696 rte_exit(EXIT_FAILURE, "No Memory available!");
698 *action_encap_data = (struct action_raw_encap_data) {
699 .conf = (struct rte_flow_action_raw_encap) {
700 .data = action_encap_data->data,
704 header = action_encap_data->data;
706 for (i = 0; i < RTE_DIM(headers); i++)
707 headers[i].funct(&header, encap_data, para);
709 action_encap_data->conf.size = header -
710 action_encap_data->data;
712 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
713 actions[actions_counter].conf = &action_encap_data->conf;
717 add_raw_decap(struct rte_flow_action *actions,
718 uint8_t actions_counter,
719 struct additional_para para)
721 static struct action_raw_decap_data *action_decap_data;
722 uint64_t decap_data = para.decap_data;
726 /* Avoid double allocation. */
727 if (action_decap_data == NULL)
728 action_decap_data = rte_malloc("decap_data",
729 sizeof(struct action_raw_decap_data), 0);
731 /* Check if allocation failed. */
732 if (action_decap_data == NULL)
733 rte_exit(EXIT_FAILURE, "No Memory available!");
735 *action_decap_data = (struct action_raw_decap_data) {
736 .conf = (struct rte_flow_action_raw_decap) {
737 .data = action_decap_data->data,
741 header = action_decap_data->data;
743 for (i = 0; i < RTE_DIM(headers); i++)
744 headers[i].funct(&header, decap_data, para);
746 action_decap_data->conf.size = header -
747 action_decap_data->data;
749 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
750 actions[actions_counter].conf = &action_decap_data->conf;
754 fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
755 uint32_t counter, uint16_t next_table, uint16_t hairpinq,
756 uint64_t encap_data, uint64_t decap_data)
758 struct additional_para additional_para_data;
759 uint8_t actions_counter = 0;
760 uint16_t hairpin_queues[hairpinq];
761 uint16_t queues[RXQ_NUM];
764 for (i = 0; i < RXQ_NUM; i++)
767 for (i = 0; i < hairpinq; i++)
768 hairpin_queues[i] = i + RXQ_NUM;
770 additional_para_data = (struct additional_para){
771 .queue = counter % RXQ_NUM,
772 .next_table = next_table,
774 .queues_number = RXQ_NUM,
776 .encap_data = encap_data,
777 .decap_data = decap_data,
781 additional_para_data.queues = hairpin_queues;
782 additional_para_data.queues_number = hairpinq;
783 additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
786 static const struct actions_dict {
789 struct rte_flow_action *actions,
790 uint8_t actions_counter,
791 struct additional_para para
795 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
799 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
803 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
804 .funct = add_set_meta,
807 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
808 .funct = add_set_tag,
811 .mask = FLOW_ACTION_MASK(
812 RTE_FLOW_ACTION_TYPE_FLAG
817 .mask = FLOW_ACTION_MASK(
818 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
820 .funct = add_set_src_mac,
823 .mask = FLOW_ACTION_MASK(
824 RTE_FLOW_ACTION_TYPE_SET_MAC_DST
826 .funct = add_set_dst_mac,
829 .mask = FLOW_ACTION_MASK(
830 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
832 .funct = add_set_src_ipv4,
835 .mask = FLOW_ACTION_MASK(
836 RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
838 .funct = add_set_dst_ipv4,
841 .mask = FLOW_ACTION_MASK(
842 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
844 .funct = add_set_src_ipv6,
847 .mask = FLOW_ACTION_MASK(
848 RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
850 .funct = add_set_dst_ipv6,
853 .mask = FLOW_ACTION_MASK(
854 RTE_FLOW_ACTION_TYPE_SET_TP_SRC
856 .funct = add_set_src_tp,
859 .mask = FLOW_ACTION_MASK(
860 RTE_FLOW_ACTION_TYPE_SET_TP_DST
862 .funct = add_set_dst_tp,
865 .mask = FLOW_ACTION_MASK(
866 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
868 .funct = add_inc_tcp_ack,
871 .mask = FLOW_ACTION_MASK(
872 RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
874 .funct = add_dec_tcp_ack,
877 .mask = FLOW_ACTION_MASK(
878 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
880 .funct = add_inc_tcp_seq,
883 .mask = FLOW_ACTION_MASK(
884 RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
886 .funct = add_dec_tcp_seq,
889 .mask = FLOW_ACTION_MASK(
890 RTE_FLOW_ACTION_TYPE_SET_TTL
892 .funct = add_set_ttl,
895 .mask = FLOW_ACTION_MASK(
896 RTE_FLOW_ACTION_TYPE_DEC_TTL
898 .funct = add_dec_ttl,
901 .mask = FLOW_ACTION_MASK(
902 RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
904 .funct = add_set_ipv4_dscp,
907 .mask = FLOW_ACTION_MASK(
908 RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
910 .funct = add_set_ipv6_dscp,
913 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
917 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
921 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
925 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
929 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
933 .mask = HAIRPIN_QUEUE_ACTION,
937 .mask = HAIRPIN_RSS_ACTION,
941 .mask = FLOW_ACTION_MASK(
942 RTE_FLOW_ACTION_TYPE_RAW_ENCAP
944 .funct = add_raw_encap,
947 .mask = FLOW_ACTION_MASK(
948 RTE_FLOW_ACTION_TYPE_RAW_DECAP
950 .funct = add_raw_decap,
954 for (j = 0; j < MAX_ACTIONS_NUM; j++) {
955 if (flow_actions[j] == 0)
957 for (i = 0; i < RTE_DIM(actions_list); i++) {
958 if ((flow_actions[j] &
959 actions_list[i].mask) == 0)
961 actions_list[i].funct(
962 actions, actions_counter++,
968 actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;