/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"
#define rte_htons rte_cpu_to_be_16
#define rte_htonl rte_cpu_to_be_32

#define rte_ntohs rte_be_to_cpu_16
#define rte_ntohl rte_be_to_cpu_32
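/*
 * Linear lookup of an existing flow: two flows are considered identical when
 * their table rule match structures compare equal byte for byte.
 */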
static struct rte_flow *
softnic_flow_find(struct softnic_table *table,
    struct softnic_table_rule_match *rule_match)
{
    struct rte_flow *flow;

    TAILQ_FOREACH(flow, &table->flows, node)
        if (memcmp(&flow->match, rule_match, sizeof(*rule_match)) == 0)
            return flow;

    return NULL;
}
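/*
 * rte_flow groups are mapped to pipeline tables: each (group ID, direction)
 * pair resolves to one (pipeline name, table ID) entry stored per device.
 */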
int
flow_attr_map_set(struct pmd_internals *softnic,
    uint32_t group_id,
    int ingress,
    const char *pipeline_name,
    uint32_t table_id)
{
    struct pipeline *pipeline;
    struct flow_attr_map *map;

    if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
        pipeline_name == NULL)
        return -1;

    pipeline = softnic_pipeline_find(softnic, pipeline_name);
    if (pipeline == NULL ||
        table_id >= pipeline->n_tables)
        return -1;

    map = (ingress) ? &softnic->flow.ingress_map[group_id] :
        &softnic->flow.egress_map[group_id];
    strlcpy(map->pipeline_name, pipeline_name, sizeof(map->pipeline_name));
    map->table_id = table_id;
    map->valid = 1;

    return 0;
}
struct flow_attr_map *
flow_attr_map_get(struct pmd_internals *softnic,
    uint32_t group_id,
    int ingress)
{
    if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
        return NULL;

    return (ingress) ? &softnic->flow.ingress_map[group_id] :
        &softnic->flow.egress_map[group_id];
}
static int
flow_pipeline_table_get(struct pmd_internals *softnic,
    const struct rte_flow_attr *attr,
    const char **pipeline_name,
    uint32_t *table_id,
    struct rte_flow_error *error)
{
    struct flow_attr_map *map;

    if (attr == NULL)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL,
            "Null attr");

    if (!attr->ingress && !attr->egress)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr,
            "Ingress/egress not specified");

    if (attr->ingress && attr->egress)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr,
            "Setting both ingress and egress is not allowed");

    map = flow_attr_map_get(softnic,
        attr->group,
        attr->ingress);
    if (map == NULL ||
        map->valid == 0)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
            attr,
            "Invalid group ID");

    if (pipeline_name)
        *pipeline_name = map->pipeline_name;

    if (table_id)
        *table_id = map->table_id;

    return 0;
}
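/*
 * Scratch area big enough to hold the spec or mask of any supported item
 * type, with *raw* sized to the maximum table rule match size.
 */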
union flow_item {
    uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
    struct rte_flow_item_eth eth;
    struct rte_flow_item_vlan vlan;
    struct rte_flow_item_ipv4 ipv4;
    struct rte_flow_item_ipv6 ipv6;
    struct rte_flow_item_icmp icmp;
    struct rte_flow_item_udp udp;
    struct rte_flow_item_tcp tcp;
    struct rte_flow_item_sctp sctp;
    struct rte_flow_item_vxlan vxlan;
    struct rte_flow_item_e_tag e_tag;
    struct rte_flow_item_nvgre nvgre;
    struct rte_flow_item_mpls mpls;
    struct rte_flow_item_gre gre;
    struct rte_flow_item_gtp gtp;
    struct rte_flow_item_esp esp;
    struct rte_flow_item_geneve geneve;
    struct rte_flow_item_vxlan_gpe vxlan_gpe;
    struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
    struct rte_flow_item_ipv6_ext ipv6_ext;
    struct rte_flow_item_icmp6 icmp6;
    struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
    struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
    struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
    struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
    struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
};

/* All-zeros default mask for RAW items. */
static const union flow_item flow_item_raw_mask;
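/*
 * Report whether an item type is a protocol this driver can match on and, if
 * so, return its default mask and the byte size of its spec.
 */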
static int
flow_item_is_proto(enum rte_flow_item_type type,
    const void **mask,
    size_t *size)
{
    switch (type) {
    case RTE_FLOW_ITEM_TYPE_RAW:
        *mask = &flow_item_raw_mask;
        *size = sizeof(flow_item_raw_mask);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_ETH:
        *mask = &rte_flow_item_eth_mask;
        *size = sizeof(struct rte_ether_hdr);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_VLAN:
        *mask = &rte_flow_item_vlan_mask;
        *size = sizeof(struct rte_vlan_hdr);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_IPV4:
        *mask = &rte_flow_item_ipv4_mask;
        *size = sizeof(struct rte_ipv4_hdr);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_IPV6:
        *mask = &rte_flow_item_ipv6_mask;
        *size = sizeof(struct rte_ipv6_hdr);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_ICMP:
        *mask = &rte_flow_item_icmp_mask;
        *size = sizeof(struct rte_flow_item_icmp);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_UDP:
        *mask = &rte_flow_item_udp_mask;
        *size = sizeof(struct rte_flow_item_udp);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_TCP:
        *mask = &rte_flow_item_tcp_mask;
        *size = sizeof(struct rte_flow_item_tcp);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_SCTP:
        *mask = &rte_flow_item_sctp_mask;
        *size = sizeof(struct rte_flow_item_sctp);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_VXLAN:
        *mask = &rte_flow_item_vxlan_mask;
        *size = sizeof(struct rte_flow_item_vxlan);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_E_TAG:
        *mask = &rte_flow_item_e_tag_mask;
        *size = sizeof(struct rte_flow_item_e_tag);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_NVGRE:
        *mask = &rte_flow_item_nvgre_mask;
        *size = sizeof(struct rte_flow_item_nvgre);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_MPLS:
        *mask = &rte_flow_item_mpls_mask;
        *size = sizeof(struct rte_flow_item_mpls);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_GRE:
        *mask = &rte_flow_item_gre_mask;
        *size = sizeof(struct rte_flow_item_gre);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_GTP:
    case RTE_FLOW_ITEM_TYPE_GTPC:
    case RTE_FLOW_ITEM_TYPE_GTPU:
        *mask = &rte_flow_item_gtp_mask;
        *size = sizeof(struct rte_flow_item_gtp);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_ESP:
        *mask = &rte_flow_item_esp_mask;
        *size = sizeof(struct rte_flow_item_esp);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_GENEVE:
        *mask = &rte_flow_item_geneve_mask;
        *size = sizeof(struct rte_flow_item_geneve);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
        *mask = &rte_flow_item_vxlan_gpe_mask;
        *size = sizeof(struct rte_flow_item_vxlan_gpe);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
        *mask = &rte_flow_item_arp_eth_ipv4_mask;
        *size = sizeof(struct rte_flow_item_arp_eth_ipv4);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
        *mask = &rte_flow_item_ipv6_ext_mask;
        *size = sizeof(struct rte_flow_item_ipv6_ext);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_ICMP6:
        *mask = &rte_flow_item_icmp6_mask;
        *size = sizeof(struct rte_flow_item_icmp6);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
        *mask = &rte_flow_item_icmp6_nd_ns_mask;
        *size = sizeof(struct rte_flow_item_icmp6_nd_ns);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
        *mask = &rte_flow_item_icmp6_nd_na_mask;
        *size = sizeof(struct rte_flow_item_icmp6_nd_na);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
        *mask = &rte_flow_item_icmp6_nd_opt_mask;
        *size = sizeof(struct rte_flow_item_icmp6_nd_opt);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
        *mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
        *size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
        return 1; /* TRUE */

    case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
        *mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
        *size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
        return 1; /* TRUE */

    default: return 0; /* FALSE */
    }
}
static int
flow_item_raw_preprocess(const struct rte_flow_item *item,
    union flow_item *item_spec,
    union flow_item *item_mask,
    size_t *item_size,
    int *item_disabled,
    struct rte_flow_error *error)
{
    const struct rte_flow_item_raw *item_raw_spec = item->spec;
    const struct rte_flow_item_raw *item_raw_mask = item->mask;
    const uint8_t *pattern;
    const uint8_t *pattern_mask;
    uint8_t *spec = (uint8_t *)item_spec;
    uint8_t *mask = (uint8_t *)item_mask;
    size_t pattern_length, pattern_offset, i;
    int disabled;

    if (!item->spec)
        return rte_flow_error_set(error,
            ENOTSUP,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "RAW: Null specification");

    if (item->last)
        return rte_flow_error_set(error,
            ENOTSUP,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "RAW: Range not allowed (last must be NULL)");

    if (item_raw_spec->relative == 0)
        return rte_flow_error_set(error,
            ENOTSUP,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "RAW: Absolute offset not supported");

    if (item_raw_spec->search)
        return rte_flow_error_set(error,
            ENOTSUP,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "RAW: Search not supported");

    if (item_raw_spec->offset < 0)
        return rte_flow_error_set(error,
            ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "RAW: Negative offset not supported");

    if (item_raw_spec->length == 0)
        return rte_flow_error_set(error,
            ENOTSUP,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "RAW: Zero pattern length");

    if (item_raw_spec->offset + item_raw_spec->length >
        TABLE_RULE_MATCH_SIZE_MAX)
        return rte_flow_error_set(error,
            ENOTSUP,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "RAW: Item too big");

    if (!item_raw_spec->pattern && item_raw_mask && item_raw_mask->pattern)
        return rte_flow_error_set(error,
            ENOTSUP,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "RAW: Non-NULL pattern mask not allowed with NULL pattern");

    pattern = item_raw_spec->pattern;
    pattern_mask = (item_raw_mask) ? item_raw_mask->pattern : NULL;
    pattern_length = (size_t)item_raw_spec->length;
    pattern_offset = (size_t)item_raw_spec->offset;

    /* An item is disabled when its mask selects no bits at all. */
    disabled = 1; /* TRUE */
    if (pattern_mask != NULL)
        for (i = 0; i < pattern_length; i++)
            if (pattern_mask[i]) {
                disabled = 0; /* FALSE */
                break;
            }

    /* spec */
    memset(spec, 0, TABLE_RULE_MATCH_SIZE_MAX);
    memcpy(&spec[pattern_offset], pattern, pattern_length);

    /* mask */
    memset(mask, 0, TABLE_RULE_MATCH_SIZE_MAX);
    if (pattern_mask)
        memcpy(&mask[pattern_offset], pattern_mask, pattern_length);

    *item_size = pattern_offset + pattern_length;
    *item_disabled = disabled;

    return 0;
}
static int
flow_item_proto_preprocess(const struct rte_flow_item *item,
    union flow_item *item_spec,
    union flow_item *item_mask,
    size_t *item_size,
    int *item_disabled,
    struct rte_flow_error *error)
{
    const void *mask_default;
    uint8_t *spec = (uint8_t *)item_spec;
    uint8_t *mask = (uint8_t *)item_mask;
    size_t size, i;

    if (!flow_item_is_proto(item->type, &mask_default, &size))
        return rte_flow_error_set(error,
            ENOTSUP,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "Item type not supported");

    if (item->type == RTE_FLOW_ITEM_TYPE_RAW)
        return flow_item_raw_preprocess(item,
            item_spec,
            item_mask,
            item_size,
            item_disabled,
            error);

    /* spec */
    if (!item->spec) {
        /* If spec is NULL, then last and mask also have to be NULL. */
        if (item->last || item->mask)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "Invalid item (NULL spec with non-NULL last or mask)");

        memset(item_spec, 0, size);
        memset(item_mask, 0, size);
        *item_size = size;
        *item_disabled = 1; /* TRUE */
        return 0;
    }

    memcpy(spec, item->spec, size);
    *item_size = size;

    /* mask */
    if (item->mask)
        memcpy(mask, item->mask, size);
    else
        memcpy(mask, mask_default, size);

    /* disabled when the mask is all-zeros */
    for (i = 0; i < size; i++)
        if (mask[i])
            break;
    *item_disabled = (i == size) ? 1 : 0;

    /* Apply mask over spec. */
    for (i = 0; i < size; i++)
        spec[i] &= mask[i];

    /* last */
    if (item->last) {
        uint8_t last[size];

        /* init last */
        memcpy(last, item->last, size);
        for (i = 0; i < size; i++)
            last[i] &= mask[i];

        /* check for range */
        for (i = 0; i < size; i++)
            if (last[i] != spec[i])
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item,
                    "Range not supported");
    }

    return 0;
}
/*
 * Skip disabled protocol items and VOID items
 * until any of the mutually exclusive conditions
 * from the list below takes place:
 *    (A) A protocol present in the proto_mask
 *        is met (either ENABLED or DISABLED);
 *    (B) A protocol NOT present in the proto_mask is met in ENABLED state;
 *    (C) The END item is met.
 */
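/*
 * Example (a sketch): with proto_mask = FLOW_ITEM_PROTO_IP and the item list
 * [VOID, ETH (all-zero mask), IPV4, UDP, END], the VOID item and the disabled
 * ETH item are skipped and iteration stops at the IPV4 item (condition A).
 */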
static int
flow_item_skip_disabled_protos(const struct rte_flow_item **item,
    uint64_t proto_mask,
    size_t *length,
    struct rte_flow_error *error)
{
    size_t len = 0;

    for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
        union flow_item spec, mask;
        size_t size;
        int disabled = 0, status;

        if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
            continue;

        status = flow_item_proto_preprocess(*item,
            &spec,
            &mask,
            &size,
            &disabled,
            error);
        if (status)
            return status;

        if ((proto_mask & (1LLU << (*item)->type)) ||
            !disabled)
            break;

        len += size;
    }

    if (length)
        *length += len;

    return 0;
}
#define FLOW_ITEM_PROTO_IP \
    ((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
    (1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
static void
flow_item_skip_void(const struct rte_flow_item **item)
{
    for ( ; ; (*item)++)
        if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
            return;
}
#define IP_PROTOCOL_TCP 0x06
#define IP_PROTOCOL_UDP 0x11
#define IP_PROTOCOL_SCTP 0x84
static int
mask_to_depth(uint64_t mask,
    uint32_t *depth)
{
    uint64_t n;

    if (mask == UINT64_MAX) {
        if (depth)
            *depth = 64;
        return 0;
    }

    mask = ~mask;
    if (mask & (mask + 1))
        return -1;

    n = __builtin_popcountll(mask);
    if (depth)
        *depth = (uint32_t)(64 - n);

    return 0;
}
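/*
 * Example: mask 0xFFFFFF0000000000 inverts to 0x000000FFFFFFFFFF, which is
 * contiguous (mask & (mask + 1) == 0); its popcount is 40, so depth is
 * 64 - 40 = 24. A non-contiguous mask such as 0xFF00FF0000000000 returns -1.
 */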
static int
ipv4_mask_to_depth(uint32_t mask,
    uint32_t *depth)
{
    uint32_t d;
    int status;

    status = mask_to_depth(mask | (UINT64_MAX << 32), &d);
    if (status)
        return status;

    d -= 32;
    if (depth)
        *depth = d;

    return 0;
}
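/*
 * Example: mask 0xFFFF0000 (/16) is widened to 0xFFFFFFFFFFFF0000, for which
 * mask_to_depth() reports 48; subtracting the 32 padding bits gives depth 16.
 */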
static int
ipv6_mask_to_depth(uint8_t *mask,
    uint32_t *depth)
{
    uint64_t *m = (uint64_t *)mask;
    uint64_t m0 = rte_be_to_cpu_64(m[0]);
    uint64_t m1 = rte_be_to_cpu_64(m[1]);
    uint32_t d0, d1;
    int status;

    status = mask_to_depth(m0, &d0);
    if (status)
        return status;

    status = mask_to_depth(m1, &d1);
    if (status)
        return status;

    /* A 128-bit mask is contiguous only when the low half is empty or the
     * high half is fully set.
     */
    if (d0 < 64 && d1)
        return -1;

    if (depth)
        *depth = d0 + d1;

    return 0;
}
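/*
 * Example: a /96 mask has m0 = UINT64_MAX (d0 = 64) and
 * m1 = 0xFFFFFFFF00000000 (d1 = 32), giving depth = 96.
 */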
static int
port_mask_to_range(uint16_t port,
    uint16_t port_mask,
    uint16_t *port0,
    uint16_t *port1)
{
    int status;
    uint16_t p0, p1;

    /* Reject non-contiguous port masks. */
    status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL);
    if (status)
        return -1;

    p0 = port & port_mask;
    p1 = p0 | ~port_mask;

    if (port0)
        *port0 = p0;

    if (port1)
        *port1 = p1;

    return 0;
}
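/*
 * Example: port 0x0400 with port_mask 0xFF00 gives the contiguous range
 * p0 = 0x0400 to p1 = 0x04FF (1024..1279).
 */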
static int
flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
    struct pipeline *pipeline __rte_unused,
    struct softnic_table *table __rte_unused,
    const struct rte_flow_attr *attr,
    const struct rte_flow_item *item,
    struct softnic_table_rule_match *rule_match,
    struct rte_flow_error *error)
{
    union flow_item spec, mask;
    size_t size, length = 0;
    int disabled = 0, status;
    uint8_t ip_proto, ip_proto_mask;

    memset(rule_match, 0, sizeof(*rule_match));
    rule_match->match_type = TABLE_ACL;
    rule_match->match.acl.priority = attr->priority;

    /* VOID or disabled protos only, if any. */
    status = flow_item_skip_disabled_protos(&item,
        FLOW_ITEM_PROTO_IP, &length, error);
    if (status)
        return status;

    /* IPv4 or IPv6. */
    status = flow_item_proto_preprocess(item, &spec, &mask,
        &size, &disabled, error);
    if (status)
        return status;

    switch (item->type) {
    case RTE_FLOW_ITEM_TYPE_IPV4:
    {
        uint32_t sa_depth, da_depth;

        status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr),
            &sa_depth);
        if (status)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Illegal IPv4 header source address mask");

        status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr),
            &da_depth);
        if (status)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Illegal IPv4 header destination address mask");

        ip_proto = spec.ipv4.hdr.next_proto_id;
        ip_proto_mask = mask.ipv4.hdr.next_proto_id;

        rule_match->match.acl.ip_version = 1;
        rule_match->match.acl.ipv4.sa =
            rte_ntohl(spec.ipv4.hdr.src_addr);
        rule_match->match.acl.ipv4.da =
            rte_ntohl(spec.ipv4.hdr.dst_addr);
        rule_match->match.acl.sa_depth = sa_depth;
        rule_match->match.acl.da_depth = da_depth;
        rule_match->match.acl.proto = ip_proto;
        rule_match->match.acl.proto_mask = ip_proto_mask;
        break;
    } /* RTE_FLOW_ITEM_TYPE_IPV4 */

    case RTE_FLOW_ITEM_TYPE_IPV6:
    {
        uint32_t sa_depth, da_depth;

        status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth);
        if (status)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Illegal IPv6 header source address mask");

        status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth);
        if (status)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Illegal IPv6 header destination address mask");

        ip_proto = spec.ipv6.hdr.proto;
        ip_proto_mask = mask.ipv6.hdr.proto;

        rule_match->match.acl.ip_version = 0;
        memcpy(rule_match->match.acl.ipv6.sa,
            spec.ipv6.hdr.src_addr,
            sizeof(spec.ipv6.hdr.src_addr));
        memcpy(rule_match->match.acl.ipv6.da,
            spec.ipv6.hdr.dst_addr,
            sizeof(spec.ipv6.hdr.dst_addr));
        rule_match->match.acl.sa_depth = sa_depth;
        rule_match->match.acl.da_depth = da_depth;
        rule_match->match.acl.proto = ip_proto;
        rule_match->match.acl.proto_mask = ip_proto_mask;
        break;
    } /* RTE_FLOW_ITEM_TYPE_IPV6 */

    default:
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "ACL: IP protocol required");
    } /* switch */

    if (ip_proto_mask != UINT8_MAX)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "ACL: Illegal IP protocol mask");

    item++;

    /* VOID only, if any. */
    flow_item_skip_void(&item);

    /* TCP/UDP/SCTP only. */
    status = flow_item_proto_preprocess(item, &spec, &mask,
        &size, &disabled, error);
    if (status)
        return status;

    switch (item->type) {
    case RTE_FLOW_ITEM_TYPE_TCP:
    {
        uint16_t sp0, sp1, dp0, dp1;

        if (ip_proto != IP_PROTOCOL_TCP)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Item type is TCP, but IP protocol is not");

        status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port),
            rte_ntohs(mask.tcp.hdr.src_port),
            &sp0,
            &sp1);
        if (status)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Illegal TCP source port mask");

        status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port),
            rte_ntohs(mask.tcp.hdr.dst_port),
            &dp0,
            &dp1);
        if (status)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Illegal TCP destination port mask");

        rule_match->match.acl.sp0 = sp0;
        rule_match->match.acl.sp1 = sp1;
        rule_match->match.acl.dp0 = dp0;
        rule_match->match.acl.dp1 = dp1;

        break;
    } /* RTE_FLOW_ITEM_TYPE_TCP */

    case RTE_FLOW_ITEM_TYPE_UDP:
    {
        uint16_t sp0, sp1, dp0, dp1;

        if (ip_proto != IP_PROTOCOL_UDP)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Item type is UDP, but IP protocol is not");

        status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port),
            rte_ntohs(mask.udp.hdr.src_port),
            &sp0,
            &sp1);
        if (status)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Illegal UDP source port mask");

        status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port),
            rte_ntohs(mask.udp.hdr.dst_port),
            &dp0,
            &dp1);
        if (status)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Illegal UDP destination port mask");

        rule_match->match.acl.sp0 = sp0;
        rule_match->match.acl.sp1 = sp1;
        rule_match->match.acl.dp0 = dp0;
        rule_match->match.acl.dp1 = dp1;

        break;
    } /* RTE_FLOW_ITEM_TYPE_UDP */

    case RTE_FLOW_ITEM_TYPE_SCTP:
    {
        uint16_t sp0, sp1, dp0, dp1;

        if (ip_proto != IP_PROTOCOL_SCTP)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Item type is SCTP, but IP protocol is not");

        status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port),
            rte_ntohs(mask.sctp.hdr.src_port),
            &sp0,
            &sp1);
        if (status)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Illegal SCTP source port mask");

        status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port),
            rte_ntohs(mask.sctp.hdr.dst_port),
            &dp0,
            &dp1);
        if (status)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "ACL: Illegal SCTP destination port mask");

        rule_match->match.acl.sp0 = sp0;
        rule_match->match.acl.sp1 = sp1;
        rule_match->match.acl.dp0 = dp0;
        rule_match->match.acl.dp1 = dp1;

        break;
    } /* RTE_FLOW_ITEM_TYPE_SCTP */

    default:
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "ACL: TCP/UDP/SCTP required");
    } /* switch */

    item++;

    /* VOID or disabled protos only, if any. */
    status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
    if (status)
        return status;

    /* END only. */
    if (item->type != RTE_FLOW_ITEM_TYPE_END)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "ACL: Expecting END item");

    return 0;
}
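/*
 * Example (a sketch): the item list [IPV4 (next_proto_id 6, proto mask 0xFF),
 * TCP (src_port 1024, src_port mask 0xFF00), END] produces an ACL rule with
 * proto 6 and the source port range 1024..1279.
 */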
/*
 * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
 * respectively.
 * They are located within a larger buffer at offsets *toffset* and *foffset*
 * respectively. Both *tmask* and *fmask* represent bitmasks for the larger
 * buffer.
 * Question: are the two masks equivalent?
 *
 * Notes:
 * 1. Offset basically indicates that the first offset bytes in the buffer
 *    are "don't care", so offset is equivalent to pre-pending an "all-zeros"
 *    array of *offset* bytes to the *mask*.
 * 2. Each *mask* might contain a number of zero bytes at the beginning or
 *    the end.
 * 3. Bytes in the larger buffer after the end of the *mask* are also considered
 *    "don't care", so they are equivalent to appending an "all-zeros" array of
 *    bytes to the *mask*.
 *
 * Example:
 * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes
 * tmask = [00 22 00 33 00], toffset = 2, tsize = 5
 *    => buffer mask = [00 00 00 22 00 33 00 00]
 * fmask = [22 00 33], foffset = 3, fsize = 3
 *    => buffer mask = [00 00 00 22 00 33 00 00]
 * Therefore, the tmask and fmask from this example are equivalent.
 */
static int
hash_key_mask_is_same(uint8_t *tmask,
    size_t toffset,
    size_t tsize,
    uint8_t *fmask,
    size_t foffset,
    size_t fsize,
    size_t *toffset_plus,
    size_t *foffset_plus)
{
    size_t tpos; /* Position of first non-zero byte in the tmask buffer. */
    size_t fpos; /* Position of first non-zero byte in the fmask buffer. */

    /* Compute tpos and fpos. */
    for (tpos = 0; tmask[tpos] == 0; tpos++)
        ;
    for (fpos = 0; fmask[fpos] == 0; fpos++)
        ;

    if (toffset + tpos != foffset + fpos)
        return 0; /* FALSE */

    tsize -= tpos;
    fsize -= fpos;

    if (tsize < fsize) {
        size_t i;

        for (i = 0; i < tsize; i++)
            if (tmask[tpos + i] != fmask[fpos + i])
                return 0; /* FALSE */

        for ( ; i < fsize; i++)
            if (fmask[fpos + i])
                return 0; /* FALSE */
    } else {
        size_t i;

        for (i = 0; i < fsize; i++)
            if (tmask[tpos + i] != fmask[fpos + i])
                return 0; /* FALSE */

        for ( ; i < tsize; i++)
            if (tmask[tpos + i])
                return 0; /* FALSE */
    }

    if (toffset_plus)
        *toffset_plus = tpos;

    if (foffset_plus)
        *foffset_plus = fpos;

    return 1; /* TRUE */
}
static int
flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused,
    struct pipeline *pipeline __rte_unused,
    struct softnic_table *table,
    const struct rte_flow_attr *attr __rte_unused,
    const struct rte_flow_item *item,
    struct softnic_table_rule_match *rule_match,
    struct rte_flow_error *error)
{
    struct softnic_table_rule_match_hash key, key_mask;
    struct softnic_table_hash_params *params = &table->params.match.hash;
    size_t offset = 0, length = 0, tpos, fpos;
    int status;

    memset(&key, 0, sizeof(key));
    memset(&key_mask, 0, sizeof(key_mask));

    /* VOID or disabled protos only, if any. */
    status = flow_item_skip_disabled_protos(&item, 0, &offset, error);
    if (status)
        return status;

    if (item->type == RTE_FLOW_ITEM_TYPE_END)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item,
            "HASH: END detected too early");

    /* VOID or any protocols (enabled or disabled). */
    for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
        union flow_item spec, mask;
        size_t size;
        int disabled, status;

        if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
            continue;

        status = flow_item_proto_preprocess(item,
            &spec,
            &mask,
            &size,
            &disabled,
            error);
        if (status)
            return status;

        if (length + size > sizeof(key)) {
            if (disabled)
                break;

            return rte_flow_error_set(error,
                ENOTSUP,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "HASH: Item too big");
        }

        memcpy(&key.key[length], &spec, size);
        memcpy(&key_mask.key[length], &mask, size);
        length += size;
    }

    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        /* VOID or disabled protos only, if any. */
        status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
        if (status)
            return status;

        /* END only. */
        if (item->type != RTE_FLOW_ITEM_TYPE_END)
            return rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item,
                "HASH: Expecting END item");
    }

    /* Compare flow key mask against table key mask. */
    offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;

    if (!hash_key_mask_is_same(params->key_mask,
        params->key_offset,
        params->key_size,
        key_mask.key,
        offset,
        length,
        &tpos,
        &fpos))
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "HASH: Item list is not observing the match format");

    /* Rule match. */
    memset(rule_match, 0, sizeof(*rule_match));
    rule_match->match_type = TABLE_HASH;
    memcpy(&rule_match->match.hash.key[tpos],
        &key.key[fpos],
        RTE_MIN(sizeof(rule_match->match.hash.key) - tpos,
            length - fpos));

    return 0;
}
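/*
 * Note: the table hash key offset is absolute within the packet buffer, while
 * the flow items describe bytes starting at the first packet header, hence
 * the sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM adjustment above.
 */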
static int
flow_rule_match_get(struct pmd_internals *softnic,
    struct pipeline *pipeline,
    struct softnic_table *table,
    const struct rte_flow_attr *attr,
    const struct rte_flow_item *item,
    struct softnic_table_rule_match *rule_match,
    struct rte_flow_error *error)
{
    switch (table->params.match_type) {
    case TABLE_ACL:
        return flow_rule_match_acl_get(softnic,
            pipeline,
            table,
            attr,
            item,
            rule_match,
            error);

    case TABLE_HASH:
        return flow_rule_match_hash_get(softnic,
            pipeline,
            table,
            attr,
            item,
            rule_match,
            error);

    default:
        return rte_flow_error_set(error,
            ENOTSUP,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "Unsupported pipeline table match type");
    }
}
static int
flow_rule_action_get(struct pmd_internals *softnic,
    struct pipeline *pipeline,
    struct softnic_table *table,
    const struct rte_flow_attr *attr,
    const struct rte_flow_action *action,
    struct softnic_table_rule_action *rule_action,
    struct rte_flow_error *error)
{
    struct softnic_table_action_profile *profile;
    struct softnic_table_action_profile_params *params;
    int n_jump_queue_rss_drop = 0;
    int n_count = 0;
    int n_mark = 0;
    int n_vxlan_decap = 0;

    profile = softnic_table_action_profile_find(softnic,
        table->params.action_profile_name);
    if (profile == NULL)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "JUMP: Table action profile");

    params = &profile->params;

    for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
        if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
            continue;

        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_JUMP:
        {
            const struct rte_flow_action_jump *conf = action->conf;
            struct flow_attr_map *map;

            if (conf == NULL)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "JUMP: Null configuration");

            if (n_jump_queue_rss_drop)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "Only one termination action is"
                    " allowed per flow");

            if ((params->action_mask &
                (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "JUMP action not enabled for this table");

            n_jump_queue_rss_drop = 1;

            map = flow_attr_map_get(softnic,
                conf->group,
                attr->ingress);
            if (map == NULL || map->valid == 0)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "JUMP: Invalid group mapping");

            if (strcmp(pipeline->name, map->pipeline_name) != 0)
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "JUMP: Jump to table in different pipeline");

            /* RTE_TABLE_ACTION_FWD */
            rule_action->fwd.action = RTE_PIPELINE_ACTION_TABLE;
            rule_action->fwd.id = map->table_id;
            rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
            break;
        } /* RTE_FLOW_ACTION_TYPE_JUMP */
        case RTE_FLOW_ACTION_TYPE_QUEUE:
        {
            char name[NAME_SIZE];
            struct rte_eth_dev *dev;
            const struct rte_flow_action_queue *conf = action->conf;
            uint32_t port_id;
            int status;

            if (conf == NULL)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "QUEUE: Null configuration");

            if (n_jump_queue_rss_drop)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "Only one termination action is allowed"
                    " per flow");

            if ((params->action_mask &
                (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "QUEUE action not enabled for this table");

            n_jump_queue_rss_drop = 1;

            dev = ETHDEV(softnic);
            if (dev == NULL ||
                conf->index >= dev->data->nb_rx_queues)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "QUEUE: Invalid RX queue ID");

            snprintf(name, sizeof(name), "RXQ%u",
                (uint32_t)conf->index);

            status = softnic_pipeline_port_out_find(softnic,
                pipeline->name,
                name,
                &port_id);
            if (status)
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "QUEUE: RX queue not accessible from this pipeline");

            /* RTE_TABLE_ACTION_FWD */
            rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT;
            rule_action->fwd.id = port_id;
            rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
            break;
        } /* RTE_FLOW_ACTION_TYPE_QUEUE */
        case RTE_FLOW_ACTION_TYPE_RSS:
        {
            const struct rte_flow_action_rss *conf = action->conf;
            uint32_t i;

            if (conf == NULL)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "RSS: Null configuration");

            if (!rte_is_power_of_2(conf->queue_num))
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                    conf,
                    "RSS: Number of queues must be a power of 2");

            if (conf->queue_num > RTE_DIM(rule_action->lb.out))
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                    conf,
                    "RSS: Number of queues too big");

            if (n_jump_queue_rss_drop)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "Only one termination action is allowed per flow");

            if (((params->action_mask &
                (1LLU << RTE_TABLE_ACTION_FWD)) == 0) ||
                ((params->action_mask &
                (1LLU << RTE_TABLE_ACTION_LB)) == 0))
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "RSS action not supported by this table");

            if (params->lb.out_offset !=
                pipeline->params.offset_port_id)
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "RSS action not supported by this pipeline");

            n_jump_queue_rss_drop = 1;

            /* RTE_TABLE_ACTION_LB */
            for (i = 0; i < conf->queue_num; i++) {
                char name[NAME_SIZE];
                struct rte_eth_dev *dev;
                uint32_t port_id;
                int status;

                dev = ETHDEV(softnic);
                if (dev == NULL ||
                    conf->queue[i] >=
                    dev->data->nb_rx_queues)
                    return rte_flow_error_set(error,
                        EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        action,
                        "RSS: Invalid RX queue ID");

                snprintf(name, sizeof(name), "RXQ%u",
                    (uint32_t)conf->queue[i]);

                status = softnic_pipeline_port_out_find(softnic,
                    pipeline->name,
                    name,
                    &port_id);
                if (status)
                    return rte_flow_error_set(error,
                        ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        action,
                        "RSS: RX queue not accessible from this pipeline");

                rule_action->lb.out[i] = port_id;
            }

            /* Replicate the valid queues across the remaining LB slots. */
            for ( ; i < RTE_DIM(rule_action->lb.out); i++)
                rule_action->lb.out[i] =
                    rule_action->lb.out[i % conf->queue_num];

            rule_action->action_mask |= 1 << RTE_TABLE_ACTION_LB;

            /* RTE_TABLE_ACTION_FWD */
            rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
            rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
            break;
        } /* RTE_FLOW_ACTION_TYPE_RSS */
        case RTE_FLOW_ACTION_TYPE_DROP:
        {
            const void *conf = action->conf;

            if (conf != NULL)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "DROP: No configuration required");

            if (n_jump_queue_rss_drop)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "Only one termination action is allowed per flow");

            if ((params->action_mask &
                (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "DROP action not supported by this table");

            n_jump_queue_rss_drop = 1;

            /* RTE_TABLE_ACTION_FWD */
            rule_action->fwd.action = RTE_PIPELINE_ACTION_DROP;
            rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
            break;
        } /* RTE_FLOW_ACTION_TYPE_DROP */
        case RTE_FLOW_ACTION_TYPE_COUNT:
        {
            const struct rte_flow_action_count *conf = action->conf;

            if (conf == NULL)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "COUNT: Null configuration");

            if (conf->shared)
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                    conf,
                    "COUNT: Shared counters not supported");

            if (n_count)
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "Only one COUNT action per flow");

            if ((params->action_mask &
                (1LLU << RTE_TABLE_ACTION_STATS)) == 0)
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "COUNT action not supported by this table");

            n_count = 1;

            /* RTE_TABLE_ACTION_STATS */
            rule_action->stats.n_packets = 0;
            rule_action->stats.n_bytes = 0;
            rule_action->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
            break;
        } /* RTE_FLOW_ACTION_TYPE_COUNT */
        case RTE_FLOW_ACTION_TYPE_MARK:
        {
            const struct rte_flow_action_mark *conf = action->conf;

            if (conf == NULL)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "MARK: Null configuration");

            if (n_mark)
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "Only one MARK action per flow");

            if ((params->action_mask &
                (1LLU << RTE_TABLE_ACTION_TAG)) == 0)
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "MARK action not supported by this table");

            n_mark = 1;

            /* RTE_TABLE_ACTION_TAG */
            rule_action->tag.tag = conf->id;
            rule_action->action_mask |= 1 << RTE_TABLE_ACTION_TAG;
            break;
        } /* RTE_FLOW_ACTION_TYPE_MARK */
        case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
        {
            const struct rte_flow_action_mark *conf = action->conf;

            if (conf)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "VXLAN DECAP: Non-null configuration");

            if (n_vxlan_decap)
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "Only one VXLAN DECAP action per flow");

            if ((params->action_mask &
                (1LLU << RTE_TABLE_ACTION_DECAP)) == 0)
                return rte_flow_error_set(error,
                    ENOTSUP,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "VXLAN DECAP action not supported by this table");

            n_vxlan_decap = 1;

            /* RTE_TABLE_ACTION_DECAP */
            /* Outer Ether (14) + IPv4 (20) + UDP (8) + VXLAN (8) = 50 bytes. */
            rule_action->decap.n = 50; /* Ether/IPv4/UDP/VXLAN */
            rule_action->action_mask |= 1 << RTE_TABLE_ACTION_DECAP;
            break;
        } /* RTE_FLOW_ACTION_TYPE_VXLAN_DECAP */
        case RTE_FLOW_ACTION_TYPE_METER:
        {
            const struct rte_flow_action_meter *conf = action->conf;
            struct softnic_mtr_meter_profile *mp;
            struct softnic_mtr *m;
            uint32_t table_id = table - pipeline->table;
            uint32_t meter_profile_id;
            int status;

            if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "METER: Table action not supported");

            if (params->mtr.n_tc != 1)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "METER: Multiple TCs not supported");

            if (conf == NULL)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "METER: Null configuration");

            m = softnic_mtr_find(softnic, conf->mtr_id);
            if (m == NULL)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                    NULL,
                    "METER: Invalid meter ID");

            if (m->flow)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                    NULL,
                    "METER: Meter already attached to a flow");

            meter_profile_id = m->params.meter_profile_id;
            mp = softnic_mtr_meter_profile_find(softnic, meter_profile_id);

            /* Add meter profile to pipeline table */
            if (!softnic_pipeline_table_meter_profile_find(table,
                meter_profile_id)) {
                struct rte_table_action_meter_profile profile;

                memset(&profile, 0, sizeof(profile));
                profile.alg = RTE_TABLE_ACTION_METER_TRTCM;
                profile.trtcm.cir = mp->params.trtcm_rfc2698.cir;
                profile.trtcm.pir = mp->params.trtcm_rfc2698.pir;
                profile.trtcm.cbs = mp->params.trtcm_rfc2698.cbs;
                profile.trtcm.pbs = mp->params.trtcm_rfc2698.pbs;

                status = softnic_pipeline_table_mtr_profile_add(softnic,
                    pipeline->name,
                    table_id,
                    meter_profile_id,
                    &profile);
                if (status) {
                    rte_flow_error_set(error,
                        EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        NULL,
                        "METER: Table meter profile add failed");
                    return -1;
                }
            }

            /* RTE_TABLE_ACTION_METER */
            rule_action->mtr.mtr[0].meter_profile_id = meter_profile_id;
            rule_action->mtr.mtr[0].policer[RTE_COLOR_GREEN] =
                softnic_table_action_policer(m->params.action[RTE_COLOR_GREEN]);
            rule_action->mtr.mtr[0].policer[RTE_COLOR_YELLOW] =
                softnic_table_action_policer(m->params.action[RTE_COLOR_YELLOW]);
            rule_action->mtr.mtr[0].policer[RTE_COLOR_RED] =
                softnic_table_action_policer(m->params.action[RTE_COLOR_RED]);
            rule_action->mtr.tc_mask = 1;
            rule_action->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
            break;
        } /* RTE_FLOW_ACTION_TYPE_METER */
        case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
        {
            const struct rte_flow_action_vxlan_encap *conf =
                action->conf;
            const struct rte_flow_item *item;
            union flow_item spec, mask;
            int disabled = 0, status;
            size_t size;

            if (conf == NULL)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "VXLAN ENCAP: Null configuration");

            item = conf->definition;
            if (item == NULL)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    action,
                    "VXLAN ENCAP: Null configuration definition");

            if (!(params->action_mask &
                (1LLU << RTE_TABLE_ACTION_ENCAP)))
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                    NULL,
                    "VXLAN ENCAP: Encap action not enabled for this table");

            /* Check for Ether. */
            flow_item_skip_void(&item);
            status = flow_item_proto_preprocess(item, &spec, &mask,
                &size, &disabled, error);
            if (status)
                return status;

            if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item,
                    "VXLAN ENCAP: first encap item should be ether");
            }
            rte_ether_addr_copy(&spec.eth.dst,
                &rule_action->encap.vxlan.ether.da);
            rte_ether_addr_copy(&spec.eth.src,
                &rule_action->encap.vxlan.ether.sa);

            item++;

            /* Check for VLAN. */
            flow_item_skip_void(&item);
            status = flow_item_proto_preprocess(item, &spec, &mask,
                &size, &disabled, error);
            if (status)
                return status;

            if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
                if (!params->encap.vxlan.vlan)
                    return rte_flow_error_set(error,
                        ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "VXLAN ENCAP: vlan encap not supported by table");

                uint16_t tci = rte_ntohs(spec.vlan.tci);
                rule_action->encap.vxlan.vlan.pcp =
                    tci >> 13;
                rule_action->encap.vxlan.vlan.dei =
                    (tci >> 12) & 0x1;
                rule_action->encap.vxlan.vlan.vid =
                    tci & 0xfff;

                item++;

                flow_item_skip_void(&item);
                status = flow_item_proto_preprocess(item, &spec,
                    &mask, &size, &disabled, error);
                if (status)
                    return status;
            } else {
                if (params->encap.vxlan.vlan)
                    return rte_flow_error_set(error,
                        ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "VXLAN ENCAP: expecting vlan encap item");
            }

            /* Check for IPV4/IPV6. */
            switch (item->type) {
            case RTE_FLOW_ITEM_TYPE_IPV4:
            {
                rule_action->encap.vxlan.ipv4.sa =
                    rte_ntohl(spec.ipv4.hdr.src_addr);
                rule_action->encap.vxlan.ipv4.da =
                    rte_ntohl(spec.ipv4.hdr.dst_addr);
                rule_action->encap.vxlan.ipv4.dscp =
                    spec.ipv4.hdr.type_of_service >> 2;
                rule_action->encap.vxlan.ipv4.ttl =
                    spec.ipv4.hdr.time_to_live;
                break;
            }
            case RTE_FLOW_ITEM_TYPE_IPV6:
            {
                uint32_t vtc_flow;

                memcpy(&rule_action->encap.vxlan.ipv6.sa,
                    &spec.ipv6.hdr.src_addr,
                    sizeof(spec.ipv6.hdr.src_addr));
                memcpy(&rule_action->encap.vxlan.ipv6.da,
                    &spec.ipv6.hdr.dst_addr,
                    sizeof(spec.ipv6.hdr.dst_addr));
                vtc_flow = rte_ntohl(spec.ipv6.hdr.vtc_flow);
                rule_action->encap.vxlan.ipv6.flow_label =
                    vtc_flow & 0xfffff;
                rule_action->encap.vxlan.ipv6.dscp =
                    (vtc_flow >> 22) & 0x3f;
                rule_action->encap.vxlan.ipv6.hop_limit =
                    spec.ipv6.hdr.hop_limits;
                break;
            }
            default:
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item,
                    "VXLAN ENCAP: encap item after ether should be ipv4/ipv6");
            }

            item++;

            /* Check for UDP. */
            flow_item_skip_void(&item);
            status = flow_item_proto_preprocess(item, &spec, &mask,
                &size, &disabled, error);
            if (status)
                return status;

            if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item,
                    "VXLAN ENCAP: encap item after ipv4/ipv6 should be udp");
            }
            rule_action->encap.vxlan.udp.sp =
                rte_ntohs(spec.udp.hdr.src_port);
            rule_action->encap.vxlan.udp.dp =
                rte_ntohs(spec.udp.hdr.dst_port);

            item++;

            /* Check for VXLAN. */
            flow_item_skip_void(&item);
            status = flow_item_proto_preprocess(item, &spec, &mask,
                &size, &disabled, error);
            if (status)
                return status;

            if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item,
                    "VXLAN ENCAP: encap item after udp should be vxlan");
            }
            /* Assemble the 24-bit VNI from its three big-endian bytes. */
            rule_action->encap.vxlan.vxlan.vni =
                (spec.vxlan.vni[0] << 16U |
                spec.vxlan.vni[1] << 8U |
                spec.vxlan.vni[2]);

            item++;

            /* Check for END. */
            flow_item_skip_void(&item);

            if (item->type != RTE_FLOW_ITEM_TYPE_END)
                return rte_flow_error_set(error,
                    EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item,
                    "VXLAN ENCAP: expecting END item");

            rule_action->encap.type = RTE_TABLE_ACTION_ENCAP_VXLAN;
            rule_action->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
            break;
        } /* RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP */

        default:
            return -ENOTSUP;
        }
    }
    if (n_jump_queue_rss_drop == 0)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            action,
            "Flow does not have any terminating action");

    return 0;
}
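/*
 * Example (a sketch): a valid action list must contain exactly one of the
 * terminating actions JUMP/QUEUE/RSS/DROP, e.g. [COUNT, QUEUE, END], which
 * enables both the STATS and FWD table actions for the resulting rule.
 */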
static int
pmd_flow_validate(struct rte_eth_dev *dev,
    const struct rte_flow_attr *attr,
    const struct rte_flow_item item[],
    const struct rte_flow_action action[],
    struct rte_flow_error *error)
{
    struct softnic_table_rule_match rule_match;
    struct softnic_table_rule_action rule_action;

    struct pmd_internals *softnic = dev->data->dev_private;
    struct pipeline *pipeline;
    struct softnic_table *table;
    const char *pipeline_name = NULL;
    uint32_t table_id = 0;
    int status;

    /* Check input parameters. */
    if (attr == NULL)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL,
            "Null attr");

    if (item == NULL)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL,
            "Null item");

    if (action == NULL)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            NULL,
            "Null action");

    /* Identify the pipeline table to add this flow to. */
    status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
        &table_id, error);
    if (status)
        return status;

    pipeline = softnic_pipeline_find(softnic, pipeline_name);
    if (pipeline == NULL)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "Invalid pipeline name");

    if (table_id >= pipeline->n_tables)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "Invalid pipeline table ID");

    table = &pipeline->table[table_id];

    /* Rule match. */
    memset(&rule_match, 0, sizeof(rule_match));
    status = flow_rule_match_get(softnic,
        pipeline,
        table,
        attr,
        item,
        &rule_match,
        error);
    if (status)
        return status;

    /* Rule action. */
    memset(&rule_action, 0, sizeof(rule_action));
    status = flow_rule_action_get(softnic,
        pipeline,
        table,
        attr,
        action,
        &rule_action,
        error);
    if (status)
        return status;

    return 0;
}
static struct softnic_mtr *
flow_action_meter_get(struct pmd_internals *softnic,
    const struct rte_flow_action *action)
{
    for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++)
        if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
            const struct rte_flow_action_meter *conf = action->conf;

            if (conf == NULL)
                return NULL;

            return softnic_mtr_find(softnic, conf->mtr_id);
        }

    return NULL;
}
static void
flow_meter_owner_reset(struct pmd_internals *softnic,
    struct rte_flow *flow)
{
    struct softnic_mtr_list *ml = &softnic->mtr.mtrs;
    struct softnic_mtr *m;

    TAILQ_FOREACH(m, ml, node)
        if (m->flow == flow) {
            m->flow = NULL;
            break;
        }
}

static void
flow_meter_owner_set(struct pmd_internals *softnic,
    struct rte_flow *flow,
    struct softnic_mtr *mtr)
{
    /* Reset current flow meter. */
    flow_meter_owner_reset(softnic, flow);

    /* Set new flow meter. */
    mtr->flow = flow;
}
static int
is_meter_action_enable(struct pmd_internals *softnic,
    struct softnic_table *table)
{
    struct softnic_table_action_profile *profile =
        softnic_table_action_profile_find(softnic,
            table->params.action_profile_name);
    struct softnic_table_action_profile_params *params = &profile->params;

    return (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) ? 1 : 0;
}
static struct rte_flow *
pmd_flow_create(struct rte_eth_dev *dev,
    const struct rte_flow_attr *attr,
    const struct rte_flow_item item[],
    const struct rte_flow_action action[],
    struct rte_flow_error *error)
{
    struct softnic_table_rule_match rule_match;
    struct softnic_table_rule_action rule_action;
    void *rule_data;

    struct pmd_internals *softnic = dev->data->dev_private;
    struct pipeline *pipeline;
    struct softnic_table *table;
    struct rte_flow *flow;
    struct softnic_mtr *mtr;
    const char *pipeline_name = NULL;
    uint32_t table_id = 0;
    int new_flow, status;

    /* Check input parameters. */
    if (attr == NULL) {
        rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL,
            "Null attr");
        return NULL;
    }

    if (item == NULL) {
        rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL,
            "Null item");
        return NULL;
    }

    if (action == NULL) {
        rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            NULL,
            "Null action");
        return NULL;
    }

    /* Identify the pipeline table to add this flow to. */
    status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
        &table_id, error);
    if (status)
        return NULL;

    pipeline = softnic_pipeline_find(softnic, pipeline_name);
    if (pipeline == NULL) {
        rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "Invalid pipeline name");
        return NULL;
    }

    if (table_id >= pipeline->n_tables) {
        rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "Invalid pipeline table ID");
        return NULL;
    }

    table = &pipeline->table[table_id];

    /* Rule match. */
    memset(&rule_match, 0, sizeof(rule_match));
    status = flow_rule_match_get(softnic,
        pipeline,
        table,
        attr,
        item,
        &rule_match,
        error);
    if (status)
        return NULL;

    /* Rule action. */
    memset(&rule_action, 0, sizeof(rule_action));
    status = flow_rule_action_get(softnic,
        pipeline,
        table,
        attr,
        action,
        &rule_action,
        error);
    if (status)
        return NULL;

    /* Flow find/allocate. */
    new_flow = 0;
    flow = softnic_flow_find(table, &rule_match);
    if (flow == NULL) {
        new_flow = 1;
        flow = calloc(1, sizeof(struct rte_flow));
        if (flow == NULL) {
            rte_flow_error_set(error,
                ENOMEM,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                NULL,
                "Not enough memory for new flow");
            return NULL;
        }
    }

    /* Rule add. */
    status = softnic_pipeline_table_rule_add(softnic,
        pipeline_name,
        table_id,
        &rule_match,
        &rule_action,
        &rule_data);
    if (status) {
        if (new_flow)
            free(flow);

        rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "Pipeline table rule add failed");
        return NULL;
    }

    /* Flow fill in. */
    memcpy(&flow->match, &rule_match, sizeof(rule_match));
    memcpy(&flow->action, &rule_action, sizeof(rule_action));
    flow->data = rule_data;
    flow->pipeline = pipeline;
    flow->table_id = table_id;

    mtr = flow_action_meter_get(softnic, action);
    if (mtr)
        flow_meter_owner_set(softnic, flow, mtr);

    /* Flow add to list. */
    if (new_flow)
        TAILQ_INSERT_TAIL(&table->flows, flow, node);

    return flow;
}
static int
pmd_flow_destroy(struct rte_eth_dev *dev,
    struct rte_flow *flow,
    struct rte_flow_error *error)
{
    struct pmd_internals *softnic = dev->data->dev_private;
    struct softnic_table *table;
    int status;

    /* Check input parameters. */
    if (flow == NULL)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_HANDLE,
            NULL,
            "Null flow");

    table = &flow->pipeline->table[flow->table_id];

    /* Rule delete. */
    status = softnic_pipeline_table_rule_delete(softnic,
        flow->pipeline->name,
        flow->table_id,
        &flow->match);
    if (status)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "Pipeline table rule delete failed");

    /* Update dependencies. */
    if (is_meter_action_enable(softnic, table))
        flow_meter_owner_reset(softnic, flow);

    /* Flow delete. */
    TAILQ_REMOVE(&table->flows, flow, node);
    free(flow);

    return 0;
}
static int
pmd_flow_flush(struct rte_eth_dev *dev,
    struct rte_flow_error *error)
{
    struct pmd_internals *softnic = dev->data->dev_private;
    struct pipeline *pipeline;
    int fail_to_del_rule = 0;
    uint32_t i;

    TAILQ_FOREACH(pipeline, &softnic->pipeline_list, node) {
        /* Remove all the flows added to the tables. */
        for (i = 0; i < pipeline->n_tables; i++) {
            struct softnic_table *table = &pipeline->table[i];
            struct rte_flow *flow;
            void *temp;
            int status;

            TAILQ_FOREACH_SAFE(flow, &table->flows, node, temp) {
                /* Rule delete. */
                status = softnic_pipeline_table_rule_delete
                    (softnic,
                    pipeline->name,
                    i,
                    &flow->match);
                if (status)
                    fail_to_del_rule = 1;

                /* Update dependencies. */
                if (is_meter_action_enable(softnic, table))
                    flow_meter_owner_reset(softnic, flow);

                /* Flow delete. */
                TAILQ_REMOVE(&table->flows, flow, node);
                free(flow);
            }
        }
    }

    if (fail_to_del_rule)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "Some of the rules could not be deleted");

    return 0;
}
static int
pmd_flow_query(struct rte_eth_dev *dev __rte_unused,
    struct rte_flow *flow,
    const struct rte_flow_action *action __rte_unused,
    void *data,
    struct rte_flow_error *error)
{
    struct rte_table_action_stats_counters stats;
    struct softnic_table *table;
    struct rte_flow_query_count *flow_stats = data;
    int status;

    /* Check input parameters. */
    if (flow == NULL)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_HANDLE,
            NULL,
            "Null flow");

    if (data == NULL)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "Null data");

    table = &flow->pipeline->table[flow->table_id];

    /* Rule stats read. */
    status = rte_table_action_stats_read(table->a,
        flow->data,
        &stats,
        flow_stats->reset);
    if (status)
        return rte_flow_error_set(error,
            EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            NULL,
            "Pipeline table rule stats read failed");

    /* Fill in flow stats. */
    flow_stats->hits_set =
        (table->ap->params.stats.n_packets_enabled) ? 1 : 0;
    flow_stats->bytes_set =
        (table->ap->params.stats.n_bytes_enabled) ? 1 : 0;
    flow_stats->hits = stats.n_packets;
    flow_stats->bytes = stats.n_bytes;

    return 0;
}
const struct rte_flow_ops pmd_flow_ops = {
    .validate = pmd_flow_validate,
    .create = pmd_flow_create,
    .destroy = pmd_flow_destroy,
    .flush = pmd_flow_flush,
    .query = pmd_flow_query,
};
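/*
 * Usage sketch (assumed application code, not part of this driver): these
 * callbacks are reached through the generic rte_flow API, e.g.:
 *
 *    struct rte_flow_error err;
 *    struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *    struct rte_flow *f = rte_flow_create(port_id, &attr,
 *        items, actions, &err);
 *
 * where port_id is a Soft NIC port and items/actions are arrays terminated
 * by END entries.
 */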