1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
8 #include <rte_common.h>
9 #include <rte_byteorder.h>
10 #include <rte_malloc.h>
11 #include <rte_string_fns.h>
13 #include <rte_flow_driver.h>
14 #include <rte_tailq.h>
16 #include "rte_eth_softnic_internals.h"
17 #include "rte_eth_softnic.h"
/* Network byte-order aliases: map the familiar htons/htonl/ntohs/ntohl
 * names onto DPDK's CPU<->big-endian conversion macros. */
19 #define rte_htons rte_cpu_to_be_16
20 #define rte_htonl rte_cpu_to_be_32
22 #define rte_ntohs rte_be_to_cpu_16
23 #define rte_ntohl rte_be_to_cpu_32
/*
 * Find an existing flow in a table by exact match-spec comparison:
 * walks table->flows and memcmp()s each stored flow->match against
 * *rule_match.
 * NOTE(review): the loop-body return and the fall-through return are
 * missing from this copy of the file -- presumably returns the matching
 * flow, or NULL when none matches; confirm against the full source.
 */
25 static struct rte_flow *
26 softnic_flow_find(struct softnic_table *table,
27 struct softnic_table_rule_match *rule_match)
29 struct rte_flow *flow;
31 TAILQ_FOREACH(flow, &table->flows, node)
32 if (memcmp(&flow->match, rule_match, sizeof(*rule_match)) == 0)
39 flow_attr_map_set(struct pmd_internals *softnic,
42 const char *pipeline_name,
45 struct pipeline *pipeline;
46 struct flow_attr_map *map;
48 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
49 pipeline_name == NULL)
52 pipeline = softnic_pipeline_find(softnic, pipeline_name);
53 if (pipeline == NULL ||
54 table_id >= pipeline->n_tables)
57 map = (ingress) ? &softnic->flow.ingress_map[group_id] :
58 &softnic->flow.egress_map[group_id];
59 strcpy(map->pipeline_name, pipeline_name);
60 map->table_id = table_id;
/*
 * Return the flow attribute map entry for a group ID, selecting between
 * the ingress and egress map arrays based on the direction flag.
 * NOTE(review): the out-of-range return statement is not visible in this
 * copy -- presumably NULL; the 'ingress' parameter declaration is also
 * elided here.
 */
66 struct flow_attr_map *
67 flow_attr_map_get(struct pmd_internals *softnic,
71 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
74 return (ingress) ? &softnic->flow.ingress_map[group_id] :
75 &softnic->flow.egress_map[group_id];
/*
 * Validate the rte_flow attributes and resolve attr->group into the
 * target (pipeline_name, table_id) via the attribute map.
 * Rejected cases visible here: neither ingress nor egress set, both set,
 * and an unmapped/invalid group. Errors are reported through
 * rte_flow_error_set(); the errno arguments and some guard conditions
 * are missing from this copy of the file.
 */
79 flow_pipeline_table_get(struct pmd_internals *softnic,
80 const struct rte_flow_attr *attr,
81 const char **pipeline_name,
83 struct rte_flow_error *error)
85 struct flow_attr_map *map;
88 return rte_flow_error_set(error,
90 RTE_FLOW_ERROR_TYPE_ATTR,
/* Exactly one of ingress/egress must be specified. */
94 if (!attr->ingress && !attr->egress)
95 return rte_flow_error_set(error,
97 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
99 "Ingress/egress not specified");
101 if (attr->ingress && attr->egress)
102 return rte_flow_error_set(error,
104 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
106 "Setting both ingress and egress is not allowed");
108 map = flow_attr_map_get(softnic,
113 return rte_flow_error_set(error,
115 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
/* Hand the resolved mapping back to the caller. */
120 *pipeline_name = map->pipeline_name;
123 *table_id = map->table_id;
/*
 * Overlay of every rte_flow item spec/mask layout supported by this
 * driver, plus a raw byte view sized to the largest table rule match.
 * Used as scratch storage when preprocessing item specs and masks.
 * NOTE(review): the opening "union flow_item {" and closing "};" lines
 * are not visible in this copy of the file.
 */
129 uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
130 struct rte_flow_item_eth eth;
131 struct rte_flow_item_vlan vlan;
132 struct rte_flow_item_ipv4 ipv4;
133 struct rte_flow_item_ipv6 ipv6;
134 struct rte_flow_item_icmp icmp;
135 struct rte_flow_item_udp udp;
136 struct rte_flow_item_tcp tcp;
137 struct rte_flow_item_sctp sctp;
138 struct rte_flow_item_vxlan vxlan;
139 struct rte_flow_item_e_tag e_tag;
140 struct rte_flow_item_nvgre nvgre;
141 struct rte_flow_item_mpls mpls;
142 struct rte_flow_item_gre gre;
143 struct rte_flow_item_gtp gtp;
144 struct rte_flow_item_esp esp;
145 struct rte_flow_item_geneve geneve;
146 struct rte_flow_item_vxlan_gpe vxlan_gpe;
147 struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
148 struct rte_flow_item_ipv6_ext ipv6_ext;
149 struct rte_flow_item_icmp6 icmp6;
150 struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
151 struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
152 struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
153 struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
154 struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
/* All-zeroes default mask for RAW items (static storage is
 * zero-initialized). */
157 static const union flow_item flow_item_raw_mask;
/*
 * Classify an rte_flow item type: for every protocol item this driver
 * supports, output the default mask (*mask) and the item struct size
 * (*size) and report supported; unknown item types fall through to the
 * default and return 0 (FALSE).
 * NOTE(review): the "return 1" / break statements after each case and
 * the switch/brace scaffolding are elided in this copy of the file.
 */
160 flow_item_is_proto(enum rte_flow_item_type type,
/* RAW uses the local all-zeroes mask rather than an rte_flow default. */
165 case RTE_FLOW_ITEM_TYPE_RAW:
166 *mask = &flow_item_raw_mask;
167 *size = sizeof(flow_item_raw_mask);
170 case RTE_FLOW_ITEM_TYPE_ETH:
171 *mask = &rte_flow_item_eth_mask;
172 *size = sizeof(struct rte_flow_item_eth);
175 case RTE_FLOW_ITEM_TYPE_VLAN:
176 *mask = &rte_flow_item_vlan_mask;
177 *size = sizeof(struct rte_flow_item_vlan);
180 case RTE_FLOW_ITEM_TYPE_IPV4:
181 *mask = &rte_flow_item_ipv4_mask;
182 *size = sizeof(struct rte_flow_item_ipv4);
185 case RTE_FLOW_ITEM_TYPE_IPV6:
186 *mask = &rte_flow_item_ipv6_mask;
187 *size = sizeof(struct rte_flow_item_ipv6);
190 case RTE_FLOW_ITEM_TYPE_ICMP:
191 *mask = &rte_flow_item_icmp_mask;
192 *size = sizeof(struct rte_flow_item_icmp);
195 case RTE_FLOW_ITEM_TYPE_UDP:
196 *mask = &rte_flow_item_udp_mask;
197 *size = sizeof(struct rte_flow_item_udp);
200 case RTE_FLOW_ITEM_TYPE_TCP:
201 *mask = &rte_flow_item_tcp_mask;
202 *size = sizeof(struct rte_flow_item_tcp);
205 case RTE_FLOW_ITEM_TYPE_SCTP:
206 *mask = &rte_flow_item_sctp_mask;
207 *size = sizeof(struct rte_flow_item_sctp);
210 case RTE_FLOW_ITEM_TYPE_VXLAN:
211 *mask = &rte_flow_item_vxlan_mask;
212 *size = sizeof(struct rte_flow_item_vxlan);
215 case RTE_FLOW_ITEM_TYPE_E_TAG:
216 *mask = &rte_flow_item_e_tag_mask;
217 *size = sizeof(struct rte_flow_item_e_tag);
220 case RTE_FLOW_ITEM_TYPE_NVGRE:
221 *mask = &rte_flow_item_nvgre_mask;
222 *size = sizeof(struct rte_flow_item_nvgre);
225 case RTE_FLOW_ITEM_TYPE_MPLS:
226 *mask = &rte_flow_item_mpls_mask;
227 *size = sizeof(struct rte_flow_item_mpls);
230 case RTE_FLOW_ITEM_TYPE_GRE:
231 *mask = &rte_flow_item_gre_mask;
232 *size = sizeof(struct rte_flow_item_gre);
/* GTP, GTPC and GTPU all share the generic GTP layout. */
235 case RTE_FLOW_ITEM_TYPE_GTP:
236 case RTE_FLOW_ITEM_TYPE_GTPC:
237 case RTE_FLOW_ITEM_TYPE_GTPU:
238 *mask = &rte_flow_item_gtp_mask;
239 *size = sizeof(struct rte_flow_item_gtp);
242 case RTE_FLOW_ITEM_TYPE_ESP:
243 *mask = &rte_flow_item_esp_mask;
244 *size = sizeof(struct rte_flow_item_esp);
247 case RTE_FLOW_ITEM_TYPE_GENEVE:
248 *mask = &rte_flow_item_geneve_mask;
249 *size = sizeof(struct rte_flow_item_geneve);
252 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
253 *mask = &rte_flow_item_vxlan_gpe_mask;
254 *size = sizeof(struct rte_flow_item_vxlan_gpe);
257 case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
258 *mask = &rte_flow_item_arp_eth_ipv4_mask;
259 *size = sizeof(struct rte_flow_item_arp_eth_ipv4);
262 case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
263 *mask = &rte_flow_item_ipv6_ext_mask;
264 *size = sizeof(struct rte_flow_item_ipv6_ext);
267 case RTE_FLOW_ITEM_TYPE_ICMP6:
268 *mask = &rte_flow_item_icmp6_mask;
269 *size = sizeof(struct rte_flow_item_icmp6);
272 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
273 *mask = &rte_flow_item_icmp6_nd_ns_mask;
274 *size = sizeof(struct rte_flow_item_icmp6_nd_ns);
277 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
278 *mask = &rte_flow_item_icmp6_nd_na_mask;
279 *size = sizeof(struct rte_flow_item_icmp6_nd_na);
282 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
283 *mask = &rte_flow_item_icmp6_nd_opt_mask;
284 *size = sizeof(struct rte_flow_item_icmp6_nd_opt);
287 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
288 *mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
289 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
292 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
293 *mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
294 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
297 default: return 0; /* FALSE */
/*
 * Preprocess a RAW flow item into flat spec/mask byte buffers.
 * Validates the RAW item's constraints (non-NULL spec, no range, relative
 * offset only, no search, non-negative offset, non-zero length, fits in
 * TABLE_RULE_MATCH_SIZE_MAX, pattern/pattern-mask consistency), then
 * copies the pattern into *item_spec and the pattern mask (when present)
 * into *item_mask at the requested offset, and reports the effective
 * item size through *item_size.
 * NOTE(review): the errno arguments of the rte_flow_error_set() calls,
 * the 'disabled' computation loop body, and some guard conditions are
 * elided in this copy of the file.
 */
302 flow_item_raw_preprocess(const struct rte_flow_item *item,
303 union flow_item *item_spec,
304 union flow_item *item_mask,
307 struct rte_flow_error *error)
309 const struct rte_flow_item_raw *item_raw_spec = item->spec;
310 const struct rte_flow_item_raw *item_raw_mask = item->mask;
311 const uint8_t *pattern;
312 const uint8_t *pattern_mask;
313 uint8_t *spec = (uint8_t *)item_spec;
314 uint8_t *mask = (uint8_t *)item_mask;
315 size_t pattern_length, pattern_offset, i;
319 return rte_flow_error_set(error,
321 RTE_FLOW_ERROR_TYPE_ITEM,
323 "RAW: Null specification");
326 return rte_flow_error_set(error,
328 RTE_FLOW_ERROR_TYPE_ITEM,
330 "RAW: Range not allowed (last must be NULL)");
332 if (item_raw_spec->relative == 0)
333 return rte_flow_error_set(error,
335 RTE_FLOW_ERROR_TYPE_ITEM,
337 "RAW: Absolute offset not supported");
339 if (item_raw_spec->search)
340 return rte_flow_error_set(error,
342 RTE_FLOW_ERROR_TYPE_ITEM,
344 "RAW: Search not supported");
346 if (item_raw_spec->offset < 0)
347 return rte_flow_error_set(error,
348 ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
350 "RAW: Negative offset not supported");
352 if (item_raw_spec->length == 0)
353 return rte_flow_error_set(error,
355 RTE_FLOW_ERROR_TYPE_ITEM,
357 "RAW: Zero pattern length");
/* Pattern must fit entirely within the maximum match size. */
359 if (item_raw_spec->offset + item_raw_spec->length >
360 TABLE_RULE_MATCH_SIZE_MAX)
361 return rte_flow_error_set(error,
363 RTE_FLOW_ERROR_TYPE_ITEM,
365 "RAW: Item too big");
367 if (!item_raw_spec->pattern && item_raw_mask && item_raw_mask->pattern)
368 return rte_flow_error_set(error,
370 RTE_FLOW_ERROR_TYPE_ITEM,
372 "RAW: Non-NULL pattern mask not allowed with NULL pattern");
374 pattern = item_raw_spec->pattern;
375 pattern_mask = (item_raw_mask) ? item_raw_mask->pattern : NULL;
376 pattern_length = (size_t)item_raw_spec->length;
377 pattern_offset = (size_t)item_raw_spec->offset;
380 if (pattern_mask == NULL)
383 for (i = 0; i < pattern_length; i++)
/* Flatten spec and mask into zeroed fixed-size buffers at the
 * pattern offset. */
387 memset(spec, 0, TABLE_RULE_MATCH_SIZE_MAX);
389 memcpy(&spec[pattern_offset], pattern, pattern_length);
391 memset(mask, 0, TABLE_RULE_MATCH_SIZE_MAX);
393 memcpy(&mask[pattern_offset], pattern_mask, pattern_length);
395 *item_size = pattern_offset + pattern_length;
396 *item_disabled = disabled;
/*
 * Normalize any protocol item into flat spec/mask buffers:
 *  - reject unsupported item types (via flow_item_is_proto);
 *  - delegate RAW items to flow_item_raw_preprocess();
 *  - NULL spec => zeroed spec/mask, item marked disabled (matches any);
 *  - otherwise copy spec, copy the item mask or fall back to the type's
 *    default mask, mark disabled when the mask is all-zeroes, apply the
 *    mask over the spec, and reject 'last' values that describe a range.
 * NOTE(review): several scaffold lines (if/else, 'last' buffer
 * declaration, size/disabled outputs, returns) are elided in this copy.
 */
402 flow_item_proto_preprocess(const struct rte_flow_item *item,
403 union flow_item *item_spec,
404 union flow_item *item_mask,
407 struct rte_flow_error *error)
409 const void *mask_default;
410 uint8_t *spec = (uint8_t *)item_spec;
411 uint8_t *mask = (uint8_t *)item_mask;
414 if (!flow_item_is_proto(item->type, &mask_default, &size))
415 return rte_flow_error_set(error,
417 RTE_FLOW_ERROR_TYPE_ITEM,
419 "Item type not supported");
421 if (item->type == RTE_FLOW_ITEM_TYPE_RAW)
422 return flow_item_raw_preprocess(item,
431 /* If spec is NULL, then last and mask also have to be NULL. */
432 if (item->last || item->mask)
433 return rte_flow_error_set(error,
435 RTE_FLOW_ERROR_TYPE_ITEM,
437 "Invalid item (NULL spec with non-NULL last or mask)");
439 memset(item_spec, 0, size);
440 memset(item_mask, 0, size);
442 *item_disabled = 1; /* TRUE */
446 memcpy(spec, item->spec, size);
451 memcpy(mask, item->mask, size);
/* No item mask given: fall back to the type's default mask. */
453 memcpy(mask, mask_default, size);
/* All-zeroes mask => the item matches everything => disabled. */
456 for (i = 0; i < size; i++)
459 *item_disabled = (i == size) ? 1 : 0;
461 /* Apply mask over spec. */
462 for (i = 0; i < size; i++)
470 memcpy(last, item->last, size);
471 for (i = 0; i < size; i++)
474 /* check for range */
475 for (i = 0; i < size; i++)
476 if (last[i] != spec[i])
477 return rte_flow_error_set(error,
479 RTE_FLOW_ERROR_TYPE_ITEM,
481 "Range not supported");
/*
488 * Skip disabled protocol items and VOID items
489 * until any of the mutually exclusive conditions
490 * from the list below takes place:
491 * (A) A protocol present in the proto_mask
492 * is met (either ENABLED or DISABLED);
493 * (B) A protocol NOT present in the proto_mask is met in ENABLED state;
494 * (C) The END item is met.
 *
 * NOTE(review): the 'length' accumulation, the status check after
 * preprocessing, and the loop-exit/return logic are elided in this
 * copy of the file.
 */
497 flow_item_skip_disabled_protos(const struct rte_flow_item **item,
500 struct rte_flow_error *error)
504 for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
505 union flow_item spec, mask;
507 int disabled = 0, status;
/* VOID items are always skipped. */
509 if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
512 status = flow_item_proto_preprocess(*item,
521 if ((proto_mask & (1LLU << (*item)->type)) ||
/* Bit mask (indexed by rte_flow item type) selecting the IP protocol
 * items; used with flow_item_skip_disabled_protos() to stop at IPv4/IPv6. */
534 #define FLOW_ITEM_PROTO_IP \
535 ((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
536 (1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
/*
 * Advance *item past any run of VOID items.
 * NOTE(review): the loop scaffolding and the advance/return statements
 * are elided in this copy of the file -- only the stop condition on a
 * non-VOID item is visible.
 */
539 flow_item_skip_void(const struct rte_flow_item **item)
542 if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
/* IANA IP protocol numbers for the L4 protocols supported by the ACL
 * match (used to cross-check item type against the IP header's proto). */
546 #define IP_PROTOCOL_TCP 0x06
547 #define IP_PROTOCOL_UDP 0x11
548 #define IP_PROTOCOL_SCTP 0x84
/*
 * Convert a 64-bit prefix-style mask into a prefix depth (number of
 * leading one bits). A mask is valid only when it is all-ones or a
 * contiguous run of ones from the MSB: "mask & (mask + 1)" is non-zero
 * exactly when the set bits are not a contiguous run ending at bit 0
 * of the complement, i.e. the mask is not of the form ~((1<<k)-1).
 * Depth = 64 - popcount(~mask's trailing zero run) = 64 - n below.
 * NOTE(review): the early-return bodies (all-ones case, invalid-mask
 * error return, NULL-depth handling) are elided in this copy.
 */
551 mask_to_depth(uint64_t mask,
556 if (mask == UINT64_MAX) {
565 if (mask & (mask + 1))
568 n = __builtin_popcountll(mask);
570 *depth = (uint32_t)(64 - n);
/*
 * IPv4 variant of mask_to_depth(): widen the 32-bit mask to 64 bits by
 * filling the upper half with ones, so the generic 64-bit prefix check
 * applies; the resulting depth is then relative to the 32-bit address.
 * NOTE(review): the adjustment of 'd' back to the 0..32 range and the
 * status/depth returns are elided in this copy of the file.
 */
576 ipv4_mask_to_depth(uint32_t mask,
582 status = mask_to_depth(mask | (UINT64_MAX << 32), &d);
/*
 * Convert a 128-bit IPv6 address mask (16 network-order bytes) into a
 * prefix depth by splitting it into two host-order 64-bit halves and
 * validating each half with mask_to_depth().
 * NOTE(review): the combination of d0/d1 into the final depth and the
 * cross-half consistency checks are elided in this copy. Also note the
 * uint64_t* cast of the byte pointer assumes suitable alignment --
 * confirm upstream.
 */
594 ipv6_mask_to_depth(uint8_t *mask,
597 uint64_t *m = (uint64_t *)mask;
598 uint64_t m0 = rte_be_to_cpu_64(m[0]);
599 uint64_t m1 = rte_be_to_cpu_64(m[1]);
603 status = mask_to_depth(m0, &d0);
607 status = mask_to_depth(m1, &d1);
/*
 * Convert a (port, port_mask) pair into an inclusive port range
 * [p0, p1]: the mask must be a valid 16-bit prefix (validated via
 * mask_to_depth with the upper 48 bits forced to ones), then
 * p0 = port & mask and p1 = p0 | ~mask.
 * NOTE(review): the output-parameter declarations and the assignment of
 * p0/p1 to the caller's range variables are elided in this copy.
 */
621 port_mask_to_range(uint16_t port,
629 status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL);
633 p0 = port & port_mask;
634 p1 = p0 | ~port_mask;
/*
 * Translate an rte_flow item list into a softnic ACL table rule match.
 * Expected item sequence: [VOID/disabled]* IPv4|IPv6 [VOID]*
 * TCP|UDP|SCTP [VOID/disabled]* END.
 *  - The IP item yields source/destination prefixes (mask converted to
 *    depth), the IP protocol byte, and its mask (which must be exact,
 *    i.e. UINT8_MAX).
 *  - The L4 item must agree with the IP protocol byte and yields
 *    source/destination port ranges via port_mask_to_range().
 * The rule priority is taken from attr->priority.
 * NOTE(review): errno arguments, status checks after each helper call,
 * 'break' statements closing the switch cases, and the final return are
 * elided in this copy of the file.
 */
646 flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
647 struct pipeline *pipeline __rte_unused,
648 struct softnic_table *table __rte_unused,
649 const struct rte_flow_attr *attr,
650 const struct rte_flow_item *item,
651 struct softnic_table_rule_match *rule_match,
652 struct rte_flow_error *error)
654 union flow_item spec, mask;
655 size_t size, length = 0;
656 int disabled = 0, status;
657 uint8_t ip_proto, ip_proto_mask;
659 memset(rule_match, 0, sizeof(*rule_match));
660 rule_match->match_type = TABLE_ACL;
661 rule_match->match.acl.priority = attr->priority;
663 /* VOID or disabled protos only, if any. */
664 status = flow_item_skip_disabled_protos(&item,
665 FLOW_ITEM_PROTO_IP, &length, error);
670 status = flow_item_proto_preprocess(item, &spec, &mask,
671 &size, &disabled, error);
675 switch (item->type) {
676 case RTE_FLOW_ITEM_TYPE_IPV4:
678 uint32_t sa_depth, da_depth;
680 status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr),
683 return rte_flow_error_set(error,
685 RTE_FLOW_ERROR_TYPE_ITEM,
687 "ACL: Illegal IPv4 header source address mask");
689 status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr),
692 return rte_flow_error_set(error,
694 RTE_FLOW_ERROR_TYPE_ITEM,
696 "ACL: Illegal IPv4 header destination address mask");
698 ip_proto = spec.ipv4.hdr.next_proto_id;
699 ip_proto_mask = mask.ipv4.hdr.next_proto_id;
/* ip_version = 1 marks an IPv4 ACL rule. */
701 rule_match->match.acl.ip_version = 1;
702 rule_match->match.acl.ipv4.sa =
703 rte_ntohl(spec.ipv4.hdr.src_addr);
704 rule_match->match.acl.ipv4.da =
705 rte_ntohl(spec.ipv4.hdr.dst_addr);
706 rule_match->match.acl.sa_depth = sa_depth;
707 rule_match->match.acl.da_depth = da_depth;
708 rule_match->match.acl.proto = ip_proto;
709 rule_match->match.acl.proto_mask = ip_proto_mask;
711 } /* RTE_FLOW_ITEM_TYPE_IPV4 */
713 case RTE_FLOW_ITEM_TYPE_IPV6:
715 uint32_t sa_depth, da_depth;
717 status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth);
719 return rte_flow_error_set(error,
721 RTE_FLOW_ERROR_TYPE_ITEM,
723 "ACL: Illegal IPv6 header source address mask");
725 status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth);
727 return rte_flow_error_set(error,
729 RTE_FLOW_ERROR_TYPE_ITEM,
731 "ACL: Illegal IPv6 header destination address mask");
733 ip_proto = spec.ipv6.hdr.proto;
734 ip_proto_mask = mask.ipv6.hdr.proto;
/* ip_version = 0 marks an IPv6 ACL rule. */
736 rule_match->match.acl.ip_version = 0;
737 memcpy(rule_match->match.acl.ipv6.sa,
738 spec.ipv6.hdr.src_addr,
739 sizeof(spec.ipv6.hdr.src_addr));
740 memcpy(rule_match->match.acl.ipv6.da,
741 spec.ipv6.hdr.dst_addr,
742 sizeof(spec.ipv6.hdr.dst_addr));
743 rule_match->match.acl.sa_depth = sa_depth;
744 rule_match->match.acl.da_depth = da_depth;
745 rule_match->match.acl.proto = ip_proto;
746 rule_match->match.acl.proto_mask = ip_proto_mask;
748 } /* RTE_FLOW_ITEM_TYPE_IPV6 */
751 return rte_flow_error_set(error,
753 RTE_FLOW_ERROR_TYPE_ITEM,
755 "ACL: IP protocol required");
/* The IP protocol byte must be matched exactly so the L4 item
 * type can be validated against it. */
758 if (ip_proto_mask != UINT8_MAX)
759 return rte_flow_error_set(error,
761 RTE_FLOW_ERROR_TYPE_ITEM,
763 "ACL: Illegal IP protocol mask");
767 /* VOID only, if any. */
768 flow_item_skip_void(&item);
770 /* TCP/UDP/SCTP only. */
771 status = flow_item_proto_preprocess(item, &spec, &mask,
772 &size, &disabled, error);
776 switch (item->type) {
777 case RTE_FLOW_ITEM_TYPE_TCP:
779 uint16_t sp0, sp1, dp0, dp1;
781 if (ip_proto != IP_PROTOCOL_TCP)
782 return rte_flow_error_set(error,
784 RTE_FLOW_ERROR_TYPE_ITEM,
786 "ACL: Item type is TCP, but IP protocol is not");
788 status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port),
789 rte_ntohs(mask.tcp.hdr.src_port),
794 return rte_flow_error_set(error,
796 RTE_FLOW_ERROR_TYPE_ITEM,
798 "ACL: Illegal TCP source port mask");
800 status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port),
801 rte_ntohs(mask.tcp.hdr.dst_port),
806 return rte_flow_error_set(error,
808 RTE_FLOW_ERROR_TYPE_ITEM,
810 "ACL: Illegal TCP destination port mask");
812 rule_match->match.acl.sp0 = sp0;
813 rule_match->match.acl.sp1 = sp1;
814 rule_match->match.acl.dp0 = dp0;
815 rule_match->match.acl.dp1 = dp1;
818 } /* RTE_FLOW_ITEM_TYPE_TCP */
820 case RTE_FLOW_ITEM_TYPE_UDP:
822 uint16_t sp0, sp1, dp0, dp1;
824 if (ip_proto != IP_PROTOCOL_UDP)
825 return rte_flow_error_set(error,
827 RTE_FLOW_ERROR_TYPE_ITEM,
829 "ACL: Item type is UDP, but IP protocol is not");
831 status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port),
832 rte_ntohs(mask.udp.hdr.src_port),
836 return rte_flow_error_set(error,
838 RTE_FLOW_ERROR_TYPE_ITEM,
840 "ACL: Illegal UDP source port mask");
842 status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port),
843 rte_ntohs(mask.udp.hdr.dst_port),
847 return rte_flow_error_set(error,
849 RTE_FLOW_ERROR_TYPE_ITEM,
851 "ACL: Illegal UDP destination port mask");
853 rule_match->match.acl.sp0 = sp0;
854 rule_match->match.acl.sp1 = sp1;
855 rule_match->match.acl.dp0 = dp0;
856 rule_match->match.acl.dp1 = dp1;
859 } /* RTE_FLOW_ITEM_TYPE_UDP */
861 case RTE_FLOW_ITEM_TYPE_SCTP:
863 uint16_t sp0, sp1, dp0, dp1;
865 if (ip_proto != IP_PROTOCOL_SCTP)
866 return rte_flow_error_set(error,
868 RTE_FLOW_ERROR_TYPE_ITEM,
870 "ACL: Item type is SCTP, but IP protocol is not");
872 status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port),
873 rte_ntohs(mask.sctp.hdr.src_port),
878 return rte_flow_error_set(error,
880 RTE_FLOW_ERROR_TYPE_ITEM,
882 "ACL: Illegal SCTP source port mask");
884 status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port),
885 rte_ntohs(mask.sctp.hdr.dst_port),
889 return rte_flow_error_set(error,
891 RTE_FLOW_ERROR_TYPE_ITEM,
893 "ACL: Illegal SCTP destination port mask");
895 rule_match->match.acl.sp0 = sp0;
896 rule_match->match.acl.sp1 = sp1;
897 rule_match->match.acl.dp0 = dp0;
898 rule_match->match.acl.dp1 = dp1;
901 } /* RTE_FLOW_ITEM_TYPE_SCTP */
904 return rte_flow_error_set(error,
906 RTE_FLOW_ERROR_TYPE_ITEM,
908 "ACL: TCP/UDP/SCTP required");
913 /* VOID or disabled protos only, if any. */
914 status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
/* Nothing may follow the L4 item except skippable items and END. */
919 if (item->type != RTE_FLOW_ITEM_TYPE_END)
920 return rte_flow_error_set(error,
922 RTE_FLOW_ERROR_TYPE_ITEM,
924 "ACL: Expecting END item");
/*
930 * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
932 * They are located within a larger buffer at offsets *toffset* and *foffset*
933 * respectivelly. Both *tmask* and *fmask* represent bitmasks for the larger
935 * Question: are the two masks equivalent?
938 * 1. Offset basically indicates that the first offset bytes in the buffer
939 * are "don't care", so offset is equivalent to pre-pending an "all-zeros"
940 * array of *offset* bytes to the *mask*.
941 * 2. Each *mask* might contain a number of zero bytes at the beginning or
943 * 3. Bytes in the larger buffer after the end of the *mask* are also considered
944 * "don't care", so they are equivalent to appending an "all-zeros" array of
945 * bytes to the *mask*.
948 * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes
949 * tmask = [00 22 00 33 00], toffset = 2, tsize = 5
950 * => buffer mask = [00 00 00 22 00 33 00 00]
951 * fmask = [22 00 33], foffset = 3, fsize = 3 =>
952 * => buffer mask = [00 00 00 22 00 33 00 00]
953 * Therefore, the tmask and fmask from this example are equivalent.
 *
 * Returns 1 (TRUE) when equivalent, 0 (FALSE) otherwise; on success the
 * positions of the first non-zero byte in each mask are reported through
 * *toffset_plus and *foffset_plus (as offsets within each mask buffer).
 * NOTE(review): tsize/fsize trimming of trailing zero bytes and the
 * branch selecting the shorter mask are partially elided in this copy.
 */
956 hash_key_mask_is_same(uint8_t *tmask,
962 size_t *toffset_plus,
963 size_t *foffset_plus)
965 size_t tpos; /* Position of first non-zero byte in the tmask buffer. */
966 size_t fpos; /* Position of first non-zero byte in the fmask buffer. */
968 /* Compute tpos and fpos. */
969 for (tpos = 0; tmask[tpos] == 0; tpos++)
971 for (fpos = 0; fmask[fpos] == 0; fpos++)
/* Effective start positions within the larger buffer must line up. */
974 if (toffset + tpos != foffset + fpos)
975 return 0; /* FALSE */
983 for (i = 0; i < tsize; i++)
984 if (tmask[tpos + i] != fmask[fpos + i])
985 return 0; /* FALSE */
/* Any excess tail bytes in the longer mask must be zero. */
987 for ( ; i < fsize; i++)
989 return 0; /* FALSE */
993 for (i = 0; i < fsize; i++)
994 if (tmask[tpos + i] != fmask[fpos + i])
995 return 0; /* FALSE */
997 for ( ; i < tsize; i++)
999 return 0; /* FALSE */
1003 *toffset_plus = tpos;
1006 *foffset_plus = fpos;
1008 return 1; /* TRUE */
/*
 * Translate an rte_flow item list into a softnic HASH table rule match:
 * concatenate the preprocessed spec/mask bytes of consecutive enabled
 * items into a flat key/key_mask, then verify the accumulated key mask
 * is equivalent to the table's configured key mask (via
 * hash_key_mask_is_same) before copying the key into the rule match.
 * The packet-buffer offset accounts for the mbuf header and headroom.
 * NOTE(review): the loop-exit condition on a disabled item, status
 * checks, length accumulation, and the final return are elided in this
 * copy of the file.
 */
1012 flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused,
1013 struct pipeline *pipeline __rte_unused,
1014 struct softnic_table *table,
1015 const struct rte_flow_attr *attr __rte_unused,
1016 const struct rte_flow_item *item,
1017 struct softnic_table_rule_match *rule_match,
1018 struct rte_flow_error *error)
1020 struct softnic_table_rule_match_hash key, key_mask;
1021 struct softnic_table_hash_params *params = &table->params.match.hash;
1022 size_t offset = 0, length = 0, tpos, fpos;
1025 memset(&key, 0, sizeof(key));
1026 memset(&key_mask, 0, sizeof(key_mask));
1028 /* VOID or disabled protos only, if any. */
1029 status = flow_item_skip_disabled_protos(&item, 0, &offset, error);
1033 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1034 return rte_flow_error_set(error,
1036 RTE_FLOW_ERROR_TYPE_ITEM,
1038 "HASH: END detected too early");
1040 /* VOID or any protocols (enabled or disabled). */
1041 for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1042 union flow_item spec, mask;
1044 int disabled, status;
1046 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1049 status = flow_item_proto_preprocess(item,
/* The concatenated items must fit in the hash key. */
1058 if (length + size > sizeof(key)) {
1062 return rte_flow_error_set(error,
1064 RTE_FLOW_ERROR_TYPE_ITEM,
1066 "HASH: Item too big");
1069 memcpy(&key.key[length], &spec, size);
1070 memcpy(&key_mask.key[length], &mask, size);
1074 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1075 /* VOID or disabled protos only, if any. */
1076 status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
1081 if (item->type != RTE_FLOW_ITEM_TYPE_END)
1082 return rte_flow_error_set(error,
1084 RTE_FLOW_ERROR_TYPE_ITEM,
1086 "HASH: Expecting END item");
1089 /* Compare flow key mask against table key mask. */
1090 offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
1092 if (!hash_key_mask_is_same(params->key_mask,
1100 return rte_flow_error_set(error,
1102 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1104 "HASH: Item list is not observing the match format");
1107 memset(rule_match, 0, sizeof(*rule_match));
1108 rule_match->match_type = TABLE_HASH;
/* Copy the key shifted to the table's expected position (tpos),
 * clamped to the rule-match key capacity. */
1109 memcpy(&rule_match->match.hash.key[tpos],
1111 RTE_MIN(sizeof(rule_match->match.hash.key) - tpos,
/*
 * Dispatch rte_flow item translation by the pipeline table's match type:
 * ACL and HASH tables are supported; any other match type is rejected
 * with an rte_flow error.
 * NOTE(review): the case labels (presumably TABLE_ACL / TABLE_HASH), the
 * forwarded argument lists, and the errno argument are elided in this
 * copy of the file.
 */
1118 flow_rule_match_get(struct pmd_internals *softnic,
1119 struct pipeline *pipeline,
1120 struct softnic_table *table,
1121 const struct rte_flow_attr *attr,
1122 const struct rte_flow_item *item,
1123 struct softnic_table_rule_match *rule_match,
1124 struct rte_flow_error *error)
1126 switch (table->params.match_type) {
1128 return flow_rule_match_acl_get(softnic,
1139 return flow_rule_match_hash_get(softnic,
1150 return rte_flow_error_set(error,
1152 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1154 "Unsupported pipeline table match type");
/*
 * Translate an rte_flow action list into a softnic table rule action.
 * Supported actions visible in this copy: JUMP, QUEUE, RSS, DROP
 * (mutually exclusive "termination" actions -- tracked by
 * n_jump_queue_rss_drop), plus COUNT, MARK, VXLAN_DECAP, and METER.
 * Each action is validated against the table's action profile mask
 * (params->action_mask) before the corresponding rule_action field and
 * action_mask bit are set. Exactly one termination action is required.
 * NOTE(review): numerous lines are elided in this copy -- errno
 * arguments, 'break' statements, counters for COUNT/MARK/DECAP
 * (n_count/n_mark/n_vxlan_decap are partially visible), the default
 * switch case, and the function's closing return. This function also
 * appears to run past the end of the visible region.
 */
1159 flow_rule_action_get(struct pmd_internals *softnic,
1160 struct pipeline *pipeline,
1161 struct softnic_table *table,
1162 const struct rte_flow_attr *attr,
1163 const struct rte_flow_action *action,
1164 struct softnic_table_rule_action *rule_action,
1165 struct rte_flow_error *error)
1167 struct softnic_table_action_profile *profile;
1168 struct softnic_table_action_profile_params *params;
1169 int n_jump_queue_rss_drop = 0;
1172 int n_vxlan_decap = 0;
1174 profile = softnic_table_action_profile_find(softnic,
1175 table->params.action_profile_name);
1176 if (profile == NULL)
1177 return rte_flow_error_set(error,
1179 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1181 "JUMP: Table action profile");
1183 params = &profile->params;
1185 for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
1186 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1189 switch (action->type) {
1190 case RTE_FLOW_ACTION_TYPE_JUMP:
1192 const struct rte_flow_action_jump *conf = action->conf;
1193 struct flow_attr_map *map;
1196 return rte_flow_error_set(error,
1198 RTE_FLOW_ERROR_TYPE_ACTION,
1200 "JUMP: Null configuration");
1202 if (n_jump_queue_rss_drop)
1203 return rte_flow_error_set(error,
1205 RTE_FLOW_ERROR_TYPE_ACTION,
1207 "Only one termination action is"
1208 " allowed per flow");
1210 if ((params->action_mask &
1211 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1212 return rte_flow_error_set(error,
1214 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1216 "JUMP action not enabled for this table");
1218 n_jump_queue_rss_drop = 1;
/* Resolve the jump target group to a table in this pipeline. */
1220 map = flow_attr_map_get(softnic,
1223 if (map == NULL || map->valid == 0)
1224 return rte_flow_error_set(error,
1226 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1228 "JUMP: Invalid group mapping");
1230 if (strcmp(pipeline->name, map->pipeline_name) != 0)
1231 return rte_flow_error_set(error,
1233 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1235 "JUMP: Jump to table in different pipeline");
1237 /* RTE_TABLE_ACTION_FWD */
1238 rule_action->fwd.action = RTE_PIPELINE_ACTION_TABLE;
1239 rule_action->fwd.id = map->table_id;
1240 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1242 } /* RTE_FLOW_ACTION_TYPE_JUMP */
1244 case RTE_FLOW_ACTION_TYPE_QUEUE:
1246 char name[NAME_SIZE];
1247 struct rte_eth_dev *dev;
1248 const struct rte_flow_action_queue *conf = action->conf;
1253 return rte_flow_error_set(error,
1255 RTE_FLOW_ERROR_TYPE_ACTION,
1257 "QUEUE: Null configuration");
1259 if (n_jump_queue_rss_drop)
1260 return rte_flow_error_set(error,
1262 RTE_FLOW_ERROR_TYPE_ACTION,
1264 "Only one termination action is allowed"
1267 if ((params->action_mask &
1268 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1269 return rte_flow_error_set(error,
1271 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1273 "QUEUE action not enabled for this table");
1275 n_jump_queue_rss_drop = 1;
1277 dev = ETHDEV(softnic);
1279 conf->index >= dev->data->nb_rx_queues)
1280 return rte_flow_error_set(error,
1282 RTE_FLOW_ERROR_TYPE_ACTION,
1284 "QUEUE: Invalid RX queue ID");
/* Map the RX queue index to a pipeline output port by name. */
1286 sprintf(name, "RXQ%u", (uint32_t)conf->index);
1288 status = softnic_pipeline_port_out_find(softnic,
1293 return rte_flow_error_set(error,
1295 RTE_FLOW_ERROR_TYPE_ACTION,
1297 "QUEUE: RX queue not accessible from this pipeline");
1299 /* RTE_TABLE_ACTION_FWD */
1300 rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT;
1301 rule_action->fwd.id = port_id;
1302 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1304 } /*RTE_FLOW_ACTION_TYPE_QUEUE */
1306 case RTE_FLOW_ACTION_TYPE_RSS:
1308 const struct rte_flow_action_rss *conf = action->conf;
1312 return rte_flow_error_set(error,
1314 RTE_FLOW_ERROR_TYPE_ACTION,
1316 "RSS: Null configuration");
1318 if (!rte_is_power_of_2(conf->queue_num))
1319 return rte_flow_error_set(error,
1321 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1323 "RSS: Number of queues must be a power of 2");
1325 if (conf->queue_num > RTE_DIM(rule_action->lb.out))
1326 return rte_flow_error_set(error,
1328 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1330 "RSS: Number of queues too big");
1332 if (n_jump_queue_rss_drop)
1333 return rte_flow_error_set(error,
1335 RTE_FLOW_ERROR_TYPE_ACTION,
1337 "Only one termination action is allowed per flow");
/* RSS needs both the FWD and the LB table actions enabled. */
1339 if (((params->action_mask &
1340 (1LLU << RTE_TABLE_ACTION_FWD)) == 0) ||
1341 ((params->action_mask &
1342 (1LLU << RTE_TABLE_ACTION_LB)) == 0))
1343 return rte_flow_error_set(error,
1345 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1347 "RSS action not supported by this table");
1349 if (params->lb.out_offset !=
1350 pipeline->params.offset_port_id)
1351 return rte_flow_error_set(error,
1353 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1355 "RSS action not supported by this pipeline");
1357 n_jump_queue_rss_drop = 1;
1359 /* RTE_TABLE_ACTION_LB */
1360 for (i = 0; i < conf->queue_num; i++) {
1361 char name[NAME_SIZE];
1362 struct rte_eth_dev *dev;
1366 dev = ETHDEV(softnic);
1369 dev->data->nb_rx_queues)
1370 return rte_flow_error_set(error,
1372 RTE_FLOW_ERROR_TYPE_ACTION,
1374 "RSS: Invalid RX queue ID");
1376 sprintf(name, "RXQ%u",
1377 (uint32_t)conf->queue[i]);
1379 status = softnic_pipeline_port_out_find(softnic,
1384 return rte_flow_error_set(error,
1386 RTE_FLOW_ERROR_TYPE_ACTION,
1388 "RSS: RX queue not accessible from this pipeline");
1390 rule_action->lb.out[i] = port_id;
/* Fill the remaining LB slots by cycling the queue list. */
1393 for ( ; i < RTE_DIM(rule_action->lb.out); i++)
1394 rule_action->lb.out[i] =
1395 rule_action->lb.out[i % conf->queue_num];
1397 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_LB;
1399 /* RTE_TABLE_ACTION_FWD */
1400 rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
1401 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1403 } /* RTE_FLOW_ACTION_TYPE_RSS */
1405 case RTE_FLOW_ACTION_TYPE_DROP:
1407 const void *conf = action->conf;
1410 return rte_flow_error_set(error,
1412 RTE_FLOW_ERROR_TYPE_ACTION,
1414 "DROP: No configuration required");
1416 if (n_jump_queue_rss_drop)
1417 return rte_flow_error_set(error,
1419 RTE_FLOW_ERROR_TYPE_ACTION,
1421 "Only one termination action is allowed per flow");
1422 if ((params->action_mask &
1423 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1424 return rte_flow_error_set(error,
1426 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1428 "DROP action not supported by this table");
1430 n_jump_queue_rss_drop = 1;
1432 /* RTE_TABLE_ACTION_FWD */
1433 rule_action->fwd.action = RTE_PIPELINE_ACTION_DROP;
1434 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1436 } /* RTE_FLOW_ACTION_TYPE_DROP */
1438 case RTE_FLOW_ACTION_TYPE_COUNT:
1440 const struct rte_flow_action_count *conf = action->conf;
1443 return rte_flow_error_set(error,
1445 RTE_FLOW_ERROR_TYPE_ACTION,
1447 "COUNT: Null configuration");
1450 return rte_flow_error_set(error,
1452 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1454 "COUNT: Shared counters not supported");
1457 return rte_flow_error_set(error,
1459 RTE_FLOW_ERROR_TYPE_ACTION,
1461 "Only one COUNT action per flow");
1463 if ((params->action_mask &
1464 (1LLU << RTE_TABLE_ACTION_STATS)) == 0)
1465 return rte_flow_error_set(error,
1467 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1469 "COUNT action not supported by this table");
1473 /* RTE_TABLE_ACTION_STATS */
1474 rule_action->stats.n_packets = 0;
1475 rule_action->stats.n_bytes = 0;
1476 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
1478 } /* RTE_FLOW_ACTION_TYPE_COUNT */
1480 case RTE_FLOW_ACTION_TYPE_MARK:
1482 const struct rte_flow_action_mark *conf = action->conf;
1485 return rte_flow_error_set(error,
1487 RTE_FLOW_ERROR_TYPE_ACTION,
1489 "MARK: Null configuration");
1492 return rte_flow_error_set(error,
1494 RTE_FLOW_ERROR_TYPE_ACTION,
1496 "Only one MARK action per flow");
1498 if ((params->action_mask &
1499 (1LLU << RTE_TABLE_ACTION_TAG)) == 0)
1500 return rte_flow_error_set(error,
1502 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1504 "MARK action not supported by this table");
1508 /* RTE_TABLE_ACTION_TAG */
1509 rule_action->tag.tag = conf->id;
1510 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_TAG;
1512 } /* RTE_FLOW_ACTION_TYPE_MARK */
1514 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
1516 const struct rte_flow_action_mark *conf = action->conf;
1519 return rte_flow_error_set(error,
1521 RTE_FLOW_ERROR_TYPE_ACTION,
1523 "VXLAN DECAP: Non-null configuration");
1526 return rte_flow_error_set(error,
1528 RTE_FLOW_ERROR_TYPE_ACTION,
1530 "Only one VXLAN DECAP action per flow");
1532 if ((params->action_mask &
1533 (1LLU << RTE_TABLE_ACTION_DECAP)) == 0)
1534 return rte_flow_error_set(error,
1536 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1538 "VXLAN DECAP action not supported by this table");
1542 /* RTE_TABLE_ACTION_DECAP */
1543 rule_action->decap.n = 50; /* Ether/IPv4/UDP/VXLAN */
1544 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_DECAP;
1546 } /* RTE_FLOW_ACTION_TYPE_VXLAN_DECAP */
1548 case RTE_FLOW_ACTION_TYPE_METER:
1550 const struct rte_flow_action_meter *conf = action->conf;
1551 struct softnic_mtr_meter_profile *mp;
1552 struct softnic_mtr *m;
1553 uint32_t table_id = table - pipeline->table;
1554 uint32_t meter_profile_id;
1557 if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0)
1558 return rte_flow_error_set(error,
1560 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1562 "METER: Table action not supported");
1564 if (params->mtr.n_tc != 1)
1565 return rte_flow_error_set(error,
1567 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1569 "METER: Multiple TCs not supported");
1572 return rte_flow_error_set(error,
1574 RTE_FLOW_ERROR_TYPE_ACTION,
1576 "METER: Null configuration");
1578 m = softnic_mtr_find(softnic, conf->mtr_id);
1581 return rte_flow_error_set(error,
1583 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1585 "METER: Invalid meter ID");
1588 return rte_flow_error_set(error,
1590 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1592 "METER: Meter already attached to a flow");
1594 meter_profile_id = m->params.meter_profile_id;
1595 mp = softnic_mtr_meter_profile_find(softnic, meter_profile_id);
1597 /* Add meter profile to pipeline table */
1598 if (!softnic_pipeline_table_meter_profile_find(table,
1599 meter_profile_id)) {
1600 struct rte_table_action_meter_profile profile;
/* NOTE(review): the trTCM profile is populated from
 * trtcm_rfc2698 parameters but tagged with the
 * RTE_TABLE_ACTION_METER_TRTCM algorithm -- confirm this
 * mapping is intended in the full source. */
1602 memset(&profile, 0, sizeof(profile));
1603 profile.alg = RTE_TABLE_ACTION_METER_TRTCM;
1604 profile.trtcm.cir = mp->params.trtcm_rfc2698.cir;
1605 profile.trtcm.pir = mp->params.trtcm_rfc2698.pir;
1606 profile.trtcm.cbs = mp->params.trtcm_rfc2698.cbs;
1607 profile.trtcm.pbs = mp->params.trtcm_rfc2698.pbs;
1609 status = softnic_pipeline_table_mtr_profile_add(softnic,
1615 rte_flow_error_set(error,
1617 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1619 "METER: Table meter profile add failed");
1624 /* RTE_TABLE_ACTION_METER */
1625 rule_action->mtr.mtr[0].meter_profile_id = meter_profile_id;
1626 rule_action->mtr.mtr[0].policer[e_RTE_METER_GREEN] =
1627 (enum rte_table_action_policer)m->params.action[RTE_MTR_GREEN];
1628 rule_action->mtr.mtr[0].policer[e_RTE_METER_YELLOW] =
1629 (enum rte_table_action_policer)m->params.action[RTE_MTR_YELLOW];
1630 rule_action->mtr.mtr[0].policer[e_RTE_METER_RED] =
1631 (enum rte_table_action_policer)m->params.action[RTE_MTR_RED];
1632 rule_action->mtr.tc_mask = 1;
1633 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
1635 } /* RTE_FLOW_ACTION_TYPE_METER */
/* A flow must end with exactly one termination action. */
1642 if (n_jump_queue_rss_drop == 0)
1643 return rte_flow_error_set(error,
1645 RTE_FLOW_ERROR_TYPE_ACTION,
1647 "Flow does not have any terminating action");
/* rte_flow .validate callback: check that the flow described by
 * (attr, item[], action[]) can be translated into a softnic pipeline table
 * rule, without installing anything.
 * NOTE(review): several lines of this function are elided in this view
 * (return-type line, NULL-argument guards, call argument tails, final
 * return); comments below describe only the visible code.
 */
1653 pmd_flow_validate(struct rte_eth_dev *dev,
1654 const struct rte_flow_attr *attr,
1655 const struct rte_flow_item item[],
1656 const struct rte_flow_action action[],
1657 struct rte_flow_error *error)
/* Scratch rule match/action filled by the translation helpers below. */
1659 struct softnic_table_rule_match rule_match;
1660 struct softnic_table_rule_action rule_action;
1662 struct pmd_internals *softnic = dev->data->dev_private;
1663 struct pipeline *pipeline;
1664 struct softnic_table *table;
1665 const char *pipeline_name = NULL;
1666 uint32_t table_id = 0;
/* Reject NULL attr/item/action (guard conditions elided in this view);
 * each failure reports through rte_flow_error_set() with a typed error. */
1669 /* Check input parameters. */
1671 return rte_flow_error_set(error,
1673 RTE_FLOW_ERROR_TYPE_ATTR,
1677 return rte_flow_error_set(error,
1679 RTE_FLOW_ERROR_TYPE_ITEM,
1684 return rte_flow_error_set(error,
1686 RTE_FLOW_ERROR_TYPE_ACTION,
/* Map (group, ingress/egress) from attr to a pipeline name + table id
 * via the flow attr map (see flow_pipeline_table_get in HEAD). */
1690 /* Identify the pipeline table to add this flow to. */
1691 status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
/* Resolve the pipeline object and validate the table index against it. */
1696 pipeline = softnic_pipeline_find(softnic, pipeline_name);
1697 if (pipeline == NULL)
1698 return rte_flow_error_set(error,
1700 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1702 "Invalid pipeline name");
1704 if (table_id >= pipeline->n_tables)
1705 return rte_flow_error_set(error,
1707 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1709 "Invalid pipeline table ID");
1711 table = &pipeline->table[table_id];
/* Dry-run translation: build the rule match, then the rule action.
 * Success of both translations constitutes flow validation; the rule is
 * never added to the table here. */
1714 memset(&rule_match, 0, sizeof(rule_match));
1715 status = flow_rule_match_get(softnic,
1726 memset(&rule_action, 0, sizeof(rule_action));
1727 status = flow_rule_action_get(softnic,
/* Scan an rte_flow action list (terminated by RTE_FLOW_ACTION_TYPE_END)
 * for the first METER action and return the softnic meter object that its
 * mtr_id refers to, via softnic_mtr_find().
 * NOTE(review): a NULL-conf guard and the no-METER fall-through return are
 * elided in this view — presumably returning NULL; confirm in full source.
 */
1740 static struct softnic_mtr *
1741 flow_action_meter_get(struct pmd_internals *softnic,
1742 const struct rte_flow_action *action)
1744 for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++)
1745 if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
1746 const struct rte_flow_action_meter *conf = action->conf;
1751 return softnic_mtr_find(softnic, conf->mtr_id);
/* Walk the device-wide meter list and detach any meter currently owned by
 * the given flow (m->flow == flow).
 * NOTE(review): the loop body's reset statement (presumably m->flow = NULL)
 * and the loop exit are elided in this view — confirm in full source.
 */
1758 flow_meter_owner_reset(struct pmd_internals *softnic,
1759 struct rte_flow *flow)
1761 struct softnic_mtr_list *ml = &softnic->mtr.mtrs;
1762 struct softnic_mtr *m;
1764 TAILQ_FOREACH(m, ml, node)
1765 if (m->flow == flow) {
/* Make 'mtr' the meter owned by 'flow': first detach whatever meter the
 * flow currently owns, then attach the new one.
 * NOTE(review): the attach statement (presumably mtr->flow = flow) is
 * elided in this view — confirm in full source.
 */
1772 flow_meter_owner_set(struct pmd_internals *softnic,
1773 struct rte_flow *flow,
1774 struct softnic_mtr *mtr)
1776 /* Reset current flow meter */
1777 flow_meter_owner_reset(softnic, flow);
1779 /* Set new flow meter */
/* Return 1 if the table's action profile has the MTR (meter) table action
 * enabled in its action_mask, 0 otherwise. Used by destroy/flush to decide
 * whether a removed flow may own a meter that must be released.
 * NOTE(review): profile is dereferenced without a NULL check — assumes the
 * table's action_profile_name always resolves; confirm against callers.
 */
1784 is_meter_action_enable(struct pmd_internals *softnic,
1785 struct softnic_table *table)
1787 struct softnic_table_action_profile *profile =
1788 softnic_table_action_profile_find(softnic,
1789 table->params.action_profile_name);
1790 struct softnic_table_action_profile_params *params = &profile->params;
1792 return (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) ? 1 : 0;
/* rte_flow .create callback: translate (attr, item[], action[]) into a
 * softnic pipeline table rule, install it, and return the flow handle
 * (NULL on error, with 'error' filled in).
 * NOTE(review): many guard conditions and call argument tails are elided
 * in this view (NULL checks, goto/return paths, rule_data declaration);
 * comments describe only the visible code.
 */
1795 static struct rte_flow *
1796 pmd_flow_create(struct rte_eth_dev *dev,
1797 const struct rte_flow_attr *attr,
1798 const struct rte_flow_item item[],
1799 const struct rte_flow_action action[],
1800 struct rte_flow_error *error)
1802 struct softnic_table_rule_match rule_match;
1803 struct softnic_table_rule_action rule_action;
1806 struct pmd_internals *softnic = dev->data->dev_private;
1807 struct pipeline *pipeline;
1808 struct softnic_table *table;
1809 struct rte_flow *flow;
1810 struct softnic_mtr *mtr;
1811 const char *pipeline_name = NULL;
1812 uint32_t table_id = 0;
/* new_flow distinguishes a freshly allocated flow (must be freed on
 * failure / linked on success) from an update of an existing one. */
1813 int new_flow, status;
/* Unlike pmd_flow_validate, errors here set 'error' and fall through to
 * a NULL-returning exit path (elided), since the return type is a pointer. */
1815 /* Check input parameters. */
1817 rte_flow_error_set(error,
1819 RTE_FLOW_ERROR_TYPE_ATTR,
1826 rte_flow_error_set(error,
1828 RTE_FLOW_ERROR_TYPE_ITEM,
1834 if (action == NULL) {
1835 rte_flow_error_set(error,
1837 RTE_FLOW_ERROR_TYPE_ACTION,
/* Map attr (group, direction) to the target pipeline table. */
1843 /* Identify the pipeline table to add this flow to. */
1844 status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
1849 pipeline = softnic_pipeline_find(softnic, pipeline_name);
1850 if (pipeline == NULL) {
1851 rte_flow_error_set(error,
1853 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1855 "Invalid pipeline name");
1859 if (table_id >= pipeline->n_tables) {
1860 rte_flow_error_set(error,
1862 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1864 "Invalid pipeline table ID");
1868 table = &pipeline->table[table_id];
/* Translate the rte_flow pattern and actions into softnic rule form. */
1871 memset(&rule_match, 0, sizeof(rule_match));
1872 status = flow_rule_match_get(softnic,
1883 memset(&rule_action, 0, sizeof(rule_action));
1884 status = flow_rule_action_get(softnic,
/* Reuse an existing flow with the same match (rule update) or allocate
 * a new one with calloc (zero-initialized). */
1894 /* Flow find/allocate. */
1896 flow = softnic_flow_find(table, &rule_match);
1899 flow = calloc(1, sizeof(struct rte_flow));
1901 rte_flow_error_set(error,
1903 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1905 "Not enough memory for new flow");
/* Install (or update) the rule in the pipeline table. */
1911 status = softnic_pipeline_table_rule_add(softnic,
1921 rte_flow_error_set(error,
1923 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1925 "Pipeline table rule add failed");
/* Record the rule and its table location on the flow handle so destroy
 * and query can locate it later. */
1930 memcpy(&flow->match, &rule_match, sizeof(rule_match));
1931 memcpy(&flow->action, &rule_action, sizeof(rule_action));
1932 flow->data = rule_data;
1933 flow->pipeline = pipeline;
1934 flow->table_id = table_id;
/* If the action list contains a METER action, mark this flow as the
 * owner of that meter (guard around the set call elided in this view). */
1936 mtr = flow_action_meter_get(softnic, action);
1938 flow_meter_owner_set(softnic, flow, mtr);
/* Link the flow into the table's flow list (new flows only; the guard
 * on new_flow is elided in this view — confirm in full source). */
1940 /* Flow add to list. */
1942 TAILQ_INSERT_TAIL(&table->flows, flow, node);
/* rte_flow .destroy callback: remove the flow's rule from its pipeline
 * table, release any meter the flow owns, and unlink the flow handle.
 * NOTE(review): the return-type line, NULL-flow guard condition, rule
 * delete argument tail, and free/return tail are elided in this view.
 */
1948 pmd_flow_destroy(struct rte_eth_dev *dev,
1949 struct rte_flow *flow,
1950 struct rte_flow_error *error)
1952 struct pmd_internals *softnic = dev->data->dev_private;
1953 struct softnic_table *table;
1956 /* Check input parameters. */
1958 return rte_flow_error_set(error,
1960 RTE_FLOW_ERROR_TYPE_HANDLE,
/* Locate the table this flow was installed into (recorded at create). */
1964 table = &flow->pipeline->table[flow->table_id];
1967 status = softnic_pipeline_table_rule_delete(softnic,
1968 flow->pipeline->name,
1972 return rte_flow_error_set(error,
1974 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1976 "Pipeline table rule delete failed");
/* Only tables with the MTR action enabled can have flows owning meters;
 * detach the meter so it can be reused by another flow. */
1978 /* Update dependencies */
1979 if (is_meter_action_enable(softnic, table))
1980 flow_meter_owner_reset(softnic, flow);
1983 TAILQ_REMOVE(&table->flows, flow, node);
/* rte_flow .flush callback: delete every flow from every table of every
 * pipeline on this device. Deletion failures are recorded but do not stop
 * the sweep; a single aggregate error is reported at the end.
 * NOTE(review): the return-type line, loop variable declarations (i, temp,
 * status), delete call arguments, free() of the flow, and closing braces
 * are elided in this view.
 */
1990 pmd_flow_flush(struct rte_eth_dev *dev,
1991 struct rte_flow_error *error)
1993 struct pmd_internals *softnic = dev->data->dev_private;
1994 struct pipeline *pipeline;
/* Sticky failure flag: keep flushing even if some rule deletes fail. */
1995 int fail_to_del_rule = 0;
1998 TAILQ_FOREACH(pipeline, &softnic->pipeline_list, node) {
1999 /* Remove all the flows added to the tables. */
2000 for (i = 0; i < pipeline->n_tables; i++) {
2001 struct softnic_table *table = &pipeline->table[i];
2002 struct rte_flow *flow;
/* SAFE variant: the flow is removed (and presumably freed) inside
 * the loop body, so a temp pointer preserves the iteration. */
2006 TAILQ_FOREACH_SAFE(flow, &table->flows, node, temp) {
2008 status = softnic_pipeline_table_rule_delete
2014 fail_to_del_rule = 1;
/* Release any meter owned by this flow before unlinking it. */
2015 /* Update dependencies */
2016 if (is_meter_action_enable(softnic, table))
2017 flow_meter_owner_reset(softnic, flow);
2020 TAILQ_REMOVE(&table->flows, flow, node);
2026 if (fail_to_del_rule)
2027 return rte_flow_error_set(error,
2029 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2031 "Some of the rules could not be deleted");
/* rte_flow .query callback (COUNT): read the flow's rule statistics from
 * the table action handle and fill the caller's rte_flow_query_count.
 * NOTE(review): the return-type line, the 'data' parameter declaration,
 * NULL guards for flow/data, the stats_read argument tail (rule data,
 * clear flag), and the final return are elided in this view.
 */
2037 pmd_flow_query(struct rte_eth_dev *dev __rte_unused,
2038 struct rte_flow *flow,
2039 const struct rte_flow_action *action __rte_unused,
2041 struct rte_flow_error *error)
2043 struct rte_table_action_stats_counters stats;
2044 struct softnic_table *table;
/* 'data' is the caller-provided output buffer, interpreted as a
 * COUNT query result. */
2045 struct rte_flow_query_count *flow_stats = data;
2048 /* Check input parameters. */
2050 return rte_flow_error_set(error,
2052 RTE_FLOW_ERROR_TYPE_HANDLE,
2057 return rte_flow_error_set(error,
2059 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2063 table = &flow->pipeline->table[flow->table_id];
2065 /* Rule stats read. */
2066 status = rte_table_action_stats_read(table->a,
2071 return rte_flow_error_set(error,
2073 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2075 "Pipeline table rule stats read failed");
/* hits/bytes are only valid when the action profile enabled the
 * corresponding stats counter; advertise that via the *_set flags. */
2077 /* Fill in flow stats. */
2078 flow_stats->hits_set =
2079 (table->ap->params.stats.n_packets_enabled) ? 1 : 0;
2080 flow_stats->bytes_set =
2081 (table->ap->params.stats.n_bytes_enabled) ? 1 : 0;
2082 flow_stats->hits = stats.n_packets;
2083 flow_stats->bytes = stats.n_bytes;
2088 const struct rte_flow_ops pmd_flow_ops = {
2089 .validate = pmd_flow_validate,
2090 .create = pmd_flow_create,
2091 .destroy = pmd_flow_destroy,
2092 .flush = pmd_flow_flush,
2093 .query = pmd_flow_query,