1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
8 #include <rte_common.h>
9 #include <rte_byteorder.h>
10 #include <rte_malloc.h>
11 #include <rte_string_fns.h>
13 #include <rte_flow_driver.h>
15 #include "rte_eth_softnic_internals.h"
16 #include "rte_eth_softnic.h"
18 #define rte_htons rte_cpu_to_be_16
19 #define rte_htonl rte_cpu_to_be_32
21 #define rte_ntohs rte_be_to_cpu_16
22 #define rte_ntohl rte_be_to_cpu_32
/*
 * softnic_flow_find():
 * Look up an existing flow on @table whose stored match spec is
 * byte-identical to @rule_match (whole-struct memcmp).
 * NOTE(review): body only partially visible in this sample — presumably
 * returns the matching flow, or NULL when no entry matches; confirm
 * against the full source.
 */
24 static struct rte_flow *
25 softnic_flow_find(struct softnic_table *table,
26 struct softnic_table_rule_match *rule_match)
28 struct rte_flow *flow;
/* Linear scan of the per-table flow list; exact (all-bytes) compare. */
30 TAILQ_FOREACH(flow, &table->flows, node)
31 if (memcmp(&flow->match, rule_match, sizeof(*rule_match)) == 0)
/*
 * flow_attr_map_set():
 * Record the mapping from an rte_flow group ID (per direction) to a
 * softnic pipeline table, so later flow attrs can be resolved to a
 * concrete pipeline/table pair.
 * Validates: group_id bound, non-NULL pipeline name, pipeline exists,
 * table_id within the pipeline's table count.
 */
38 flow_attr_map_set(struct pmd_internals *softnic,
41 const char *pipeline_name,
44 struct pipeline *pipeline;
45 struct flow_attr_map *map;
47 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
48 pipeline_name == NULL)
51 pipeline = softnic_pipeline_find(softnic, pipeline_name);
52 if (pipeline == NULL ||
53 table_id >= pipeline->n_tables)
/* Direction selects which map array the group entry lives in. */
56 map = (ingress) ? &softnic->flow.ingress_map[group_id] :
57 &softnic->flow.egress_map[group_id];
/* NOTE(review): strcpy is unbounded; safe only if map->pipeline_name is
 * sized >= the pipeline name buffer — confirm the struct definitions. */
58 strcpy(map->pipeline_name, pipeline_name);
59 map->table_id = table_id;
/*
 * flow_attr_map_get():
 * Return the group->pipeline/table mapping entry for @group_id in the
 * requested direction, or (presumably, body partially elided) NULL when
 * group_id is out of range.
 */
65 struct flow_attr_map *
66 flow_attr_map_get(struct pmd_internals *softnic,
70 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
73 return (ingress) ? &softnic->flow.ingress_map[group_id] :
74 &softnic->flow.egress_map[group_id];
/*
 * flow_pipeline_table_get():
 * Resolve rte_flow attributes (group + direction) to the owning pipeline
 * name and table ID via the attr map.
 * Rejects: NULL attr (first check, partially elided), neither or both of
 * ingress/egress set, and an unmapped/invalid group.
 * Outputs are written through @pipeline_name and @table_id.
 */
78 flow_pipeline_table_get(struct pmd_internals *softnic,
79 const struct rte_flow_attr *attr,
80 const char **pipeline_name,
82 struct rte_flow_error *error)
84 struct flow_attr_map *map;
87 return rte_flow_error_set(error,
89 RTE_FLOW_ERROR_TYPE_ATTR,
/* Exactly one of ingress/egress must be set: direction picks the map. */
93 if (!attr->ingress && !attr->egress)
94 return rte_flow_error_set(error,
96 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
98 "Ingress/egress not specified");
100 if (attr->ingress && attr->egress)
101 return rte_flow_error_set(error,
103 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
105 "Setting both ingress and egress is not allowed");
107 map = flow_attr_map_get(softnic,
112 return rte_flow_error_set(error,
114 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
119 *pipeline_name = map->pipeline_name;
122 *table_id = map->table_id;
/* Members of union flow_item (declaration header elided in this sample):
 * a scratch union large enough to hold the spec/mask of any supported
 * rte_flow item type, plus a raw byte view sized for the biggest rule
 * match the table layer accepts. */
128 uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
129 struct rte_flow_item_eth eth;
130 struct rte_flow_item_vlan vlan;
131 struct rte_flow_item_ipv4 ipv4;
132 struct rte_flow_item_ipv6 ipv6;
133 struct rte_flow_item_icmp icmp;
134 struct rte_flow_item_udp udp;
135 struct rte_flow_item_tcp tcp;
136 struct rte_flow_item_sctp sctp;
137 struct rte_flow_item_vxlan vxlan;
138 struct rte_flow_item_e_tag e_tag;
139 struct rte_flow_item_nvgre nvgre;
140 struct rte_flow_item_mpls mpls;
141 struct rte_flow_item_gre gre;
142 struct rte_flow_item_gtp gtp;
143 struct rte_flow_item_esp esp;
144 struct rte_flow_item_geneve geneve;
145 struct rte_flow_item_vxlan_gpe vxlan_gpe;
146 struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
147 struct rte_flow_item_ipv6_ext ipv6_ext;
148 struct rte_flow_item_icmp6 icmp6;
149 struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
150 struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
151 struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
152 struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
153 struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
/* All-zeros default mask for RAW items (static storage zero-initializes). */
156 static const union flow_item flow_item_raw_mask;
/*
 * flow_item_is_proto():
 * For a supported rte_flow item @type, output its library-provided
 * default mask and the size of its item struct; return 0 (FALSE) for
 * unsupported types. Used by the preprocess step to substitute defaults
 * when an item carries no explicit mask.
 * NOTE(review): the per-case terminators (presumably "return 1") are
 * elided in this sample.
 */
159 flow_item_is_proto(enum rte_flow_item_type type,
164 case RTE_FLOW_ITEM_TYPE_RAW:
/* RAW has no library default mask; use the local all-zeros union. */
165 *mask = &flow_item_raw_mask;
166 *size = sizeof(flow_item_raw_mask);
169 case RTE_FLOW_ITEM_TYPE_ETH:
170 *mask = &rte_flow_item_eth_mask;
171 *size = sizeof(struct rte_flow_item_eth);
174 case RTE_FLOW_ITEM_TYPE_VLAN:
175 *mask = &rte_flow_item_vlan_mask;
176 *size = sizeof(struct rte_flow_item_vlan);
179 case RTE_FLOW_ITEM_TYPE_IPV4:
180 *mask = &rte_flow_item_ipv4_mask;
181 *size = sizeof(struct rte_flow_item_ipv4);
184 case RTE_FLOW_ITEM_TYPE_IPV6:
185 *mask = &rte_flow_item_ipv6_mask;
186 *size = sizeof(struct rte_flow_item_ipv6);
189 case RTE_FLOW_ITEM_TYPE_ICMP:
190 *mask = &rte_flow_item_icmp_mask;
191 *size = sizeof(struct rte_flow_item_icmp);
194 case RTE_FLOW_ITEM_TYPE_UDP:
195 *mask = &rte_flow_item_udp_mask;
196 *size = sizeof(struct rte_flow_item_udp);
199 case RTE_FLOW_ITEM_TYPE_TCP:
200 *mask = &rte_flow_item_tcp_mask;
201 *size = sizeof(struct rte_flow_item_tcp);
204 case RTE_FLOW_ITEM_TYPE_SCTP:
205 *mask = &rte_flow_item_sctp_mask;
206 *size = sizeof(struct rte_flow_item_sctp);
209 case RTE_FLOW_ITEM_TYPE_VXLAN:
210 *mask = &rte_flow_item_vxlan_mask;
211 *size = sizeof(struct rte_flow_item_vxlan);
214 case RTE_FLOW_ITEM_TYPE_E_TAG:
215 *mask = &rte_flow_item_e_tag_mask;
216 *size = sizeof(struct rte_flow_item_e_tag);
219 case RTE_FLOW_ITEM_TYPE_NVGRE:
220 *mask = &rte_flow_item_nvgre_mask;
221 *size = sizeof(struct rte_flow_item_nvgre);
224 case RTE_FLOW_ITEM_TYPE_MPLS:
225 *mask = &rte_flow_item_mpls_mask;
226 *size = sizeof(struct rte_flow_item_mpls);
229 case RTE_FLOW_ITEM_TYPE_GRE:
230 *mask = &rte_flow_item_gre_mask;
231 *size = sizeof(struct rte_flow_item_gre);
/* GTP-C and GTP-U share the generic GTP item layout. */
234 case RTE_FLOW_ITEM_TYPE_GTP:
235 case RTE_FLOW_ITEM_TYPE_GTPC:
236 case RTE_FLOW_ITEM_TYPE_GTPU:
237 *mask = &rte_flow_item_gtp_mask;
238 *size = sizeof(struct rte_flow_item_gtp);
241 case RTE_FLOW_ITEM_TYPE_ESP:
242 *mask = &rte_flow_item_esp_mask;
243 *size = sizeof(struct rte_flow_item_esp);
246 case RTE_FLOW_ITEM_TYPE_GENEVE:
247 *mask = &rte_flow_item_geneve_mask;
248 *size = sizeof(struct rte_flow_item_geneve);
251 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
252 *mask = &rte_flow_item_vxlan_gpe_mask;
253 *size = sizeof(struct rte_flow_item_vxlan_gpe);
256 case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
257 *mask = &rte_flow_item_arp_eth_ipv4_mask;
258 *size = sizeof(struct rte_flow_item_arp_eth_ipv4);
261 case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
262 *mask = &rte_flow_item_ipv6_ext_mask;
263 *size = sizeof(struct rte_flow_item_ipv6_ext);
266 case RTE_FLOW_ITEM_TYPE_ICMP6:
267 *mask = &rte_flow_item_icmp6_mask;
268 *size = sizeof(struct rte_flow_item_icmp6);
271 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
272 *mask = &rte_flow_item_icmp6_nd_ns_mask;
273 *size = sizeof(struct rte_flow_item_icmp6_nd_ns);
276 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
277 *mask = &rte_flow_item_icmp6_nd_na_mask;
278 *size = sizeof(struct rte_flow_item_icmp6_nd_na);
281 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
282 *mask = &rte_flow_item_icmp6_nd_opt_mask;
283 *size = sizeof(struct rte_flow_item_icmp6_nd_opt);
286 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
287 *mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
288 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
291 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
292 *mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
293 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
296 default: return 0; /* FALSE */
/*
 * flow_item_raw_preprocess():
 * Validate a RAW flow item and expand it into fixed-size spec/mask byte
 * buffers at the item's relative offset.
 * Constraints enforced: non-NULL spec, no range (last), relative offset
 * only, no search, non-negative offset, non-zero length, offset+length
 * within TABLE_RULE_MATCH_SIZE_MAX, and no pattern mask without pattern.
 * Outputs: *item_size = offset + length; *item_disabled per the (elided)
 * "disabled" computation over the mask bytes.
 */
301 flow_item_raw_preprocess(const struct rte_flow_item *item,
302 union flow_item *item_spec,
303 union flow_item *item_mask,
306 struct rte_flow_error *error)
308 const struct rte_flow_item_raw *item_raw_spec = item->spec;
309 const struct rte_flow_item_raw *item_raw_mask = item->mask;
310 const uint8_t *pattern;
311 const uint8_t *pattern_mask;
312 uint8_t *spec = (uint8_t *)item_spec;
313 uint8_t *mask = (uint8_t *)item_mask;
314 size_t pattern_length, pattern_offset, i;
318 return rte_flow_error_set(error,
320 RTE_FLOW_ERROR_TYPE_ITEM,
322 "RAW: Null specification");
325 return rte_flow_error_set(error,
327 RTE_FLOW_ERROR_TYPE_ITEM,
329 "RAW: Range not allowed (last must be NULL)");
331 if (item_raw_spec->relative == 0)
332 return rte_flow_error_set(error,
334 RTE_FLOW_ERROR_TYPE_ITEM,
336 "RAW: Absolute offset not supported");
338 if (item_raw_spec->search)
339 return rte_flow_error_set(error,
341 RTE_FLOW_ERROR_TYPE_ITEM,
343 "RAW: Search not supported");
345 if (item_raw_spec->offset < 0)
346 return rte_flow_error_set(error,
347 ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
349 "RAW: Negative offset not supported");
351 if (item_raw_spec->length == 0)
352 return rte_flow_error_set(error,
354 RTE_FLOW_ERROR_TYPE_ITEM,
356 "RAW: Zero pattern length");
/* The expanded pattern must fit the raw rule-match buffer. */
358 if (item_raw_spec->offset + item_raw_spec->length >
359 TABLE_RULE_MATCH_SIZE_MAX)
360 return rte_flow_error_set(error,
362 RTE_FLOW_ERROR_TYPE_ITEM,
364 "RAW: Item too big");
366 if (!item_raw_spec->pattern && item_raw_mask && item_raw_mask->pattern)
367 return rte_flow_error_set(error,
369 RTE_FLOW_ERROR_TYPE_ITEM,
371 "RAW: Non-NULL pattern mask not allowed with NULL pattern");
373 pattern = item_raw_spec->pattern;
374 pattern_mask = (item_raw_mask) ? item_raw_mask->pattern : NULL;
375 pattern_length = (size_t)item_raw_spec->length;
376 pattern_offset = (size_t)item_raw_spec->offset;
/* NOTE(review): the "disabled" determination loop body is elided here. */
379 if (pattern_mask == NULL)
382 for (i = 0; i < pattern_length; i++)
/* Zero-fill both buffers, then splice pattern/mask at the offset. */
386 memset(spec, 0, TABLE_RULE_MATCH_SIZE_MAX);
388 memcpy(&spec[pattern_offset], pattern, pattern_length);
390 memset(mask, 0, TABLE_RULE_MATCH_SIZE_MAX);
392 memcpy(&mask[pattern_offset], pattern_mask, pattern_length);
394 *item_size = pattern_offset + pattern_length;
395 *item_disabled = disabled;
/*
 * flow_item_proto_preprocess():
 * Normalize one flow item into (spec, mask, size, disabled):
 *  - reject unsupported item types;
 *  - delegate RAW items to flow_item_raw_preprocess();
 *  - NULL spec => zeroed spec/mask, item marked disabled (match-anything),
 *    and last/mask must also be NULL;
 *  - otherwise copy spec, copy explicit mask or substitute the type's
 *    default mask, mark disabled if the mask is all-zeros, apply mask
 *    over spec, and reject ranges ("last" differing from spec after
 *    masking).
 */
401 flow_item_proto_preprocess(const struct rte_flow_item *item,
402 union flow_item *item_spec,
403 union flow_item *item_mask,
406 struct rte_flow_error *error)
408 const void *mask_default;
409 uint8_t *spec = (uint8_t *)item_spec;
410 uint8_t *mask = (uint8_t *)item_mask;
413 if (!flow_item_is_proto(item->type, &mask_default, &size))
414 return rte_flow_error_set(error,
416 RTE_FLOW_ERROR_TYPE_ITEM,
418 "Item type not supported");
420 if (item->type == RTE_FLOW_ITEM_TYPE_RAW)
421 return flow_item_raw_preprocess(item,
430 /* If spec is NULL, then last and mask also have to be NULL. */
431 if (item->last || item->mask)
432 return rte_flow_error_set(error,
434 RTE_FLOW_ERROR_TYPE_ITEM,
436 "Invalid item (NULL spec with non-NULL last or mask)");
438 memset(item_spec, 0, size);
439 memset(item_mask, 0, size);
441 *item_disabled = 1; /* TRUE */
445 memcpy(spec, item->spec, size);
450 memcpy(mask, item->mask, size);
/* No explicit mask: fall back to the item type's default mask. */
452 memcpy(mask, mask_default, size);
/* All-zeros mask means the item constrains nothing => disabled. */
455 for (i = 0; i < size; i++)
458 *item_disabled = (i == size) ? 1 : 0;
460 /* Apply mask over spec. */
461 for (i = 0; i < size; i++)
/* NOTE(review): masking of "last" appears between these loops (elided). */
469 memcpy(last, item->last, size);
470 for (i = 0; i < size; i++)
473 /* check for range */
474 for (i = 0; i < size; i++)
475 if (last[i] != spec[i])
476 return rte_flow_error_set(error,
478 RTE_FLOW_ERROR_TYPE_ITEM,
480 "Range not supported");
/*
 * Skip disabled protocol items and VOID items
 * until any of the mutually exclusive conditions
 * from the list below takes place:
 * (A) A protocol present in the proto_mask
 * is met (either ENABLED or DISABLED);
 * (B) A protocol NOT present in the proto_mask is met in ENABLED state;
 * (C) The END item is met.
 * (Each skipped proto's size is presumably accumulated into the length
 * out-parameter — the accumulation lines are elided in this sample.)
 */
496 flow_item_skip_disabled_protos(const struct rte_flow_item **item,
499 struct rte_flow_error *error)
503 for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
504 union flow_item spec, mask;
506 int disabled = 0, status;
508 if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
/* Normalize the item to learn its size and disabled state. */
511 status = flow_item_proto_preprocess(*item,
/* Stop at condition (A) or (B): proto in mask, or enabled proto. */
520 if ((proto_mask & (1LLU << (*item)->type)) ||
533 #define FLOW_ITEM_PROTO_IP \
534 ((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
535 (1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
/* Advance *item past any leading VOID items (loop body elided here);
 * stops at the first non-VOID item. */
538 flow_item_skip_void(const struct rte_flow_item **item)
541 if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
545 #define IP_PROTOCOL_TCP 0x06
546 #define IP_PROTOCOL_UDP 0x11
547 #define IP_PROTOCOL_SCTP 0x84
/*
 * mask_to_depth():
 * Convert a 64-bit network mask into a prefix depth (number of leading
 * one bits), rejecting non-prefix masks.
 * NOTE(review): a transformation step between the UINT64_MAX fast path
 * and the (mask & (mask + 1)) contiguity test — likely mask = ~mask —
 * is elided in this sample; the popcount/depth math below assumes it.
 */
550 mask_to_depth(uint64_t mask,
/* All-ones mask: depth is 64 (early-out path, body elided). */
555 if (mask == UINT64_MAX) {
/* Non-zero here means the (inverted) mask is not of the form 0...01...1,
 * i.e. the original mask is not a contiguous prefix => reject. */
564 if (mask & (mask + 1))
567 n = __builtin_popcountll(mask);
569 *depth = (uint32_t)(64 - n);
/*
 * ipv4_mask_to_depth():
 * Prefix depth of a 32-bit IPv4 mask: widen to 64 bits by setting the
 * top 32 bits to ones, reuse mask_to_depth(), then (presumably, elided)
 * subtract 32 from the resulting depth.
 */
575 ipv4_mask_to_depth(uint32_t mask,
581 status = mask_to_depth(mask | (UINT64_MAX << 32), &d);
/*
 * ipv6_mask_to_depth():
 * Prefix depth of a 128-bit IPv6 mask, computed as two 64-bit halves
 * (big-endian) each run through mask_to_depth(); the combination of
 * d0/d1 into the final depth is elided in this sample.
 * NOTE(review): casting uint8_t* to uint64_t* assumes 8-byte alignment
 * of the source buffer and relies on type-punning — worth confirming
 * the callers always pass an aligned header field.
 */
593 ipv6_mask_to_depth(uint8_t *mask,
596 uint64_t *m = (uint64_t *)mask;
597 uint64_t m0 = rte_be_to_cpu_64(m[0]);
598 uint64_t m1 = rte_be_to_cpu_64(m[1]);
602 status = mask_to_depth(m0, &d0);
606 status = mask_to_depth(m1, &d1);
/*
 * port_mask_to_range():
 * Convert a (port, port_mask) pair into an inclusive port range
 * [p0, p1]: p0 has the masked-out bits cleared, p1 has them set.
 * The mask is first validated as a contiguous prefix by widening to
 * 64 bits (top 48 bits forced to ones) via mask_to_depth().
 */
620 port_mask_to_range(uint16_t port,
628 status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL);
632 p0 = port & port_mask;
633 p1 = p0 | ~port_mask;
/*
 * flow_rule_match_acl_get():
 * Translate an rte_flow item list into a softnic ACL table rule match:
 * exactly one IPv4 or IPv6 item (src/dst prefix + IP proto), followed by
 * exactly one TCP/UDP/SCTP item (src/dst port ranges), then END.
 * The ACL priority is taken from attr->priority. The IP protocol field
 * must be fully masked (UINT8_MAX) and must agree with the L4 item type.
 */
645 flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
646 struct pipeline *pipeline __rte_unused,
647 struct softnic_table *table __rte_unused,
648 const struct rte_flow_attr *attr,
649 const struct rte_flow_item *item,
650 struct softnic_table_rule_match *rule_match,
651 struct rte_flow_error *error)
653 union flow_item spec, mask;
654 size_t size, length = 0;
655 int disabled = 0, status;
656 uint8_t ip_proto, ip_proto_mask;
658 memset(rule_match, 0, sizeof(*rule_match));
659 rule_match->match_type = TABLE_ACL;
660 rule_match->match.acl.priority = attr->priority;
662 /* VOID or disabled protos only, if any. */
663 status = flow_item_skip_disabled_protos(&item,
664 FLOW_ITEM_PROTO_IP, &length, error);
/* First significant item: must be IPv4 or IPv6. */
669 status = flow_item_proto_preprocess(item, &spec, &mask,
670 &size, &disabled, error);
674 switch (item->type) {
675 case RTE_FLOW_ITEM_TYPE_IPV4:
677 uint32_t sa_depth, da_depth;
/* Masks must be valid prefixes; convert them to CIDR depths. */
679 status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr),
682 return rte_flow_error_set(error,
684 RTE_FLOW_ERROR_TYPE_ITEM,
686 "ACL: Illegal IPv4 header source address mask");
688 status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr),
691 return rte_flow_error_set(error,
693 RTE_FLOW_ERROR_TYPE_ITEM,
695 "ACL: Illegal IPv4 header destination address mask");
697 ip_proto = spec.ipv4.hdr.next_proto_id;
698 ip_proto_mask = mask.ipv4.hdr.next_proto_id;
700 rule_match->match.acl.ip_version = 1;
701 rule_match->match.acl.ipv4.sa =
702 rte_ntohl(spec.ipv4.hdr.src_addr);
703 rule_match->match.acl.ipv4.da =
704 rte_ntohl(spec.ipv4.hdr.dst_addr);
705 rule_match->match.acl.sa_depth = sa_depth;
706 rule_match->match.acl.da_depth = da_depth;
707 rule_match->match.acl.proto = ip_proto;
708 rule_match->match.acl.proto_mask = ip_proto_mask;
710 } /* RTE_FLOW_ITEM_TYPE_IPV4 */
712 case RTE_FLOW_ITEM_TYPE_IPV6:
714 uint32_t sa_depth, da_depth;
716 status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth);
718 return rte_flow_error_set(error,
720 RTE_FLOW_ERROR_TYPE_ITEM,
722 "ACL: Illegal IPv6 header source address mask");
724 status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth);
726 return rte_flow_error_set(error,
728 RTE_FLOW_ERROR_TYPE_ITEM,
730 "ACL: Illegal IPv6 header destination address mask");
732 ip_proto = spec.ipv6.hdr.proto;
733 ip_proto_mask = mask.ipv6.hdr.proto;
/* ip_version == 0 encodes IPv6 in the softnic ACL match. */
735 rule_match->match.acl.ip_version = 0;
736 memcpy(rule_match->match.acl.ipv6.sa,
737 spec.ipv6.hdr.src_addr,
738 sizeof(spec.ipv6.hdr.src_addr));
739 memcpy(rule_match->match.acl.ipv6.da,
740 spec.ipv6.hdr.dst_addr,
741 sizeof(spec.ipv6.hdr.dst_addr));
742 rule_match->match.acl.sa_depth = sa_depth;
743 rule_match->match.acl.da_depth = da_depth;
744 rule_match->match.acl.proto = ip_proto;
745 rule_match->match.acl.proto_mask = ip_proto_mask;
747 } /* RTE_FLOW_ITEM_TYPE_IPV6 */
/* default: neither IPv4 nor IPv6 present => reject. */
750 return rte_flow_error_set(error,
752 RTE_FLOW_ERROR_TYPE_ITEM,
754 "ACL: IP protocol required");
/* L4 item type must be derivable: proto field fully masked. */
757 if (ip_proto_mask != UINT8_MAX)
758 return rte_flow_error_set(error,
760 RTE_FLOW_ERROR_TYPE_ITEM,
762 "ACL: Illegal IP protocol mask");
766 /* VOID only, if any. */
767 flow_item_skip_void(&item);
769 /* TCP/UDP/SCTP only. */
770 status = flow_item_proto_preprocess(item, &spec, &mask,
771 &size, &disabled, error);
775 switch (item->type) {
776 case RTE_FLOW_ITEM_TYPE_TCP:
778 uint16_t sp0, sp1, dp0, dp1;
780 if (ip_proto != IP_PROTOCOL_TCP)
781 return rte_flow_error_set(error,
783 RTE_FLOW_ERROR_TYPE_ITEM,
785 "ACL: Item type is TCP, but IP protocol is not");
/* Port masks become inclusive [p0, p1] ranges for the ACL. */
787 status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port),
788 rte_ntohs(mask.tcp.hdr.src_port),
793 return rte_flow_error_set(error,
795 RTE_FLOW_ERROR_TYPE_ITEM,
797 "ACL: Illegal TCP source port mask");
799 status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port),
800 rte_ntohs(mask.tcp.hdr.dst_port),
805 return rte_flow_error_set(error,
807 RTE_FLOW_ERROR_TYPE_ITEM,
809 "ACL: Illegal TCP destination port mask");
811 rule_match->match.acl.sp0 = sp0;
812 rule_match->match.acl.sp1 = sp1;
813 rule_match->match.acl.dp0 = dp0;
814 rule_match->match.acl.dp1 = dp1;
817 } /* RTE_FLOW_ITEM_TYPE_TCP */
819 case RTE_FLOW_ITEM_TYPE_UDP:
821 uint16_t sp0, sp1, dp0, dp1;
823 if (ip_proto != IP_PROTOCOL_UDP)
824 return rte_flow_error_set(error,
826 RTE_FLOW_ERROR_TYPE_ITEM,
828 "ACL: Item type is UDP, but IP protocol is not");
830 status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port),
831 rte_ntohs(mask.udp.hdr.src_port),
835 return rte_flow_error_set(error,
837 RTE_FLOW_ERROR_TYPE_ITEM,
839 "ACL: Illegal UDP source port mask");
841 status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port),
842 rte_ntohs(mask.udp.hdr.dst_port),
846 return rte_flow_error_set(error,
848 RTE_FLOW_ERROR_TYPE_ITEM,
850 "ACL: Illegal UDP destination port mask");
852 rule_match->match.acl.sp0 = sp0;
853 rule_match->match.acl.sp1 = sp1;
854 rule_match->match.acl.dp0 = dp0;
855 rule_match->match.acl.dp1 = dp1;
858 } /* RTE_FLOW_ITEM_TYPE_UDP */
860 case RTE_FLOW_ITEM_TYPE_SCTP:
862 uint16_t sp0, sp1, dp0, dp1;
864 if (ip_proto != IP_PROTOCOL_SCTP)
865 return rte_flow_error_set(error,
867 RTE_FLOW_ERROR_TYPE_ITEM,
869 "ACL: Item type is SCTP, but IP protocol is not");
871 status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port),
872 rte_ntohs(mask.sctp.hdr.src_port),
877 return rte_flow_error_set(error,
879 RTE_FLOW_ERROR_TYPE_ITEM,
881 "ACL: Illegal SCTP source port mask");
883 status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port),
884 rte_ntohs(mask.sctp.hdr.dst_port),
888 return rte_flow_error_set(error,
890 RTE_FLOW_ERROR_TYPE_ITEM,
892 "ACL: Illegal SCTP destination port mask");
894 rule_match->match.acl.sp0 = sp0;
895 rule_match->match.acl.sp1 = sp1;
896 rule_match->match.acl.dp0 = dp0;
897 rule_match->match.acl.dp1 = dp1;
900 } /* RTE_FLOW_ITEM_TYPE_SCTP */
903 return rte_flow_error_set(error,
905 RTE_FLOW_ERROR_TYPE_ITEM,
907 "ACL: TCP/UDP/SCTP required");
912 /* VOID or disabled protos only, if any. */
913 status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
/* The item list must be fully consumed at this point. */
918 if (item->type != RTE_FLOW_ITEM_TYPE_END)
919 return rte_flow_error_set(error,
921 RTE_FLOW_ERROR_TYPE_ITEM,
923 "ACL: Expecting END item");
929 * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
931 * They are located within a larger buffer at offsets *toffset* and *foffset*
932 * respectively. Both *tmask* and *fmask* represent bitmasks for the larger
934 * Question: are the two masks equivalent?
937 * 1. Offset basically indicates that the first offset bytes in the buffer
938 * are "don't care", so offset is equivalent to pre-pending an "all-zeros"
939 * array of *offset* bytes to the *mask*.
940 * 2. Each *mask* might contain a number of zero bytes at the beginning or
942 * 3. Bytes in the larger buffer after the end of the *mask* are also considered
943 * "don't care", so they are equivalent to appending an "all-zeros" array of
944 * bytes to the *mask*.
947 * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes
948 * tmask = [00 22 00 33 00], toffset = 2, tsize = 5
949 * => buffer mask = [00 00 00 22 00 33 00 00]
950 * fmask = [22 00 33], foffset = 3, fsize = 3 =>
951 * => buffer mask = [00 00 00 22 00 33 00 00]
952 * Therefore, the tmask and fmask from this example are equivalent.
/*
 * hash_key_mask_is_same():
 * Decide whether two byte masks (tmask at toffset, fmask at foffset,
 * within a common larger buffer) are equivalent, per the normalization
 * rules described in the comment block above. On success, also outputs
 * the positions of the first non-zero byte of each mask via
 * *toffset_plus / *foffset_plus.
 * Returns 1 (TRUE) when equivalent, 0 (FALSE) otherwise.
 * NOTE(review): the leading-zero scans below have no visible upper
 * bound — an all-zeros mask would read past the array; presumably the
 * callers guarantee at least one non-zero byte. Confirm.
 */
955 hash_key_mask_is_same(uint8_t *tmask,
961 size_t *toffset_plus,
962 size_t *foffset_plus)
964 size_t tpos; /* Position of first non-zero byte in the tmask buffer. */
965 size_t fpos; /* Position of first non-zero byte in the fmask buffer. */
967 /* Compute tpos and fpos. */
968 for (tpos = 0; tmask[tpos] == 0; tpos++)
970 for (fpos = 0; fmask[fpos] == 0; fpos++)
/* Masks must start at the same absolute position within the buffer. */
973 if (toffset + tpos != foffset + fpos)
974 return 0; /* FALSE */
/* Compare overlapping bytes; any tail beyond the shorter mask must be
 * zero in the longer one (checked by the second loop of each pair). */
982 for (i = 0; i < tsize; i++)
983 if (tmask[tpos + i] != fmask[fpos + i])
984 return 0; /* FALSE */
986 for ( ; i < fsize; i++)
988 return 0; /* FALSE */
992 for (i = 0; i < fsize; i++)
993 if (tmask[tpos + i] != fmask[fpos + i])
994 return 0; /* FALSE */
996 for ( ; i < tsize; i++)
998 return 0; /* FALSE */
1002 *toffset_plus = tpos;
1005 *foffset_plus = fpos;
1007 return 1; /* TRUE */
/*
 * flow_rule_match_hash_get():
 * Translate an rte_flow item list into a softnic HASH table rule match:
 * concatenate the masked spec bytes of all non-VOID items into a flat
 * key, then verify the resulting key mask is equivalent to the table's
 * configured key mask (hash_key_mask_is_same) before copying the key
 * into rule_match at the mask-aligned position.
 */
1011 flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused,
1012 struct pipeline *pipeline __rte_unused,
1013 struct softnic_table *table,
1014 const struct rte_flow_attr *attr __rte_unused,
1015 const struct rte_flow_item *item,
1016 struct softnic_table_rule_match *rule_match,
1017 struct rte_flow_error *error)
1019 struct softnic_table_rule_match_hash key, key_mask;
1020 struct softnic_table_hash_params *params = &table->params.match.hash;
1021 size_t offset = 0, length = 0, tpos, fpos;
1024 memset(&key, 0, sizeof(key));
1025 memset(&key_mask, 0, sizeof(key_mask));
1027 /* VOID or disabled protos only, if any. */
1028 status = flow_item_skip_disabled_protos(&item, 0, &offset, error);
1032 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1033 return rte_flow_error_set(error,
1035 RTE_FLOW_ERROR_TYPE_ITEM,
1037 "HASH: END detected too early");
1039 /* VOID or any protocols (enabled or disabled). */
1040 for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1041 union flow_item spec, mask;
1043 int disabled, status;
1045 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1048 status = flow_item_proto_preprocess(item,
/* Accumulated key must fit the hash rule-match key buffer. */
1057 if (length + size > sizeof(key)) {
1061 return rte_flow_error_set(error,
1063 RTE_FLOW_ERROR_TYPE_ITEM,
1065 "HASH: Item too big");
/* Append this item's masked spec/mask to the flat key/key_mask. */
1068 memcpy(&key.key[length], &spec, size);
1069 memcpy(&key_mask.key[length], &mask, size);
1073 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1074 /* VOID or disabled protos only, if any. */
1075 status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
1080 if (item->type != RTE_FLOW_ITEM_TYPE_END)
1081 return rte_flow_error_set(error,
1083 RTE_FLOW_ERROR_TYPE_ITEM,
1085 "HASH: Expecting END item");
1088 /* Compare flow key mask against table key mask. */
/* Table key offset is relative to the mbuf start; rebase the flow's
 * packet offset accordingly before the comparison. */
1089 offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
1091 if (!hash_key_mask_is_same(params->key_mask,
1099 return rte_flow_error_set(error,
1101 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1103 "HASH: Item list is not observing the match format");
1106 memset(rule_match, 0, sizeof(*rule_match));
1107 rule_match->match_type = TABLE_HASH;
/* Copy the key starting at the first significant byte (tpos), clipped
 * to the destination key size. */
1108 memcpy(&rule_match->match.hash.key[tpos],
1110 RTE_MIN(sizeof(rule_match->match.hash.key) - tpos,
/*
 * flow_rule_match_get():
 * Dispatch item-list translation by the table's match type:
 * ACL tables -> flow_rule_match_acl_get(), hash tables ->
 * flow_rule_match_hash_get(); any other match type is rejected.
 */
1117 flow_rule_match_get(struct pmd_internals *softnic,
1118 struct pipeline *pipeline,
1119 struct softnic_table *table,
1120 const struct rte_flow_attr *attr,
1121 const struct rte_flow_item *item,
1122 struct softnic_table_rule_match *rule_match,
1123 struct rte_flow_error *error)
1125 switch (table->params.match_type) {
1127 return flow_rule_match_acl_get(softnic,
1138 return flow_rule_match_hash_get(softnic,
1149 return rte_flow_error_set(error,
1151 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1153 "Unsupported pipeline table match type");
/*
 * flow_rule_action_get():
 * Translate an rte_flow action list into a softnic table rule action,
 * checking each requested action against the table's action profile
 * (params->action_mask). Exactly one terminating action
 * (JUMP / QUEUE / RSS / DROP) is allowed per flow, tracked by
 * n_jump_queue_rss_drop; COUNT and METER are non-terminating add-ons.
 */
1158 flow_rule_action_get(struct pmd_internals *softnic,
1159 struct pipeline *pipeline,
1160 struct softnic_table *table,
1161 const struct rte_flow_attr *attr,
1162 const struct rte_flow_action *action,
1163 struct softnic_table_rule_action *rule_action,
1164 struct rte_flow_error *error)
1166 struct softnic_table_action_profile *profile;
1167 struct softnic_table_action_profile_params *params;
1168 int n_jump_queue_rss_drop = 0;
1171 profile = softnic_table_action_profile_find(softnic,
1172 table->params.action_profile_name);
1173 if (profile == NULL)
1174 return rte_flow_error_set(error,
1176 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1178 "JUMP: Table action profile");
1180 params = &profile->params;
1182 for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
1183 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1186 switch (action->type) {
1187 case RTE_FLOW_ACTION_TYPE_JUMP:
1189 const struct rte_flow_action_jump *conf = action->conf;
1190 struct flow_attr_map *map;
1193 return rte_flow_error_set(error,
1195 RTE_FLOW_ERROR_TYPE_ACTION,
1197 "JUMP: Null configuration");
1199 if (n_jump_queue_rss_drop)
1200 return rte_flow_error_set(error,
1202 RTE_FLOW_ERROR_TYPE_ACTION,
1204 "Only one termination action is"
1205 " allowed per flow");
1207 if ((params->action_mask &
1208 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1209 return rte_flow_error_set(error,
1211 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1213 "JUMP action not enabled for this table");
1215 n_jump_queue_rss_drop = 1;
/* Resolve the jump-target group to a table in the same pipeline. */
1217 map = flow_attr_map_get(softnic,
1220 if (map == NULL || map->valid == 0)
1221 return rte_flow_error_set(error,
1223 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1225 "JUMP: Invalid group mapping");
1227 if (strcmp(pipeline->name, map->pipeline_name) != 0)
1228 return rte_flow_error_set(error,
1230 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1232 "JUMP: Jump to table in different pipeline");
1234 /* RTE_TABLE_ACTION_FWD */
1235 rule_action->fwd.action = RTE_PIPELINE_ACTION_TABLE;
1236 rule_action->fwd.id = map->table_id;
1237 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1239 } /* RTE_FLOW_ACTION_TYPE_JUMP */
1241 case RTE_FLOW_ACTION_TYPE_QUEUE:
1243 char name[NAME_SIZE];
1244 struct rte_eth_dev *dev;
1245 const struct rte_flow_action_queue *conf = action->conf;
1250 return rte_flow_error_set(error,
1252 RTE_FLOW_ERROR_TYPE_ACTION,
1254 "QUEUE: Null configuration");
1256 if (n_jump_queue_rss_drop)
1257 return rte_flow_error_set(error,
1259 RTE_FLOW_ERROR_TYPE_ACTION,
1261 "Only one termination action is allowed"
1264 if ((params->action_mask &
1265 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1266 return rte_flow_error_set(error,
1268 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1270 "QUEUE action not enabled for this table");
1272 n_jump_queue_rss_drop = 1;
1274 dev = ETHDEV(softnic);
1276 conf->index >= dev->data->nb_rx_queues)
1277 return rte_flow_error_set(error,
1279 RTE_FLOW_ERROR_TYPE_ACTION,
1281 "QUEUE: Invalid RX queue ID");
/* RX queues are exposed as pipeline output ports named "RXQ<n>". */
1283 sprintf(name, "RXQ%u", (uint32_t)conf->index);
1285 status = softnic_pipeline_port_out_find(softnic,
1290 return rte_flow_error_set(error,
1292 RTE_FLOW_ERROR_TYPE_ACTION,
1294 "QUEUE: RX queue not accessible from this pipeline");
1296 /* RTE_TABLE_ACTION_FWD */
1297 rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT;
1298 rule_action->fwd.id = port_id;
1299 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1301 } /*RTE_FLOW_ACTION_TYPE_QUEUE */
1303 case RTE_FLOW_ACTION_TYPE_RSS:
1305 const struct rte_flow_action_rss *conf = action->conf;
1309 return rte_flow_error_set(error,
1311 RTE_FLOW_ERROR_TYPE_ACTION,
1313 "RSS: Null configuration");
/* Power-of-2 queue count lets the fill loop below wrap with modulo. */
1315 if (!rte_is_power_of_2(conf->queue_num))
1316 return rte_flow_error_set(error,
1318 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1320 "RSS: Number of queues must be a power of 2");
1322 if (conf->queue_num > RTE_DIM(rule_action->lb.out))
1323 return rte_flow_error_set(error,
1325 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1327 "RSS: Number of queues too big");
1329 if (n_jump_queue_rss_drop)
1330 return rte_flow_error_set(error,
1332 RTE_FLOW_ERROR_TYPE_ACTION,
1334 "Only one termination action is allowed per flow");
/* RSS needs both FWD and LB enabled in the action profile. */
1336 if (((params->action_mask &
1337 (1LLU << RTE_TABLE_ACTION_FWD)) == 0) ||
1338 ((params->action_mask &
1339 (1LLU << RTE_TABLE_ACTION_LB)) == 0))
1340 return rte_flow_error_set(error,
1342 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1344 "RSS action not supported by this table");
1346 if (params->lb.out_offset !=
1347 pipeline->params.offset_port_id)
1348 return rte_flow_error_set(error,
1350 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1352 "RSS action not supported by this pipeline");
1354 n_jump_queue_rss_drop = 1;
1356 /* RTE_TABLE_ACTION_LB */
1357 for (i = 0; i < conf->queue_num; i++) {
1358 char name[NAME_SIZE];
1359 struct rte_eth_dev *dev;
1363 dev = ETHDEV(softnic);
1366 dev->data->nb_rx_queues)
1367 return rte_flow_error_set(error,
1369 RTE_FLOW_ERROR_TYPE_ACTION,
1371 "RSS: Invalid RX queue ID");
1373 sprintf(name, "RXQ%u",
1374 (uint32_t)conf->queue[i]);
1376 status = softnic_pipeline_port_out_find(softnic,
1381 return rte_flow_error_set(error,
1383 RTE_FLOW_ERROR_TYPE_ACTION,
1385 "RSS: RX queue not accessible from this pipeline");
1387 rule_action->lb.out[i] = port_id;
/* Replicate the queue list to fill the whole LB table (modulo wrap). */
1390 for ( ; i < RTE_DIM(rule_action->lb.out); i++)
1391 rule_action->lb.out[i] =
1392 rule_action->lb.out[i % conf->queue_num];
1394 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_LB;
1396 /* RTE_TABLE_ACTION_FWD */
1397 rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
1398 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1400 } /* RTE_FLOW_ACTION_TYPE_RSS */
1402 case RTE_FLOW_ACTION_TYPE_DROP:
1404 const void *conf = action->conf;
/* DROP takes no configuration; a non-NULL conf is an error. */
1407 return rte_flow_error_set(error,
1409 RTE_FLOW_ERROR_TYPE_ACTION,
1411 "DROP: No configuration required");
1413 if (n_jump_queue_rss_drop)
1414 return rte_flow_error_set(error,
1416 RTE_FLOW_ERROR_TYPE_ACTION,
1418 "Only one termination action is allowed per flow");
1419 if ((params->action_mask &
1420 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1421 return rte_flow_error_set(error,
1423 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1425 "DROP action not supported by this table");
1427 n_jump_queue_rss_drop = 1;
1429 /* RTE_TABLE_ACTION_FWD */
1430 rule_action->fwd.action = RTE_PIPELINE_ACTION_DROP;
1431 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1433 } /* RTE_FLOW_ACTION_TYPE_DROP */
1435 case RTE_FLOW_ACTION_TYPE_COUNT:
1437 const struct rte_flow_action_count *conf = action->conf;
1440 return rte_flow_error_set(error,
1442 RTE_FLOW_ERROR_TYPE_ACTION,
1444 "COUNT: Null configuration");
1447 return rte_flow_error_set(error,
1449 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1451 "COUNT: Shared counters not supported");
1454 return rte_flow_error_set(error,
1456 RTE_FLOW_ERROR_TYPE_ACTION,
1458 "Only one COUNT action per flow");
1460 if ((params->action_mask &
1461 (1LLU << RTE_TABLE_ACTION_STATS)) == 0)
1462 return rte_flow_error_set(error,
1464 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1466 "COUNT action not supported by this table");
1470 /* RTE_TABLE_ACTION_STATS */
1471 rule_action->stats.n_packets = 0;
1472 rule_action->stats.n_bytes = 0;
1473 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
1475 } /* RTE_FLOW_ACTION_TYPE_COUNT */
1477 case RTE_FLOW_ACTION_TYPE_METER:
1479 const struct rte_flow_action_meter *conf = action->conf;
1480 struct softnic_mtr_meter_profile *mp;
1481 struct softnic_mtr *m;
/* Table index recovered by pointer arithmetic into pipeline->table[]. */
1482 uint32_t table_id = table - pipeline->table;
1483 uint32_t meter_profile_id;
1486 if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0)
1487 return rte_flow_error_set(error,
1489 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1491 "METER: Table action not supported");
1493 if (params->mtr.n_tc != 1)
1494 return rte_flow_error_set(error,
1496 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1498 "METER: Multiple TCs not supported");
1501 return rte_flow_error_set(error,
1503 RTE_FLOW_ERROR_TYPE_ACTION,
1505 "METER: Null configuration");
1507 m = softnic_mtr_find(softnic, conf->mtr_id);
1510 return rte_flow_error_set(error,
1512 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1514 "METER: Invalid meter ID");
1517 return rte_flow_error_set(error,
1519 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1521 "METER: Meter already attached to a flow");
1523 meter_profile_id = m->params.meter_profile_id;
1524 mp = softnic_mtr_meter_profile_find(softnic, meter_profile_id);
1526 /* Add meter profile to pipeline table */
1527 if (!softnic_pipeline_table_meter_profile_find(table,
1528 meter_profile_id)) {
1529 struct rte_table_action_meter_profile profile;
/* Map the RFC2698 trTCM parameters onto the table-action profile. */
1531 memset(&profile, 0, sizeof(profile));
1532 profile.alg = RTE_TABLE_ACTION_METER_TRTCM;
1533 profile.trtcm.cir = mp->params.trtcm_rfc2698.cir;
1534 profile.trtcm.pir = mp->params.trtcm_rfc2698.pir;
1535 profile.trtcm.cbs = mp->params.trtcm_rfc2698.cbs;
1536 profile.trtcm.pbs = mp->params.trtcm_rfc2698.pbs;
1538 status = softnic_pipeline_table_mtr_profile_add(softnic,
1544 rte_flow_error_set(error,
1546 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1548 "METER: Table meter profile add failed");
1553 /* RTE_TABLE_ACTION_METER */
/* Single TC: policer actions per color copied from the softnic meter. */
1554 rule_action->mtr.mtr[0].meter_profile_id = meter_profile_id;
1555 rule_action->mtr.mtr[0].policer[e_RTE_METER_GREEN] =
1556 (enum rte_table_action_policer)m->params.action[RTE_MTR_GREEN];
1557 rule_action->mtr.mtr[0].policer[e_RTE_METER_YELLOW] =
1558 (enum rte_table_action_policer)m->params.action[RTE_MTR_YELLOW];
1559 rule_action->mtr.mtr[0].policer[e_RTE_METER_RED] =
1560 (enum rte_table_action_policer)m->params.action[RTE_MTR_RED];
1561 rule_action->mtr.tc_mask = 1;
1562 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
1564 } /* RTE_FLOW_ACTION_TYPE_METER */
/* A flow with no terminating action is invalid. */
1571 if (n_jump_queue_rss_drop == 0)
1572 return rte_flow_error_set(error,
1574 RTE_FLOW_ERROR_TYPE_ACTION,
1576 "Flow does not have any terminating action");
/*
 * rte_flow .validate callback: check that a flow rule could be installed
 * without actually installing it. Validates the attr/item/action arrays,
 * resolves the target pipeline table from the flow attributes, and builds
 * (then discards) the table rule match and rule action structures.
 *
 * NOTE(review): this listing is elided (original line numbers jump), so the
 * NULL-argument guards before each rte_flow_error_set() and the trailing
 * "return 0;" are not visible here — confirm against the full file.
 */
1582 pmd_flow_validate(struct rte_eth_dev *dev,
1583 const struct rte_flow_attr *attr,
1584 const struct rte_flow_item item[],
1585 const struct rte_flow_action action[],
1586 struct rte_flow_error *error)
1588 struct softnic_table_rule_match rule_match;
1589 struct softnic_table_rule_action rule_action;
1591 struct pmd_internals *softnic = dev->data->dev_private;
1592 struct pipeline *pipeline;
1593 struct softnic_table *table;
1594 const char *pipeline_name = NULL;
1595 uint32_t table_id = 0;
1598 /* Check input parameters. */
1600 return rte_flow_error_set(error,
1602 RTE_FLOW_ERROR_TYPE_ATTR,
1606 return rte_flow_error_set(error,
1608 RTE_FLOW_ERROR_TYPE_ITEM,
1613 return rte_flow_error_set(error,
1615 RTE_FLOW_ERROR_TYPE_ACTION,
1619 /* Identify the pipeline table to add this flow to. */
1620 status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
1625 pipeline = softnic_pipeline_find(softnic, pipeline_name);
1626 if (pipeline == NULL)
1627 return rte_flow_error_set(error,
1629 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1631 "Invalid pipeline name");
1633 if (table_id >= pipeline->n_tables)
1634 return rte_flow_error_set(error,
1636 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1638 "Invalid pipeline table ID");
1640 table = &pipeline->table[table_id];
/* Translate the item[] array into a table rule match; result discarded. */
1643 memset(&rule_match, 0, sizeof(rule_match));
1644 status = flow_rule_match_get(softnic,
/* Translate the action[] array into a table rule action; result discarded. */
1655 memset(&rule_action, 0, sizeof(rule_action));
1656 status = flow_rule_action_get(softnic,
/*
 * Scan an rte_flow action array for the first METER action and look up the
 * corresponding softnic meter object by its mtr_id.
 *
 * NOTE(review): lines are elided here — the "no METER action found" return
 * path (presumably NULL) is not visible in this listing; confirm against
 * the full file.
 */
1669 static struct softnic_mtr *
1670 flow_action_meter_get(struct pmd_internals *softnic,
1671 const struct rte_flow_action *action)
1673 for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++)
1674 if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
1675 const struct rte_flow_action_meter *conf = action->conf;
1680 return softnic_mtr_find(softnic, conf->mtr_id);
/*
 * Detach the given flow from any meter that currently claims it as owner:
 * walk the global meter list and clear the back-pointer of the matching
 * meter (the clearing statement itself is elided from this listing).
 */
1687 flow_meter_owner_reset(struct pmd_internals *softnic,
1688 struct rte_flow *flow)
1690 struct softnic_mtr_list *ml = &softnic->mtr.mtrs;
1691 struct softnic_mtr *m;
1693 TAILQ_FOREACH(m, ml, node)
1694 if (m->flow == flow) {
/*
 * Make "mtr" the meter owned by "flow": first release any meter currently
 * attached to the flow, then record the new association (the assignment
 * itself, presumably mtr->flow = flow, is elided from this listing).
 */
1701 flow_meter_owner_set(struct pmd_internals *softnic,
1702 struct rte_flow *flow,
1703 struct softnic_mtr *mtr)
1705 /* Reset current flow meter */
1706 flow_meter_owner_reset(softnic, flow);
1708 /* Set new flow meter */
/*
 * Return 1 when the table's action profile has the MTR (meter) table action
 * enabled, 0 otherwise.
 *
 * NOTE(review): profile is dereferenced without a NULL check — assumes the
 * table's action profile name always resolves; verify against callers.
 */
1713 is_meter_action_enable(struct pmd_internals *softnic,
1714 struct softnic_table *table)
1716 struct softnic_table_action_profile *profile =
1717 softnic_table_action_profile_find(softnic,
1718 table->params.action_profile_name);
1719 struct softnic_table_action_profile_params *params = &profile->params;
1721 return (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) ? 1 : 0;
/*
 * rte_flow .create callback: validate the arguments, resolve the target
 * pipeline table, translate item[]/action[] into a table rule, install the
 * rule into the pipeline, then save the rule in a (new or reused) rte_flow
 * object and link it on the table's flow list.
 *
 * NOTE(review): this listing is elided (original line numbers jump) —
 * NULL-argument guards, several "return NULL" statements, the rule_data
 * declaration, and the final "return flow" are not visible here; confirm
 * against the full file.
 */
1724 static struct rte_flow *
1725 pmd_flow_create(struct rte_eth_dev *dev,
1726 const struct rte_flow_attr *attr,
1727 const struct rte_flow_item item[],
1728 const struct rte_flow_action action[],
1729 struct rte_flow_error *error)
1731 struct softnic_table_rule_match rule_match;
1732 struct softnic_table_rule_action rule_action;
1735 struct pmd_internals *softnic = dev->data->dev_private;
1736 struct pipeline *pipeline;
1737 struct softnic_table *table;
1738 struct rte_flow *flow;
1739 struct softnic_mtr *mtr;
1740 const char *pipeline_name = NULL;
1741 uint32_t table_id = 0;
1742 int new_flow, status;
1744 /* Check input parameters. */
1746 rte_flow_error_set(error,
1748 RTE_FLOW_ERROR_TYPE_ATTR,
1755 rte_flow_error_set(error,
1757 RTE_FLOW_ERROR_TYPE_ITEM,
1763 if (action == NULL) {
1764 rte_flow_error_set(error,
1766 RTE_FLOW_ERROR_TYPE_ACTION,
1772 /* Identify the pipeline table to add this flow to. */
1773 status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
1778 pipeline = softnic_pipeline_find(softnic, pipeline_name);
1779 if (pipeline == NULL) {
1780 rte_flow_error_set(error,
1782 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1784 "Invalid pipeline name");
1788 if (table_id >= pipeline->n_tables) {
1789 rte_flow_error_set(error,
1791 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1793 "Invalid pipeline table ID");
1797 table = &pipeline->table[table_id];
/* Translate the rte_flow item[] array into a table rule match. */
1800 memset(&rule_match, 0, sizeof(rule_match));
1801 status = flow_rule_match_get(softnic,
/* Translate the rte_flow action[] array into a table rule action. */
1812 memset(&rule_action, 0, sizeof(rule_action));
1813 status = flow_rule_action_get(softnic,
/*
 * Flow find/allocate: an existing flow with an identical match is
 * updated in place; otherwise a zero-filled flow object is allocated.
 */
1823 /* Flow find/allocate. */
1825 flow = softnic_flow_find(table, &rule_match);
1828 flow = calloc(1, sizeof(struct rte_flow));
1830 rte_flow_error_set(error,
1832 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1834 "Not enough memory for new flow");
/* Install (or update) the rule in the pipeline table. */
1840 status = softnic_pipeline_table_rule_add(softnic,
1850 rte_flow_error_set(error,
1852 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1854 "Pipeline table rule add failed");
/* Flow fill in: remember match/action/handle for later destroy/query. */
1859 memcpy(&flow->match, &rule_match, sizeof(rule_match));
1860 memcpy(&flow->action, &rule_action, sizeof(rule_action));
1861 flow->data = rule_data;
1862 flow->pipeline = pipeline;
1863 flow->table_id = table_id;
/* If the action list carries a METER action, bind that meter to this flow. */
1865 mtr = flow_action_meter_get(softnic, action);
1867 flow_meter_owner_set(softnic, flow, mtr);
1869 /* Flow add to list. */
1871 TAILQ_INSERT_TAIL(&table->flows, flow, node);
/*
 * rte_flow .destroy callback: remove the flow's rule from its pipeline
 * table, release any meter bound to the flow, and unlink the flow from the
 * table's flow list.
 *
 * NOTE(review): elided lines hide the NULL-flow guard condition, the rule
 * delete arguments, the free(flow), and the final return; confirm against
 * the full file.
 */
1877 pmd_flow_destroy(struct rte_eth_dev *dev,
1878 struct rte_flow *flow,
1879 struct rte_flow_error *error)
1881 struct pmd_internals *softnic = dev->data->dev_private;
1882 struct softnic_table *table;
1885 /* Check input parameters. */
1887 return rte_flow_error_set(error,
1889 RTE_FLOW_ERROR_TYPE_HANDLE,
/* Locate the table this flow was installed into (saved at create time). */
1893 table = &flow->pipeline->table[flow->table_id];
1896 status = softnic_pipeline_table_rule_delete(softnic,
1897 flow->pipeline->name,
1901 return rte_flow_error_set(error,
1903 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1905 "Pipeline table rule delete failed");
/* Update dependencies: detach the meter only if this table meters flows. */
1907 /* Update dependencies */
1908 if (is_meter_action_enable(softnic, table))
1909 flow_meter_owner_reset(softnic, flow);
1912 TAILQ_REMOVE(&table->flows, flow, node);
/*
 * rte_flow .query callback (COUNT action): read the per-rule stats counters
 * from the pipeline table action handle and fill in the caller-supplied
 * rte_flow_query_count structure.
 *
 * NOTE(review): elided lines hide the NULL checks on flow/data, the stats
 * read arguments (including the clear flag), and the final return; confirm
 * against the full file.
 */
1919 pmd_flow_query(struct rte_eth_dev *dev __rte_unused,
1920 struct rte_flow *flow,
1921 const struct rte_flow_action *action __rte_unused,
1923 struct rte_flow_error *error)
1925 struct rte_table_action_stats_counters stats;
1926 struct softnic_table *table;
1927 struct rte_flow_query_count *flow_stats = data;
1930 /* Check input parameters. */
1932 return rte_flow_error_set(error,
1934 RTE_FLOW_ERROR_TYPE_HANDLE,
1939 return rte_flow_error_set(error,
1941 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1945 table = &flow->pipeline->table[flow->table_id];
1947 /* Rule stats read. */
1948 status = rte_table_action_stats_read(table->a,
1953 return rte_flow_error_set(error,
1955 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1957 "Pipeline table rule stats read failed");
/*
 * Fill in flow stats: the *_set flags reflect which counters the table's
 * action profile actually enabled.
 */
1959 /* Fill in flow stats. */
1960 flow_stats->hits_set =
1961 (table->ap->params.stats.n_packets_enabled) ? 1 : 0;
1962 flow_stats->bytes_set =
1963 (table->ap->params.stats.n_bytes_enabled) ? 1 : 0;
1964 flow_stats->hits = stats.n_packets;
1965 flow_stats->bytes = stats.n_bytes;
1970 const struct rte_flow_ops pmd_flow_ops = {
1971 .validate = pmd_flow_validate,
1972 .create = pmd_flow_create,
1973 .destroy = pmd_flow_destroy,
1975 .query = pmd_flow_query,