1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
8 #include <rte_common.h>
9 #include <rte_byteorder.h>
10 #include <rte_malloc.h>
11 #include <rte_string_fns.h>
13 #include <rte_flow_driver.h>
15 #include "rte_eth_softnic_internals.h"
16 #include "rte_eth_softnic.h"
/* BSD-style byte-order helper names aliased to DPDK's CPU <-> big-endian
 * conversion macros, for readability at protocol-field access sites.
 */
18 #define rte_htons rte_cpu_to_be_16
19 #define rte_htonl rte_cpu_to_be_32
21 #define rte_ntohs rte_be_to_cpu_16
22 #define rte_ntohl rte_be_to_cpu_32
/* Look up a flow in *table* whose stored match is byte-identical to
 * *rule_match* (memcmp over the whole struct). Linear scan of the
 * table's flow list. NOTE(review): the return statements of this
 * function are not visible in this view (lines elided).
 */
24 static struct rte_flow *
25 softnic_flow_find(struct softnic_table *table,
26 struct softnic_table_rule_match *rule_match)
28 struct rte_flow *flow;
30 TAILQ_FOREACH(flow, &table->flows, node)
31 if (memcmp(&flow->match, rule_match, sizeof(*rule_match)) == 0)
/* Record the mapping from an rte_flow (direction, group_id) pair to a
 * (pipeline, table_id) pair. Validates the group ID, the pipeline name
 * and the table index before writing the map entry.
 * NOTE(review): map->pipeline_name is presumably sized to hold any
 * valid pipeline name (strcpy is unbounded here) — confirm against
 * the flow_attr_map declaration.
 */
38 flow_attr_map_set(struct pmd_internals *softnic,
41 const char *pipeline_name,
44 struct pipeline *pipeline;
45 struct flow_attr_map *map;
47 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
48 pipeline_name == NULL)
51 pipeline = softnic_pipeline_find(softnic, pipeline_name);
52 if (pipeline == NULL ||
53 table_id >= pipeline->n_tables)
/* Direction selects which of the two per-group map arrays is written. */
56 map = (ingress) ? &softnic->flow.ingress_map[group_id] :
57 &softnic->flow.egress_map[group_id];
58 strcpy(map->pipeline_name, pipeline_name);
59 map->table_id = table_id;
/* Return the attribute map entry for (direction, group_id), or fail on
 * an out-of-range group ID. The ingress and egress directions use
 * separate map arrays.
 */
65 struct flow_attr_map *
66 flow_attr_map_get(struct pmd_internals *softnic,
70 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
73 return (ingress) ? &softnic->flow.ingress_map[group_id] :
74 &softnic->flow.egress_map[group_id];
/* Resolve the rte_flow attributes into the target pipeline table:
 * validates the attributes (exactly one of ingress/egress must be set),
 * then uses the attribute map to output the pipeline name and table ID.
 * Returns 0 on success, a negative rte_flow error otherwise.
 */
78 flow_pipeline_table_get(struct pmd_internals *softnic,
79 const struct rte_flow_attr *attr,
80 const char **pipeline_name,
82 struct rte_flow_error *error)
84 struct flow_attr_map *map;
87 return rte_flow_error_set(error,
89 RTE_FLOW_ERROR_TYPE_ATTR,
/* Exactly one direction must be requested. */
93 if (!attr->ingress && !attr->egress)
94 return rte_flow_error_set(error,
96 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
98 "Ingress/egress not specified");
100 if (attr->ingress && attr->egress)
101 return rte_flow_error_set(error,
103 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
105 "Setting both ingress and egress is not allowed");
107 map = flow_attr_map_get(softnic,
112 return rte_flow_error_set(error,
114 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
119 *pipeline_name = map->pipeline_name;
122 *table_id = map->table_id;
/* Body of union flow_item (the opening "union flow_item {" line is not
 * visible in this view): a raw byte buffer overlaid with every
 * rte_flow item struct this driver supports, so spec/mask/last of any
 * supported item can be copied into one fixed-size object.
 */
128 uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
129 struct rte_flow_item_eth eth;
130 struct rte_flow_item_vlan vlan;
131 struct rte_flow_item_ipv4 ipv4;
132 struct rte_flow_item_ipv6 ipv6;
133 struct rte_flow_item_icmp icmp;
134 struct rte_flow_item_udp udp;
135 struct rte_flow_item_tcp tcp;
136 struct rte_flow_item_sctp sctp;
137 struct rte_flow_item_vxlan vxlan;
138 struct rte_flow_item_e_tag e_tag;
139 struct rte_flow_item_nvgre nvgre;
140 struct rte_flow_item_mpls mpls;
141 struct rte_flow_item_gre gre;
142 struct rte_flow_item_gtp gtp;
143 struct rte_flow_item_esp esp;
144 struct rte_flow_item_geneve geneve;
145 struct rte_flow_item_vxlan_gpe vxlan_gpe;
146 struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
147 struct rte_flow_item_ipv6_ext ipv6_ext;
148 struct rte_flow_item_icmp6 icmp6;
149 struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
150 struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
151 struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
152 struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
153 struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
/* Default mask for RAW items: static const with no initializer, i.e.
 * all-zeros (RAW items carry no maskable protocol fields here).
 */
156 static const union flow_item flow_item_raw_mask;
/* Classify an rte_flow item type: for every supported protocol item,
 * output its default mask and the size of its item struct, and report
 * "is a protocol". Non-protocol items (VOID, END, ...) fall through to
 * the default case and return 0 (FALSE).
 */
159 flow_item_is_proto(enum rte_flow_item_type type,
164 case RTE_FLOW_ITEM_TYPE_RAW:
165 *mask = &flow_item_raw_mask;
166 *size = sizeof(flow_item_raw_mask);
169 case RTE_FLOW_ITEM_TYPE_ETH:
170 *mask = &rte_flow_item_eth_mask;
171 *size = sizeof(struct rte_flow_item_eth);
174 case RTE_FLOW_ITEM_TYPE_VLAN:
175 *mask = &rte_flow_item_vlan_mask;
176 *size = sizeof(struct rte_flow_item_vlan);
179 case RTE_FLOW_ITEM_TYPE_IPV4:
180 *mask = &rte_flow_item_ipv4_mask;
181 *size = sizeof(struct rte_flow_item_ipv4);
184 case RTE_FLOW_ITEM_TYPE_IPV6:
185 *mask = &rte_flow_item_ipv6_mask;
186 *size = sizeof(struct rte_flow_item_ipv6);
189 case RTE_FLOW_ITEM_TYPE_ICMP:
190 *mask = &rte_flow_item_icmp_mask;
191 *size = sizeof(struct rte_flow_item_icmp);
194 case RTE_FLOW_ITEM_TYPE_UDP:
195 *mask = &rte_flow_item_udp_mask;
196 *size = sizeof(struct rte_flow_item_udp);
199 case RTE_FLOW_ITEM_TYPE_TCP:
200 *mask = &rte_flow_item_tcp_mask;
201 *size = sizeof(struct rte_flow_item_tcp);
204 case RTE_FLOW_ITEM_TYPE_SCTP:
205 *mask = &rte_flow_item_sctp_mask;
206 *size = sizeof(struct rte_flow_item_sctp);
209 case RTE_FLOW_ITEM_TYPE_VXLAN:
210 *mask = &rte_flow_item_vxlan_mask;
211 *size = sizeof(struct rte_flow_item_vxlan);
214 case RTE_FLOW_ITEM_TYPE_E_TAG:
215 *mask = &rte_flow_item_e_tag_mask;
216 *size = sizeof(struct rte_flow_item_e_tag);
219 case RTE_FLOW_ITEM_TYPE_NVGRE:
220 *mask = &rte_flow_item_nvgre_mask;
221 *size = sizeof(struct rte_flow_item_nvgre);
224 case RTE_FLOW_ITEM_TYPE_MPLS:
225 *mask = &rte_flow_item_mpls_mask;
226 *size = sizeof(struct rte_flow_item_mpls);
229 case RTE_FLOW_ITEM_TYPE_GRE:
230 *mask = &rte_flow_item_gre_mask;
231 *size = sizeof(struct rte_flow_item_gre);
/* GTP, GTPC and GTPU share the same item struct and default mask. */
234 case RTE_FLOW_ITEM_TYPE_GTP:
235 case RTE_FLOW_ITEM_TYPE_GTPC:
236 case RTE_FLOW_ITEM_TYPE_GTPU:
237 *mask = &rte_flow_item_gtp_mask;
238 *size = sizeof(struct rte_flow_item_gtp);
241 case RTE_FLOW_ITEM_TYPE_ESP:
242 *mask = &rte_flow_item_esp_mask;
243 *size = sizeof(struct rte_flow_item_esp);
246 case RTE_FLOW_ITEM_TYPE_GENEVE:
247 *mask = &rte_flow_item_geneve_mask;
248 *size = sizeof(struct rte_flow_item_geneve);
251 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
252 *mask = &rte_flow_item_vxlan_gpe_mask;
253 *size = sizeof(struct rte_flow_item_vxlan_gpe);
256 case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
257 *mask = &rte_flow_item_arp_eth_ipv4_mask;
258 *size = sizeof(struct rte_flow_item_arp_eth_ipv4);
261 case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
262 *mask = &rte_flow_item_ipv6_ext_mask;
263 *size = sizeof(struct rte_flow_item_ipv6_ext);
266 case RTE_FLOW_ITEM_TYPE_ICMP6:
267 *mask = &rte_flow_item_icmp6_mask;
268 *size = sizeof(struct rte_flow_item_icmp6);
271 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
272 *mask = &rte_flow_item_icmp6_nd_ns_mask;
273 *size = sizeof(struct rte_flow_item_icmp6_nd_ns);
276 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
277 *mask = &rte_flow_item_icmp6_nd_na_mask;
278 *size = sizeof(struct rte_flow_item_icmp6_nd_na);
281 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
282 *mask = &rte_flow_item_icmp6_nd_opt_mask;
283 *size = sizeof(struct rte_flow_item_icmp6_nd_opt);
286 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
287 *mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
288 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
291 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
292 *mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
293 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
296 default: return 0; /* FALSE */
/* Canonicalize one protocol item into (spec, mask) buffers:
 *  - reject non-protocol item types;
 *  - a NULL spec requires NULL last/mask and yields all-zero spec/mask
 *    with the item flagged disabled;
 *  - otherwise copy spec, copy the user mask or fall back to the
 *    item's default mask, flag an all-zero mask as "disabled", apply
 *    the mask over the spec, and reject ranges (masked last != spec).
 * Returns 0 on success, a negative rte_flow error otherwise.
 */
301 flow_item_proto_preprocess(const struct rte_flow_item *item,
302 union flow_item *item_spec,
303 union flow_item *item_mask,
306 struct rte_flow_error *error)
308 const void *mask_default;
309 uint8_t *spec = (uint8_t *)item_spec;
310 uint8_t *mask = (uint8_t *)item_mask;
313 if (!flow_item_is_proto(item->type, &mask_default, &size))
314 return rte_flow_error_set(error,
316 RTE_FLOW_ERROR_TYPE_ITEM,
318 "Item type not supported");
322 /* If spec is NULL, then last and mask also have to be NULL. */
323 if (item->last || item->mask)
324 return rte_flow_error_set(error,
326 RTE_FLOW_ERROR_TYPE_ITEM,
328 "Invalid item (NULL spec with non-NULL last or mask)");
330 memset(item_spec, 0, size);
331 memset(item_mask, 0, size);
333 *item_disabled = 1; /* TRUE */
337 memcpy(spec, item->spec, size);
/* User-supplied mask wins; otherwise use the item's default mask. */
342 memcpy(mask, item->mask, size);
344 memcpy(mask, mask_default, size);
/* An all-zero mask means the item matches nothing -> disabled. */
347 for (i = 0; i < size; i++)
350 *item_disabled = (i == size) ? 1 : 0;
352 /* Apply mask over spec. */
353 for (i = 0; i < size; i++)
361 memcpy(last, item->last, size);
362 for (i = 0; i < size; i++)
365 /* check for range */
366 for (i = 0; i < size; i++)
367 if (last[i] != spec[i])
368 return rte_flow_error_set(error,
370 RTE_FLOW_ERROR_TYPE_ITEM,
372 "Range not supported");
379 * Skip disabled protocol items and VOID items
380 * until any of the mutually exclusive conditions
381 * from the list below takes place:
382 * (A) A protocol present in the proto_mask
383 * is met (either ENABLED or DISABLED);
384 * (B) A protocol NOT present in the proto_mask is met in ENABLED state;
385 * (C) The END item is met.
/* Advance *item past VOID items and disabled protocol items, per the
 * stop conditions described in the comment above. Accumulates the
 * byte length of skipped protocol items (used as a match offset by
 * callers). Returns 0 on success, negative rte_flow error otherwise.
 */
388 flow_item_skip_disabled_protos(const struct rte_flow_item **item,
391 struct rte_flow_error *error)
395 for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
396 union flow_item spec, mask;
398 int disabled = 0, status;
400 if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
403 status = flow_item_proto_preprocess(*item,
/* Stop on any protocol listed in proto_mask, or on an enabled one. */
412 if ((proto_mask & (1LLU << (*item)->type)) ||
/* Bit mask selecting the IPv4 and IPv6 item types, for use as the
 * proto_mask argument of flow_item_skip_disabled_protos().
 */
425 #define FLOW_ITEM_PROTO_IP \
426 ((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
427 (1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
/* Advance *item past any consecutive VOID items. */
430 flow_item_skip_void(const struct rte_flow_item **item)
433 if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
/* IANA IP protocol numbers: TCP = 6, UDP = 17, SCTP = 132. */
437 #define IP_PROTOCOL_TCP 0x06
438 #define IP_PROTOCOL_UDP 0x11
439 #define IP_PROTOCOL_SCTP 0x84
/* Convert a contiguous (prefix) 64-bit mask into a prefix depth.
 * Non-contiguous masks are rejected. NOTE(review): intermediate lines
 * are elided here; the mask & (mask + 1) contiguity test presumably
 * operates on the inverted mask — confirm against the full source.
 */
442 mask_to_depth(uint64_t mask,
447 if (mask == UINT64_MAX) {
456 if (mask & (mask + 1))
459 n = __builtin_popcountll(mask);
461 *depth = (uint32_t)(64 - n);
/* IPv4 variant: widen the 32-bit mask to 64 bits by forcing the top
 * 32 bits to ones, then reuse mask_to_depth().
 */
467 ipv4_mask_to_depth(uint32_t mask,
473 status = mask_to_depth(mask | (UINT64_MAX << 32), &d);
/* IPv6 variant: treat the 16-byte mask as two big-endian 64-bit
 * halves and validate/convert each with mask_to_depth().
 * NOTE(review): the (uint64_t *) cast of a uint8_t buffer is a
 * strict-aliasing/alignment concern — memcpy into locals would be
 * safer; confirm the buffer's alignment guarantees before changing.
 */
485 ipv6_mask_to_depth(uint8_t *mask,
488 uint64_t *m = (uint64_t *)mask;
489 uint64_t m0 = rte_be_to_cpu_64(m[0]);
490 uint64_t m1 = rte_be_to_cpu_64(m[1]);
494 status = mask_to_depth(m0, &d0);
498 status = mask_to_depth(m1, &d1);
/* Convert (port, port_mask) into an inclusive port range [p0, p1].
 * The mask must be contiguous in its low 16 bits (checked by widening
 * to 64 bits with the top 48 bits forced to ones).
 */
512 port_mask_to_range(uint16_t port,
520 status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL);
524 p0 = port & port_mask;
525 p1 = p0 | ~port_mask;
/* Build an ACL table rule match from an rte_flow item list.
 * Expected item layout:
 *   [VOID/disabled]* (IPV4 | IPV6) [VOID]* (TCP | UDP | SCTP)
 *   [VOID/disabled]* END
 * IP address masks must be prefix masks; the IP protocol mask must be
 * exact (0xFF); L4 port masks must describe contiguous ranges; the L4
 * item type must agree with the IP header's protocol field.
 * Returns 0 on success, a negative rte_flow error otherwise.
 */
537 flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
538 struct pipeline *pipeline __rte_unused,
539 struct softnic_table *table __rte_unused,
540 const struct rte_flow_attr *attr,
541 const struct rte_flow_item *item,
542 struct softnic_table_rule_match *rule_match,
543 struct rte_flow_error *error)
545 union flow_item spec, mask;
546 size_t size, length = 0;
547 int disabled = 0, status;
548 uint8_t ip_proto, ip_proto_mask;
550 memset(rule_match, 0, sizeof(*rule_match));
551 rule_match->match_type = TABLE_ACL;
552 rule_match->match.acl.priority = attr->priority;
554 /* VOID or disabled protos only, if any. */
555 status = flow_item_skip_disabled_protos(&item,
556 FLOW_ITEM_PROTO_IP, &length, error);
/* The first enabled protocol item must be IPv4 or IPv6. */
561 status = flow_item_proto_preprocess(item, &spec, &mask,
562 &size, &disabled, error);
566 switch (item->type) {
567 case RTE_FLOW_ITEM_TYPE_IPV4:
569 uint32_t sa_depth, da_depth;
571 status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr),
574 return rte_flow_error_set(error,
576 RTE_FLOW_ERROR_TYPE_ITEM,
578 "ACL: Illegal IPv4 header source address mask");
580 status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr),
583 return rte_flow_error_set(error,
585 RTE_FLOW_ERROR_TYPE_ITEM,
587 "ACL: Illegal IPv4 header destination address mask");
589 ip_proto = spec.ipv4.hdr.next_proto_id;
590 ip_proto_mask = mask.ipv4.hdr.next_proto_id;
592 rule_match->match.acl.ip_version = 1;
593 rule_match->match.acl.ipv4.sa =
594 rte_ntohl(spec.ipv4.hdr.src_addr);
595 rule_match->match.acl.ipv4.da =
596 rte_ntohl(spec.ipv4.hdr.dst_addr);
597 rule_match->match.acl.sa_depth = sa_depth;
598 rule_match->match.acl.da_depth = da_depth;
599 rule_match->match.acl.proto = ip_proto;
600 rule_match->match.acl.proto_mask = ip_proto_mask;
602 } /* RTE_FLOW_ITEM_TYPE_IPV4 */
604 case RTE_FLOW_ITEM_TYPE_IPV6:
606 uint32_t sa_depth, da_depth;
608 status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth);
610 return rte_flow_error_set(error,
612 RTE_FLOW_ERROR_TYPE_ITEM,
614 "ACL: Illegal IPv6 header source address mask");
616 status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth);
618 return rte_flow_error_set(error,
620 RTE_FLOW_ERROR_TYPE_ITEM,
622 "ACL: Illegal IPv6 header destination address mask");
624 ip_proto = spec.ipv6.hdr.proto;
625 ip_proto_mask = mask.ipv6.hdr.proto;
627 rule_match->match.acl.ip_version = 0;
628 memcpy(rule_match->match.acl.ipv6.sa,
629 spec.ipv6.hdr.src_addr,
630 sizeof(spec.ipv6.hdr.src_addr));
631 memcpy(rule_match->match.acl.ipv6.da,
632 spec.ipv6.hdr.dst_addr,
633 sizeof(spec.ipv6.hdr.dst_addr));
634 rule_match->match.acl.sa_depth = sa_depth;
635 rule_match->match.acl.da_depth = da_depth;
636 rule_match->match.acl.proto = ip_proto;
637 rule_match->match.acl.proto_mask = ip_proto_mask;
639 } /* RTE_FLOW_ITEM_TYPE_IPV6 */
642 return rte_flow_error_set(error,
644 RTE_FLOW_ERROR_TYPE_ITEM,
646 "ACL: IP protocol required");
/* The L4 item below is only meaningful with an exact proto match. */
649 if (ip_proto_mask != UINT8_MAX)
650 return rte_flow_error_set(error,
652 RTE_FLOW_ERROR_TYPE_ITEM,
654 "ACL: Illegal IP protocol mask");
658 /* VOID only, if any. */
659 flow_item_skip_void(&item);
661 /* TCP/UDP/SCTP only. */
662 status = flow_item_proto_preprocess(item, &spec, &mask,
663 &size, &disabled, error);
667 switch (item->type) {
668 case RTE_FLOW_ITEM_TYPE_TCP:
670 uint16_t sp0, sp1, dp0, dp1;
672 if (ip_proto != IP_PROTOCOL_TCP)
673 return rte_flow_error_set(error,
675 RTE_FLOW_ERROR_TYPE_ITEM,
677 "ACL: Item type is TCP, but IP protocol is not");
679 status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port),
680 rte_ntohs(mask.tcp.hdr.src_port),
685 return rte_flow_error_set(error,
687 RTE_FLOW_ERROR_TYPE_ITEM,
689 "ACL: Illegal TCP source port mask");
691 status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port),
692 rte_ntohs(mask.tcp.hdr.dst_port),
697 return rte_flow_error_set(error,
699 RTE_FLOW_ERROR_TYPE_ITEM,
701 "ACL: Illegal TCP destination port mask");
703 rule_match->match.acl.sp0 = sp0;
704 rule_match->match.acl.sp1 = sp1;
705 rule_match->match.acl.dp0 = dp0;
706 rule_match->match.acl.dp1 = dp1;
709 } /* RTE_FLOW_ITEM_TYPE_TCP */
711 case RTE_FLOW_ITEM_TYPE_UDP:
713 uint16_t sp0, sp1, dp0, dp1;
715 if (ip_proto != IP_PROTOCOL_UDP)
716 return rte_flow_error_set(error,
718 RTE_FLOW_ERROR_TYPE_ITEM,
720 "ACL: Item type is UDP, but IP protocol is not");
722 status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port),
723 rte_ntohs(mask.udp.hdr.src_port),
727 return rte_flow_error_set(error,
729 RTE_FLOW_ERROR_TYPE_ITEM,
731 "ACL: Illegal UDP source port mask");
733 status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port),
734 rte_ntohs(mask.udp.hdr.dst_port),
738 return rte_flow_error_set(error,
740 RTE_FLOW_ERROR_TYPE_ITEM,
742 "ACL: Illegal UDP destination port mask");
744 rule_match->match.acl.sp0 = sp0;
745 rule_match->match.acl.sp1 = sp1;
746 rule_match->match.acl.dp0 = dp0;
747 rule_match->match.acl.dp1 = dp1;
750 } /* RTE_FLOW_ITEM_TYPE_UDP */
752 case RTE_FLOW_ITEM_TYPE_SCTP:
754 uint16_t sp0, sp1, dp0, dp1;
756 if (ip_proto != IP_PROTOCOL_SCTP)
757 return rte_flow_error_set(error,
759 RTE_FLOW_ERROR_TYPE_ITEM,
761 "ACL: Item type is SCTP, but IP protocol is not");
763 status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port),
764 rte_ntohs(mask.sctp.hdr.src_port),
769 return rte_flow_error_set(error,
771 RTE_FLOW_ERROR_TYPE_ITEM,
773 "ACL: Illegal SCTP source port mask");
775 status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port),
776 rte_ntohs(mask.sctp.hdr.dst_port),
780 return rte_flow_error_set(error,
782 RTE_FLOW_ERROR_TYPE_ITEM,
784 "ACL: Illegal SCTP destination port mask");
786 rule_match->match.acl.sp0 = sp0;
787 rule_match->match.acl.sp1 = sp1;
788 rule_match->match.acl.dp0 = dp0;
789 rule_match->match.acl.dp1 = dp1;
792 } /* RTE_FLOW_ITEM_TYPE_SCTP */
795 return rte_flow_error_set(error,
797 RTE_FLOW_ERROR_TYPE_ITEM,
799 "ACL: TCP/UDP/SCTP required");
804 /* VOID or disabled protos only, if any. */
805 status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
/* Nothing else is allowed after the L4 item. */
810 if (item->type != RTE_FLOW_ITEM_TYPE_END)
811 return rte_flow_error_set(error,
813 RTE_FLOW_ERROR_TYPE_ITEM,
815 "ACL: Expecting END item");
821 * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
823 * They are located within a larger buffer at offsets *toffset* and *foffset*
824 respectively. Both *tmask* and *fmask* represent bitmasks for the larger
826 * Question: are the two masks equivalent?
829 * 1. Offset basically indicates that the first offset bytes in the buffer
830 * are "don't care", so offset is equivalent to pre-pending an "all-zeros"
831 * array of *offset* bytes to the *mask*.
832 * 2. Each *mask* might contain a number of zero bytes at the beginning or
834 * 3. Bytes in the larger buffer after the end of the *mask* are also considered
835 * "don't care", so they are equivalent to appending an "all-zeros" array of
836 * bytes to the *mask*.
839 * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes
840 * tmask = [00 22 00 33 00], toffset = 2, tsize = 5
841 * => buffer mask = [00 00 00 22 00 33 00 00]
842 * fmask = [22 00 33], foffset = 3, fsize = 3 =>
843 * => buffer mask = [00 00 00 22 00 33 00 00]
844 * Therefore, the tmask and fmask from this example are equivalent.
/* Compare the table's configured key mask against the flow's key mask
 * per the equivalence rules described in the comment block above.
 * Leading/trailing zero bytes and offsets are normalized away; the
 * non-zero cores must match byte-for-byte. On success also outputs the
 * position of the first non-zero byte of each mask.
 * Returns 1 (TRUE) when equivalent, 0 (FALSE) otherwise.
 */
847 hash_key_mask_is_same(uint8_t *tmask,
853 size_t *toffset_plus,
854 size_t *foffset_plus)
856 size_t tpos; /* Position of first non-zero byte in the tmask buffer. */
857 size_t fpos; /* Position of first non-zero byte in the fmask buffer. */
859 /* Compute tpos and fpos. */
860 for (tpos = 0; tmask[tpos] == 0; tpos++)
862 for (fpos = 0; fmask[fpos] == 0; fpos++)
/* The two masks must start at the same absolute buffer position. */
865 if (toffset + tpos != foffset + fpos)
866 return 0; /* FALSE */
874 for (i = 0; i < tsize; i++)
875 if (tmask[tpos + i] != fmask[fpos + i])
876 return 0; /* FALSE */
/* Any excess fmask bytes must be zero ("don't care"). */
878 for ( ; i < fsize; i++)
880 return 0; /* FALSE */
884 for (i = 0; i < fsize; i++)
885 if (tmask[tpos + i] != fmask[fpos + i])
886 return 0; /* FALSE */
/* Any excess tmask bytes must be zero ("don't care"). */
888 for ( ; i < tsize; i++)
890 return 0; /* FALSE */
894 *toffset_plus = tpos;
897 *foffset_plus = fpos;
/* Build a HASH table rule match from an rte_flow item list:
 * concatenate the spec/mask bytes of all protocol items into a flat
 * key/key-mask, then require the flow's key mask to be equivalent to
 * the table's configured key mask (see hash_key_mask_is_same()).
 * Returns 0 on success, a negative rte_flow error otherwise.
 */
903 flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused,
904 struct pipeline *pipeline __rte_unused,
905 struct softnic_table *table,
906 const struct rte_flow_attr *attr __rte_unused,
907 const struct rte_flow_item *item,
908 struct softnic_table_rule_match *rule_match,
909 struct rte_flow_error *error)
911 struct softnic_table_rule_match_hash key, key_mask;
912 struct softnic_table_hash_params *params = &table->params.match.hash;
913 size_t offset = 0, length = 0, tpos, fpos;
916 memset(&key, 0, sizeof(key));
917 memset(&key_mask, 0, sizeof(key_mask));
919 /* VOID or disabled protos only, if any. */
920 status = flow_item_skip_disabled_protos(&item, 0, &offset, error);
924 if (item->type == RTE_FLOW_ITEM_TYPE_END)
925 return rte_flow_error_set(error,
927 RTE_FLOW_ERROR_TYPE_ITEM,
929 "HASH: END detected too early");
931 /* VOID or any protocols (enabled or disabled). */
932 for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
933 union flow_item spec, mask;
935 int disabled, status;
937 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
940 status = flow_item_proto_preprocess(item,
/* Guard the flat key buffer against overflow before appending. */
949 if (length + size > sizeof(key)) {
953 return rte_flow_error_set(error,
955 RTE_FLOW_ERROR_TYPE_ITEM,
957 "HASH: Item too big");
960 memcpy(&key.key[length], &spec, size);
961 memcpy(&key_mask.key[length], &mask, size);
965 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
966 /* VOID or disabled protos only, if any. */
967 status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
972 if (item->type != RTE_FLOW_ITEM_TYPE_END)
973 return rte_flow_error_set(error,
975 RTE_FLOW_ERROR_TYPE_ITEM,
977 "HASH: Expecting END item");
980 /* Compare flow key mask against table key mask. */
/* Table key offset is mbuf-relative; skip mbuf header + headroom. */
981 offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
983 if (!hash_key_mask_is_same(params->key_mask,
991 return rte_flow_error_set(error,
993 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
995 "HASH: Item list is not observing the match format");
998 memset(rule_match, 0, sizeof(*rule_match));
999 rule_match->match_type = TABLE_HASH;
1000 memcpy(&rule_match->match.hash.key[tpos],
1002 RTE_MIN(sizeof(rule_match->match.hash.key) - tpos,
/* Dispatch rule-match construction on the table's match type (ACL or
 * HASH); any other match type is rejected as unsupported.
 */
1009 flow_rule_match_get(struct pmd_internals *softnic,
1010 struct pipeline *pipeline,
1011 struct softnic_table *table,
1012 const struct rte_flow_attr *attr,
1013 const struct rte_flow_item *item,
1014 struct softnic_table_rule_match *rule_match,
1015 struct rte_flow_error *error)
1017 switch (table->params.match_type) {
1019 return flow_rule_match_acl_get(softnic,
1030 return flow_rule_match_hash_get(softnic,
1041 return rte_flow_error_set(error,
1043 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1045 "Unsupported pipeline table match type");
/* Translate the rte_flow action list into a softnic table rule action.
 * Exactly one terminating action (JUMP, QUEUE, RSS or DROP) is
 * required per flow; COUNT may be added on top. Each action is
 * validated against the table's action profile mask.
 * NOTE(review): the error parameter is tagged __rte_unused but is
 * clearly used below via rte_flow_error_set() — the attribute looks
 * stale and should be dropped.
 */
1050 flow_rule_action_get(struct pmd_internals *softnic,
1051 struct pipeline *pipeline,
1052 struct softnic_table *table,
1053 const struct rte_flow_attr *attr,
1054 const struct rte_flow_action *action,
1055 struct softnic_table_rule_action *rule_action,
1056 struct rte_flow_error *error __rte_unused)
1058 struct softnic_table_action_profile *profile;
1059 struct softnic_table_action_profile_params *params;
1060 int n_jump_queue_rss_drop = 0;
1063 profile = softnic_table_action_profile_find(softnic,
1064 table->params.action_profile_name);
1065 if (profile == NULL)
1066 return rte_flow_error_set(error,
1068 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1070 "JUMP: Table action profile");
1072 params = &profile->params;
1074 for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
1075 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1078 switch (action->type) {
1079 case RTE_FLOW_ACTION_TYPE_JUMP:
1081 const struct rte_flow_action_jump *conf = action->conf;
1082 struct flow_attr_map *map;
1085 return rte_flow_error_set(error,
1087 RTE_FLOW_ERROR_TYPE_ACTION,
1089 "JUMP: Null configuration");
1091 if (n_jump_queue_rss_drop)
1092 return rte_flow_error_set(error,
1094 RTE_FLOW_ERROR_TYPE_ACTION,
1096 "Only one termination action is"
1097 " allowed per flow");
1099 if ((params->action_mask &
1100 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1101 return rte_flow_error_set(error,
1103 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1105 "JUMP action not enabled for this table");
1107 n_jump_queue_rss_drop = 1;
/* The jump target group must map to a table in this pipeline. */
1109 map = flow_attr_map_get(softnic,
1112 if (map == NULL || map->valid == 0)
1113 return rte_flow_error_set(error,
1115 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1117 "JUMP: Invalid group mapping");
1119 if (strcmp(pipeline->name, map->pipeline_name) != 0)
1120 return rte_flow_error_set(error,
1122 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1124 "JUMP: Jump to table in different pipeline");
1126 /* RTE_TABLE_ACTION_FWD */
1127 rule_action->fwd.action = RTE_PIPELINE_ACTION_TABLE;
1128 rule_action->fwd.id = map->table_id;
1129 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1131 } /* RTE_FLOW_ACTION_TYPE_JUMP */
1133 case RTE_FLOW_ACTION_TYPE_QUEUE:
1135 char name[NAME_SIZE];
1136 struct rte_eth_dev *dev;
1137 const struct rte_flow_action_queue *conf = action->conf;
1142 return rte_flow_error_set(error,
1144 RTE_FLOW_ERROR_TYPE_ACTION,
1146 "QUEUE: Null configuration");
1148 if (n_jump_queue_rss_drop)
1149 return rte_flow_error_set(error,
1151 RTE_FLOW_ERROR_TYPE_ACTION,
1153 "Only one termination action is allowed"
1156 if ((params->action_mask &
1157 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1158 return rte_flow_error_set(error,
1160 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1162 "QUEUE action not enabled for this table");
1164 n_jump_queue_rss_drop = 1;
1166 dev = ETHDEV(softnic);
1168 conf->index >= dev->data->nb_rx_queues)
1169 return rte_flow_error_set(error,
1171 RTE_FLOW_ERROR_TYPE_ACTION,
1173 "QUEUE: Invalid RX queue ID");
/* Map the RX queue to its softnic output port by name ("RXQ<n>").
 * NOTE(review): snprintf would be the bounded-write idiom here,
 * though "RXQ" + a u32 presumably fits NAME_SIZE — confirm.
 */
1175 sprintf(name, "RXQ%u", (uint32_t)conf->index);
1177 status = softnic_pipeline_port_out_find(softnic,
1182 return rte_flow_error_set(error,
1184 RTE_FLOW_ERROR_TYPE_ACTION,
1186 "QUEUE: RX queue not accessible from this pipeline");
1188 /* RTE_TABLE_ACTION_FWD */
1189 rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT;
1190 rule_action->fwd.id = port_id;
1191 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1193 } /*RTE_FLOW_ACTION_TYPE_QUEUE */
1195 case RTE_FLOW_ACTION_TYPE_RSS:
1197 const struct rte_flow_action_rss *conf = action->conf;
1201 return rte_flow_error_set(error,
1203 RTE_FLOW_ERROR_TYPE_ACTION,
1205 "RSS: Null configuration");
1207 if (!rte_is_power_of_2(conf->queue_num))
1208 return rte_flow_error_set(error,
1210 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1212 "RSS: Number of queues must be a power of 2");
1214 if (conf->queue_num > RTE_DIM(rule_action->lb.out))
1215 return rte_flow_error_set(error,
1217 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1219 "RSS: Number of queues too big");
1221 if (n_jump_queue_rss_drop)
1222 return rte_flow_error_set(error,
1224 RTE_FLOW_ERROR_TYPE_ACTION,
1226 "Only one termination action is allowed per flow");
/* RSS needs both FWD and LB enabled in the action profile. */
1228 if (((params->action_mask &
1229 (1LLU << RTE_TABLE_ACTION_FWD)) == 0) ||
1230 ((params->action_mask &
1231 (1LLU << RTE_TABLE_ACTION_LB)) == 0))
1232 return rte_flow_error_set(error,
1234 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1236 "RSS action not supported by this table");
1238 if (params->lb.out_offset !=
1239 pipeline->params.offset_port_id)
1240 return rte_flow_error_set(error,
1242 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1244 "RSS action not supported by this pipeline");
1246 n_jump_queue_rss_drop = 1;
1248 /* RTE_TABLE_ACTION_LB */
1249 for (i = 0; i < conf->queue_num; i++) {
1250 char name[NAME_SIZE];
1251 struct rte_eth_dev *dev;
1255 dev = ETHDEV(softnic);
1258 dev->data->nb_rx_queues)
1259 return rte_flow_error_set(error,
1261 RTE_FLOW_ERROR_TYPE_ACTION,
1263 "RSS: Invalid RX queue ID");
1265 sprintf(name, "RXQ%u",
1266 (uint32_t)conf->queue[i]);
1268 status = softnic_pipeline_port_out_find(softnic,
1273 return rte_flow_error_set(error,
1275 RTE_FLOW_ERROR_TYPE_ACTION,
1277 "RSS: RX queue not accessible from this pipeline");
1279 rule_action->lb.out[i] = port_id;
/* Fill the rest of the LB table by wrapping around the queues. */
1282 for ( ; i < RTE_DIM(rule_action->lb.out); i++)
1283 rule_action->lb.out[i] =
1284 rule_action->lb.out[i % conf->queue_num];
1286 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_LB;
1288 /* RTE_TABLE_ACTION_FWD */
1289 rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
1290 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1292 } /* RTE_FLOW_ACTION_TYPE_RSS */
1294 case RTE_FLOW_ACTION_TYPE_DROP:
1296 const void *conf = action->conf;
1299 return rte_flow_error_set(error,
1301 RTE_FLOW_ERROR_TYPE_ACTION,
1303 "DROP: No configuration required");
1305 if (n_jump_queue_rss_drop)
1306 return rte_flow_error_set(error,
1308 RTE_FLOW_ERROR_TYPE_ACTION,
1310 "Only one termination action is allowed per flow");
1311 if ((params->action_mask &
1312 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1313 return rte_flow_error_set(error,
1315 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1317 "DROP action not supported by this table");
1319 n_jump_queue_rss_drop = 1;
1321 /* RTE_TABLE_ACTION_FWD */
1322 rule_action->fwd.action = RTE_PIPELINE_ACTION_DROP;
1323 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1325 } /* RTE_FLOW_ACTION_TYPE_DROP */
1327 case RTE_FLOW_ACTION_TYPE_COUNT:
1329 const struct rte_flow_action_count *conf = action->conf;
1332 return rte_flow_error_set(error,
1334 RTE_FLOW_ERROR_TYPE_ACTION,
1336 "COUNT: Null configuration");
1339 return rte_flow_error_set(error,
1341 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1343 "COUNT: Shared counters not supported");
1346 return rte_flow_error_set(error,
1348 RTE_FLOW_ERROR_TYPE_ACTION,
1350 "Only one COUNT action per flow");
1352 if ((params->action_mask &
1353 (1LLU << RTE_TABLE_ACTION_STATS)) == 0)
1354 return rte_flow_error_set(error,
1356 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1358 "COUNT action not supported by this table");
1362 /* RTE_TABLE_ACTION_STATS */
1363 rule_action->stats.n_packets = 0;
1364 rule_action->stats.n_bytes = 0;
1365 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
1367 } /* RTE_FLOW_ACTION_TYPE_COUNT */
/* A flow with no terminating action is rejected. */
1374 if (n_jump_queue_rss_drop == 0)
1375 return rte_flow_error_set(error,
1377 RTE_FLOW_ERROR_TYPE_ACTION,
1379 "Flow does not have any terminating action");
/* rte_flow_ops.validate callback: run the full create-path validation
 * (attribute resolution, match construction, action construction)
 * without installing anything. Returns 0 when the flow would be
 * accepted by pmd_flow_create(), a negative rte_flow error otherwise.
 */
1385 pmd_flow_validate(struct rte_eth_dev *dev,
1386 const struct rte_flow_attr *attr,
1387 const struct rte_flow_item item[],
1388 const struct rte_flow_action action[],
1389 struct rte_flow_error *error)
1391 struct softnic_table_rule_match rule_match;
1392 struct softnic_table_rule_action rule_action;
1394 struct pmd_internals *softnic = dev->data->dev_private;
1395 struct pipeline *pipeline;
1396 struct softnic_table *table;
1397 const char *pipeline_name = NULL;
1398 uint32_t table_id = 0;
1401 /* Check input parameters. */
1403 return rte_flow_error_set(error,
1405 RTE_FLOW_ERROR_TYPE_ATTR,
1409 return rte_flow_error_set(error,
1411 RTE_FLOW_ERROR_TYPE_ITEM,
1416 return rte_flow_error_set(error,
1418 RTE_FLOW_ERROR_TYPE_ACTION,
1422 /* Identify the pipeline table to add this flow to. */
1423 status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
1428 pipeline = softnic_pipeline_find(softnic, pipeline_name);
1429 if (pipeline == NULL)
1430 return rte_flow_error_set(error,
1432 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1434 "Invalid pipeline name");
1436 if (table_id >= pipeline->n_tables)
1437 return rte_flow_error_set(error,
1439 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1441 "Invalid pipeline table ID");
1443 table = &pipeline->table[table_id];
/* Dry-run: build the rule match and rule action, discard results. */
1446 memset(&rule_match, 0, sizeof(rule_match));
1447 status = flow_rule_match_get(softnic,
1458 memset(&rule_action, 0, sizeof(rule_action));
1459 status = flow_rule_action_get(softnic,
/* rte_flow_ops.create callback: validate the flow, translate it into a
 * softnic table rule, install the rule in the pipeline table, and
 * track it in the table's flow list. An identical existing match is
 * updated in place (rule replace) rather than duplicated.
 * Returns the flow handle, or NULL with *error set on failure.
 */
1472 static struct rte_flow *
1473 pmd_flow_create(struct rte_eth_dev *dev,
1474 const struct rte_flow_attr *attr,
1475 const struct rte_flow_item item[],
1476 const struct rte_flow_action action[],
1477 struct rte_flow_error *error)
1479 struct softnic_table_rule_match rule_match;
1480 struct softnic_table_rule_action rule_action;
1483 struct pmd_internals *softnic = dev->data->dev_private;
1484 struct pipeline *pipeline;
1485 struct softnic_table *table;
1486 struct rte_flow *flow;
1487 const char *pipeline_name = NULL;
1488 uint32_t table_id = 0;
1489 int new_flow, status;
1491 /* Check input parameters. */
1493 rte_flow_error_set(error,
1495 RTE_FLOW_ERROR_TYPE_ATTR,
1502 rte_flow_error_set(error,
1504 RTE_FLOW_ERROR_TYPE_ITEM,
1510 if (action == NULL) {
1511 rte_flow_error_set(error,
1513 RTE_FLOW_ERROR_TYPE_ACTION,
1519 /* Identify the pipeline table to add this flow to. */
1520 status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
1525 pipeline = softnic_pipeline_find(softnic, pipeline_name);
1526 if (pipeline == NULL) {
1527 rte_flow_error_set(error,
1529 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1531 "Invalid pipeline name");
1535 if (table_id >= pipeline->n_tables) {
1536 rte_flow_error_set(error,
1538 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1540 "Invalid pipeline table ID");
1544 table = &pipeline->table[table_id];
1547 memset(&rule_match, 0, sizeof(rule_match));
1548 status = flow_rule_match_get(softnic,
1559 memset(&rule_action, 0, sizeof(rule_action));
1560 status = flow_rule_action_get(softnic,
1570 /* Flow find/allocate. */
/* Reuse an existing flow with the same match; allocate otherwise. */
1572 flow = softnic_flow_find(table, &rule_match);
1575 flow = calloc(1, sizeof(struct rte_flow));
1577 rte_flow_error_set(error,
1579 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1581 "Not enough memory for new flow");
1587 status = softnic_pipeline_table_rule_add(softnic,
1597 rte_flow_error_set(error,
1599 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1601 "Pipeline table rule add failed");
/* Commit the installed rule's state into the flow handle. */
1606 memcpy(&flow->match, &rule_match, sizeof(rule_match));
1607 memcpy(&flow->action, &rule_action, sizeof(rule_action));
1608 flow->data = rule_data;
1609 flow->pipeline = pipeline;
1610 flow->table_id = table_id;
1612 /* Flow add to list. */
1614 TAILQ_INSERT_TAIL(&table->flows, flow, node);
/* rte_flow_ops.destroy callback: delete the flow's rule from the
 * pipeline table and remove the flow from the table's tracking list.
 * NOTE(review): the free() of the flow handle is not visible in this
 * view (lines elided) — confirm it follows the TAILQ_REMOVE.
 */
1620 pmd_flow_destroy(struct rte_eth_dev *dev,
1621 struct rte_flow *flow,
1622 struct rte_flow_error *error)
1624 struct pmd_internals *softnic = dev->data->dev_private;
1625 struct softnic_table *table;
1628 /* Check input parameters. */
1630 return rte_flow_error_set(error,
1632 RTE_FLOW_ERROR_TYPE_HANDLE,
1636 table = &flow->pipeline->table[flow->table_id];
1639 status = softnic_pipeline_table_rule_delete(softnic,
1640 flow->pipeline->name,
1644 return rte_flow_error_set(error,
1646 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1648 "Pipeline table rule delete failed");
1651 TAILQ_REMOVE(&table->flows, flow, node);
/* rte_flow driver ops table exported by the softnic PMD. */
1657 const struct rte_flow_ops pmd_flow_ops = {
1658 .validate = pmd_flow_validate,
1659 .create = pmd_flow_create,
1660 .destroy = pmd_flow_destroy,