1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include "rte_eth_softnic_internals.h"
6 #include "rte_eth_softnic.h"
8 #define rte_ntohs rte_be_to_cpu_16
9 #define rte_ntohl rte_be_to_cpu_32
12 flow_attr_map_set(struct pmd_internals *softnic,
15 const char *pipeline_name,
18 struct pipeline *pipeline;
19 struct flow_attr_map *map;
21 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
22 pipeline_name == NULL)
25 pipeline = softnic_pipeline_find(softnic, pipeline_name);
26 if (pipeline == NULL ||
27 table_id >= pipeline->n_tables)
30 map = (ingress) ? &softnic->flow.ingress_map[group_id] :
31 &softnic->flow.egress_map[group_id];
32 strcpy(map->pipeline_name, pipeline_name);
33 map->table_id = table_id;
39 struct flow_attr_map *
40 flow_attr_map_get(struct pmd_internals *softnic,
44 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
47 return (ingress) ? &softnic->flow.ingress_map[group_id] :
48 &softnic->flow.egress_map[group_id];
/*
 * flow_pipeline_table_get(): resolve the (attr->group, ingress/egress) pair
 * of an rte_flow rule into a pipeline name and table ID via the per-device
 * attribute map. Errors are reported through rte_flow_error_set().
 * NOTE(review): several source lines (return type, some arguments, closing
 * braces) appear elided by extraction; code kept byte-identical.
 */
52 flow_pipeline_table_get(struct pmd_internals *softnic,
53 const struct rte_flow_attr *attr,
54 const char **pipeline_name,
56 struct rte_flow_error *error)
58 struct flow_attr_map *map;
/* attr sanity check — the condition line is not visible in this chunk. */
61 return rte_flow_error_set(error,
63 RTE_FLOW_ERROR_TYPE_ATTR,
/* Exactly one of ingress/egress must be set. */
67 if (!attr->ingress && !attr->egress)
68 return rte_flow_error_set(error,
70 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
72 "Ingress/egress not specified");
74 if (attr->ingress && attr->egress)
75 return rte_flow_error_set(error,
77 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
79 "Setting both ingress and egress is not allowed");
/* Look up the group -> (pipeline, table) mapping for this direction. */
81 map = flow_attr_map_get(softnic,
86 return rte_flow_error_set(error,
88 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
/* Outputs: pipeline name and table ID the flow should be added to. */
93 *pipeline_name = map->pipeline_name;
96 *table_id = map->table_id;
/*
 * Scratch storage for a preprocessed rte_flow item: one member per supported
 * protocol item type, plus a raw byte view (TABLE_RULE_MATCH_SIZE_MAX bytes)
 * used for byte-wise mask/spec manipulation.
 * NOTE(review): the opening "union flow_item {" line and closing brace are
 * not visible in this chunk; members kept byte-identical.
 */
102 uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
103 struct rte_flow_item_eth eth;
104 struct rte_flow_item_vlan vlan;
105 struct rte_flow_item_ipv4 ipv4;
106 struct rte_flow_item_ipv6 ipv6;
107 struct rte_flow_item_icmp icmp;
108 struct rte_flow_item_udp udp;
109 struct rte_flow_item_tcp tcp;
110 struct rte_flow_item_sctp sctp;
111 struct rte_flow_item_vxlan vxlan;
112 struct rte_flow_item_e_tag e_tag;
113 struct rte_flow_item_nvgre nvgre;
114 struct rte_flow_item_mpls mpls;
115 struct rte_flow_item_gre gre;
116 struct rte_flow_item_gtp gtp;
117 struct rte_flow_item_esp esp;
118 struct rte_flow_item_geneve geneve;
119 struct rte_flow_item_vxlan_gpe vxlan_gpe;
120 struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
121 struct rte_flow_item_ipv6_ext ipv6_ext;
122 struct rte_flow_item_icmp6 icmp6;
123 struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
124 struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
125 struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
126 struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
127 struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
/* All-zeros default mask for RAW items (static storage => zero-initialized). */
130 static const union flow_item flow_item_raw_mask;
/*
 * flow_item_is_proto(): classify an rte_flow item type. For protocol items,
 * output the item's default mask and its spec/mask size; non-protocol items
 * fall through to the default case returning 0 (FALSE).
 * NOTE(review): the "return 1" / "break"-style lines of each case and the
 * function's opening lines are elided in this chunk; code kept byte-identical.
 */
133 flow_item_is_proto(enum rte_flow_item_type type,
138 case RTE_FLOW_ITEM_TYPE_RAW:
139 *mask = &flow_item_raw_mask;
140 *size = sizeof(flow_item_raw_mask);
143 case RTE_FLOW_ITEM_TYPE_ETH:
144 *mask = &rte_flow_item_eth_mask;
145 *size = sizeof(struct rte_flow_item_eth);
148 case RTE_FLOW_ITEM_TYPE_VLAN:
149 *mask = &rte_flow_item_vlan_mask;
150 *size = sizeof(struct rte_flow_item_vlan);
153 case RTE_FLOW_ITEM_TYPE_IPV4:
154 *mask = &rte_flow_item_ipv4_mask;
155 *size = sizeof(struct rte_flow_item_ipv4);
158 case RTE_FLOW_ITEM_TYPE_IPV6:
159 *mask = &rte_flow_item_ipv6_mask;
160 *size = sizeof(struct rte_flow_item_ipv6);
163 case RTE_FLOW_ITEM_TYPE_ICMP:
164 *mask = &rte_flow_item_icmp_mask;
165 *size = sizeof(struct rte_flow_item_icmp);
168 case RTE_FLOW_ITEM_TYPE_UDP:
169 *mask = &rte_flow_item_udp_mask;
170 *size = sizeof(struct rte_flow_item_udp);
173 case RTE_FLOW_ITEM_TYPE_TCP:
174 *mask = &rte_flow_item_tcp_mask;
175 *size = sizeof(struct rte_flow_item_tcp);
178 case RTE_FLOW_ITEM_TYPE_SCTP:
179 *mask = &rte_flow_item_sctp_mask;
180 *size = sizeof(struct rte_flow_item_sctp);
183 case RTE_FLOW_ITEM_TYPE_VXLAN:
184 *mask = &rte_flow_item_vxlan_mask;
185 *size = sizeof(struct rte_flow_item_vxlan);
188 case RTE_FLOW_ITEM_TYPE_E_TAG:
189 *mask = &rte_flow_item_e_tag_mask;
190 *size = sizeof(struct rte_flow_item_e_tag);
193 case RTE_FLOW_ITEM_TYPE_NVGRE:
194 *mask = &rte_flow_item_nvgre_mask;
195 *size = sizeof(struct rte_flow_item_nvgre);
198 case RTE_FLOW_ITEM_TYPE_MPLS:
199 *mask = &rte_flow_item_mpls_mask;
200 *size = sizeof(struct rte_flow_item_mpls);
203 case RTE_FLOW_ITEM_TYPE_GRE:
204 *mask = &rte_flow_item_gre_mask;
205 *size = sizeof(struct rte_flow_item_gre);
/* GTP, GTPC and GTPU share the same spec/mask layout. */
208 case RTE_FLOW_ITEM_TYPE_GTP:
209 case RTE_FLOW_ITEM_TYPE_GTPC:
210 case RTE_FLOW_ITEM_TYPE_GTPU:
211 *mask = &rte_flow_item_gtp_mask;
212 *size = sizeof(struct rte_flow_item_gtp);
215 case RTE_FLOW_ITEM_TYPE_ESP:
216 *mask = &rte_flow_item_esp_mask;
217 *size = sizeof(struct rte_flow_item_esp);
220 case RTE_FLOW_ITEM_TYPE_GENEVE:
221 *mask = &rte_flow_item_geneve_mask;
222 *size = sizeof(struct rte_flow_item_geneve);
225 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
226 *mask = &rte_flow_item_vxlan_gpe_mask;
227 *size = sizeof(struct rte_flow_item_vxlan_gpe);
230 case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
231 *mask = &rte_flow_item_arp_eth_ipv4_mask;
232 *size = sizeof(struct rte_flow_item_arp_eth_ipv4);
235 case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
236 *mask = &rte_flow_item_ipv6_ext_mask;
237 *size = sizeof(struct rte_flow_item_ipv6_ext);
240 case RTE_FLOW_ITEM_TYPE_ICMP6:
241 *mask = &rte_flow_item_icmp6_mask;
242 *size = sizeof(struct rte_flow_item_icmp6);
245 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
246 *mask = &rte_flow_item_icmp6_nd_ns_mask;
247 *size = sizeof(struct rte_flow_item_icmp6_nd_ns);
250 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
251 *mask = &rte_flow_item_icmp6_nd_na_mask;
252 *size = sizeof(struct rte_flow_item_icmp6_nd_na);
255 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
256 *mask = &rte_flow_item_icmp6_nd_opt_mask;
257 *size = sizeof(struct rte_flow_item_icmp6_nd_opt);
260 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
261 *mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
262 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
265 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
266 *mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
267 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
/* Anything else is not a protocol item. */
270 default: return 0; /* FALSE */
/*
 * flow_item_proto_preprocess(): normalize one protocol item into fixed-size
 * spec/mask buffers. Fills in the default mask when item->mask is NULL,
 * applies the mask over the spec, detects all-zero masks (item "disabled"),
 * and rejects ranges (item->last differing from spec after masking).
 * NOTE(review): declarations of i/size/last and several control-flow lines
 * are elided in this chunk; code kept byte-identical.
 */
275 flow_item_proto_preprocess(const struct rte_flow_item *item,
276 union flow_item *item_spec,
277 union flow_item *item_mask,
280 struct rte_flow_error *error)
282 const void *mask_default;
283 uint8_t *spec = (uint8_t *)item_spec;
284 uint8_t *mask = (uint8_t *)item_mask;
287 if (!flow_item_is_proto(item->type, &mask_default, &size))
288 return rte_flow_error_set(error,
290 RTE_FLOW_ERROR_TYPE_ITEM,
292 "Item type not supported");
296 /* If spec is NULL, then last and mask also have to be NULL. */
297 if (item->last || item->mask)
298 return rte_flow_error_set(error,
300 RTE_FLOW_ERROR_TYPE_ITEM,
302 "Invalid item (NULL spec with non-NULL last or mask)");
/* NULL spec: zero both buffers and mark the item as disabled. */
304 memset(item_spec, 0, size);
305 memset(item_mask, 0, size);
307 *item_disabled = 1; /* TRUE */
311 memcpy(spec, item->spec, size);
/* Use the caller's mask when provided, the type's default otherwise. */
316 memcpy(mask, item->mask, size);
318 memcpy(mask, mask_default, size);
/* Item is "disabled" when every mask byte is zero (i == size). */
321 for (i = 0; i < size; i++)
324 *item_disabled = (i == size) ? 1 : 0;
326 /* Apply mask over spec. */
327 for (i = 0; i < size; i++)
/* item->last: mask it too, then require it to equal the masked spec. */
335 memcpy(last, item->last, size);
336 for (i = 0; i < size; i++)
339 /* check for range */
340 for (i = 0; i < size; i++)
341 if (last[i] != spec[i])
342 return rte_flow_error_set(error,
344 RTE_FLOW_ERROR_TYPE_ITEM,
346 "Range not supported");
353 * Skip disabled protocol items and VOID items
354 * until any of the mutually exclusive conditions
355 * from the list below takes place:
356 * (A) A protocol present in the proto_mask
357 * is met (either ENABLED or DISABLED);
358 * (B) A protocol NOT present in the proto_mask is met in ENABLED state;
359 * (C) The END item is met.
/*
 * NOTE(review): signature lines (proto_mask, length out-parameter) and the
 * loop's continue/break logic are partially elided; code kept byte-identical.
 */
362 flow_item_skip_disabled_protos(const struct rte_flow_item **item,
365 struct rte_flow_error *error)
369 for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
370 union flow_item spec, mask;
372 int disabled = 0, status;
/* VOID items carry no match semantics; skip them outright. */
374 if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
377 status = flow_item_proto_preprocess(*item,
/* Stop on condition (A) or (B) from the comment above. */
386 if ((proto_mask & (1LLU << (*item)->type)) ||
399 #define FLOW_ITEM_PROTO_IP \
400 ((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
401 (1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
404 flow_item_skip_void(const struct rte_flow_item **item)
407 if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
411 #define IP_PROTOCOL_TCP 0x06
412 #define IP_PROTOCOL_UDP 0x11
413 #define IP_PROTOCOL_SCTP 0x84
/**
 * Convert a 64-bit mask (CPU byte order) to a prefix depth.
 *
 * A valid mask is a contiguous run of set most-significant bits
 * (binary 11...100...0); any other pattern is rejected.
 *
 * @param mask Mask to convert.
 * @param depth Output number of leading 1 bits; may be NULL when the caller
 *   only wants the validity check.
 * @return 0 on success, -1 when the mask is non-contiguous.
 */
static int
mask_to_depth(uint64_t mask,
	uint32_t *depth)
{
	uint64_t n;

	if (mask == UINT64_MAX) {
		if (depth)
			*depth = 64;

		return 0;
	}

	mask = ~mask;

	/* Inverted valid masks look like 00...011...1, so mask & (mask + 1)
	 * must be zero; a non-zero result means the original had holes.
	 */
	if (mask & (mask + 1))
		return -1;

	n = __builtin_popcountll(mask);
	if (depth)
		*depth = (uint32_t)(64 - n);

	return 0;
}
/**
 * Convert an IPv4 address mask (CPU byte order) to a prefix depth (0..32).
 *
 * The 32-bit mask is placed in the low half of a 64-bit value whose high
 * half is all-ones, validated/converted by mask_to_depth(), then rebased.
 *
 * @return 0 on success, -1 when the mask is non-contiguous.
 */
static int
ipv4_mask_to_depth(uint32_t mask,
	uint32_t *depth)
{
	uint32_t d;
	int status;

	status = mask_to_depth(mask | (UINT64_MAX << 32), &d);
	if (status)
		return status;

	d -= 32;
	if (depth)
		*depth = d;

	return 0;
}
/**
 * Convert a 16-byte IPv6 address mask (network byte order) to a prefix
 * depth (0..128).
 *
 * Each 64-bit half is converted separately; the combination is valid only
 * when the low half is empty unless the high half is fully set.
 *
 * @return 0 on success, -1 when the mask is non-contiguous.
 */
static int
ipv6_mask_to_depth(uint8_t *mask,
	uint32_t *depth)
{
	uint64_t *m = (uint64_t *)mask;
	uint64_t m0 = rte_be_to_cpu_64(m[0]);
	uint64_t m1 = rte_be_to_cpu_64(m[1]);
	uint32_t d0, d1;
	int status;

	status = mask_to_depth(m0, &d0);
	if (status)
		return status;

	status = mask_to_depth(m1, &d1);
	if (status)
		return status;

	/* Bits set in the low half with a hole in the high half => invalid. */
	if (d0 < 64 && d1)
		return -1;

	if (depth)
		*depth = d0 + d1;

	return 0;
}
486 port_mask_to_range(uint16_t port,
494 status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL);
498 p0 = port & port_mask;
499 p1 = p0 | ~port_mask;
/*
 * flow_rule_match_acl_get(): translate an rte_flow item list into a softnic
 * ACL table rule match. Expected item shape: [VOID/disabled]* IPV4|IPV6
 * [VOID]* TCP|UDP|SCTP [VOID/disabled]* END. The L4 protocol item must agree
 * with the IP header's next-proto field, whose mask must be exact.
 * NOTE(review): many interior lines (status checks, break statements, closing
 * braces) are elided in this chunk; code kept byte-identical.
 */
511 flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
512 struct pipeline *pipeline __rte_unused,
513 struct softnic_table *table __rte_unused,
514 const struct rte_flow_attr *attr,
515 const struct rte_flow_item *item,
516 struct softnic_table_rule_match *rule_match,
517 struct rte_flow_error *error)
519 union flow_item spec, mask;
520 size_t size, length = 0;
521 int disabled = 0, status;
522 uint8_t ip_proto, ip_proto_mask;
/* ACL rule priority is taken directly from the flow attributes. */
524 memset(rule_match, 0, sizeof(*rule_match));
525 rule_match->match_type = TABLE_ACL;
526 rule_match->match.acl.priority = attr->priority;
528 /* VOID or disabled protos only, if any. */
529 status = flow_item_skip_disabled_protos(&item,
530 FLOW_ITEM_PROTO_IP, &length, error);
535 status = flow_item_proto_preprocess(item, &spec, &mask,
536 &size, &disabled, error);
540 switch (item->type) {
541 case RTE_FLOW_ITEM_TYPE_IPV4:
543 uint32_t sa_depth, da_depth;
/* Source/destination masks must be contiguous prefixes. */
545 status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr),
548 return rte_flow_error_set(error,
550 RTE_FLOW_ERROR_TYPE_ITEM,
552 "ACL: Illegal IPv4 header source address mask");
554 status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr),
557 return rte_flow_error_set(error,
559 RTE_FLOW_ERROR_TYPE_ITEM,
561 "ACL: Illegal IPv4 header destination address mask");
563 ip_proto = spec.ipv4.hdr.next_proto_id;
564 ip_proto_mask = mask.ipv4.hdr.next_proto_id;
/* Fill the IPv4 half of the ACL match (addresses in CPU order). */
566 rule_match->match.acl.ip_version = 1;
567 rule_match->match.acl.ipv4.sa =
568 rte_ntohl(spec.ipv4.hdr.src_addr);
569 rule_match->match.acl.ipv4.da =
570 rte_ntohl(spec.ipv4.hdr.dst_addr);
571 rule_match->match.acl.sa_depth = sa_depth;
572 rule_match->match.acl.da_depth = da_depth;
573 rule_match->match.acl.proto = ip_proto;
574 rule_match->match.acl.proto_mask = ip_proto_mask;
576 } /* RTE_FLOW_ITEM_TYPE_IPV4 */
578 case RTE_FLOW_ITEM_TYPE_IPV6:
580 uint32_t sa_depth, da_depth;
582 status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth);
584 return rte_flow_error_set(error,
586 RTE_FLOW_ERROR_TYPE_ITEM,
588 "ACL: Illegal IPv6 header source address mask");
590 status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth);
592 return rte_flow_error_set(error,
594 RTE_FLOW_ERROR_TYPE_ITEM,
596 "ACL: Illegal IPv6 header destination address mask");
598 ip_proto = spec.ipv6.hdr.proto;
599 ip_proto_mask = mask.ipv6.hdr.proto;
/* Fill the IPv6 half of the ACL match (addresses kept as byte arrays). */
601 rule_match->match.acl.ip_version = 0;
602 memcpy(rule_match->match.acl.ipv6.sa,
603 spec.ipv6.hdr.src_addr,
604 sizeof(spec.ipv6.hdr.src_addr));
605 memcpy(rule_match->match.acl.ipv6.da,
606 spec.ipv6.hdr.dst_addr,
607 sizeof(spec.ipv6.hdr.dst_addr));
608 rule_match->match.acl.sa_depth = sa_depth;
609 rule_match->match.acl.da_depth = da_depth;
610 rule_match->match.acl.proto = ip_proto;
611 rule_match->match.acl.proto_mask = ip_proto_mask;
613 } /* RTE_FLOW_ITEM_TYPE_IPV6 */
/* default: first non-VOID item was neither IPv4 nor IPv6. */
616 return rte_flow_error_set(error,
618 RTE_FLOW_ERROR_TYPE_ITEM,
620 "ACL: IP protocol required");
/* The L4 protocol field must be matched exactly for the ACL table. */
623 if (ip_proto_mask != UINT8_MAX)
624 return rte_flow_error_set(error,
626 RTE_FLOW_ERROR_TYPE_ITEM,
628 "ACL: Illegal IP protocol mask");
632 /* VOID only, if any. */
633 flow_item_skip_void(&item);
635 /* TCP/UDP/SCTP only. */
636 status = flow_item_proto_preprocess(item, &spec, &mask,
637 &size, &disabled, error);
641 switch (item->type) {
642 case RTE_FLOW_ITEM_TYPE_TCP:
644 uint16_t sp0, sp1, dp0, dp1;
/* L4 item type must agree with the IP header's next-proto value. */
646 if (ip_proto != IP_PROTOCOL_TCP)
647 return rte_flow_error_set(error,
649 RTE_FLOW_ERROR_TYPE_ITEM,
651 "ACL: Item type is TCP, but IP protocol is not");
/* Port masks become inclusive [p0, p1] ranges. */
653 status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port),
654 rte_ntohs(mask.tcp.hdr.src_port),
659 return rte_flow_error_set(error,
661 RTE_FLOW_ERROR_TYPE_ITEM,
663 "ACL: Illegal TCP source port mask");
665 status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port),
666 rte_ntohs(mask.tcp.hdr.dst_port),
671 return rte_flow_error_set(error,
673 RTE_FLOW_ERROR_TYPE_ITEM,
675 "ACL: Illegal TCP destination port mask");
677 rule_match->match.acl.sp0 = sp0;
678 rule_match->match.acl.sp1 = sp1;
679 rule_match->match.acl.dp0 = dp0;
680 rule_match->match.acl.dp1 = dp1;
683 } /* RTE_FLOW_ITEM_TYPE_TCP */
685 case RTE_FLOW_ITEM_TYPE_UDP:
687 uint16_t sp0, sp1, dp0, dp1;
689 if (ip_proto != IP_PROTOCOL_UDP)
690 return rte_flow_error_set(error,
692 RTE_FLOW_ERROR_TYPE_ITEM,
694 "ACL: Item type is UDP, but IP protocol is not");
696 status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port),
697 rte_ntohs(mask.udp.hdr.src_port),
701 return rte_flow_error_set(error,
703 RTE_FLOW_ERROR_TYPE_ITEM,
705 "ACL: Illegal UDP source port mask");
707 status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port),
708 rte_ntohs(mask.udp.hdr.dst_port),
712 return rte_flow_error_set(error,
714 RTE_FLOW_ERROR_TYPE_ITEM,
716 "ACL: Illegal UDP destination port mask");
718 rule_match->match.acl.sp0 = sp0;
719 rule_match->match.acl.sp1 = sp1;
720 rule_match->match.acl.dp0 = dp0;
721 rule_match->match.acl.dp1 = dp1;
724 } /* RTE_FLOW_ITEM_TYPE_UDP */
726 case RTE_FLOW_ITEM_TYPE_SCTP:
728 uint16_t sp0, sp1, dp0, dp1;
730 if (ip_proto != IP_PROTOCOL_SCTP)
731 return rte_flow_error_set(error,
733 RTE_FLOW_ERROR_TYPE_ITEM,
735 "ACL: Item type is SCTP, but IP protocol is not");
737 status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port),
738 rte_ntohs(mask.sctp.hdr.src_port),
743 return rte_flow_error_set(error,
745 RTE_FLOW_ERROR_TYPE_ITEM,
747 "ACL: Illegal SCTP source port mask");
749 status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port),
750 rte_ntohs(mask.sctp.hdr.dst_port),
754 return rte_flow_error_set(error,
756 RTE_FLOW_ERROR_TYPE_ITEM,
758 "ACL: Illegal SCTP destination port mask");
760 rule_match->match.acl.sp0 = sp0;
761 rule_match->match.acl.sp1 = sp1;
762 rule_match->match.acl.dp0 = dp0;
763 rule_match->match.acl.dp1 = dp1;
766 } /* RTE_FLOW_ITEM_TYPE_SCTP */
769 return rte_flow_error_set(error,
771 RTE_FLOW_ERROR_TYPE_ITEM,
773 "ACL: TCP/UDP/SCTP required");
778 /* VOID or disabled protos only, if any. */
779 status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
/* Nothing but END may follow the L4 item. */
784 if (item->type != RTE_FLOW_ITEM_TYPE_END)
785 return rte_flow_error_set(error,
787 RTE_FLOW_ERROR_TYPE_ITEM,
789 "ACL: Expecting END item");
795 * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
797 * They are located within a larger buffer at offsets *toffset* and *foffset*
798 * respectivelly. Both *tmask* and *fmask* represent bitmasks for the larger
800 * Question: are the two masks equivalent?
803 * 1. Offset basically indicates that the first offset bytes in the buffer
804 * are "don't care", so offset is equivalent to pre-pending an "all-zeros"
805 * array of *offset* bytes to the *mask*.
806 * 2. Each *mask* might contain a number of zero bytes at the beginning or
808 * 3. Bytes in the larger buffer after the end of the *mask* are also considered
809 * "don't care", so they are equivalent to appending an "all-zeros" array of
810 * bytes to the *mask*.
813 * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes
814 * tmask = [00 22 00 33 00], toffset = 2, tsize = 5
815 * => buffer mask = [00 00 00 22 00 33 00 00]
816 * fmask = [22 00 33], foffset = 3, fsize = 3 =>
817 * => buffer mask = [00 00 00 22 00 33 00 00]
818 * Therefore, the tmask and fmask from this example are equivalent.
/*
 * Returns 1 (TRUE) when the masks are equivalent, 0 (FALSE) otherwise; on
 * success also reports adjusted positions via *toffset_plus/*foffset_plus.
 * NOTE(review): the leading-zero scan loops rely on a non-zero byte existing
 * within each mask — presumably guaranteed by the caller; confirm upstream.
 * Several interior lines (tsize/fsize adjustment, if/else structure) are
 * elided in this chunk; code kept byte-identical.
 */
821 hash_key_mask_is_same(uint8_t *tmask,
827 size_t *toffset_plus,
828 size_t *foffset_plus)
830 size_t tpos; /* Position of first non-zero byte in the tmask buffer. */
831 size_t fpos; /* Position of first non-zero byte in the fmask buffer. */
833 /* Compute tpos and fpos. */
834 for (tpos = 0; tmask[tpos] == 0; tpos++)
836 for (fpos = 0; fmask[fpos] == 0; fpos++)
/* The first significant byte must land at the same buffer position. */
839 if (toffset + tpos != foffset + fpos)
840 return 0; /* FALSE */
/* Compare overlapping bytes; any trailing excess must be all-zeros. */
848 for (i = 0; i < tsize; i++)
849 if (tmask[tpos + i] != fmask[fpos + i])
850 return 0; /* FALSE */
852 for ( ; i < fsize; i++)
854 return 0; /* FALSE */
858 for (i = 0; i < fsize; i++)
859 if (tmask[tpos + i] != fmask[fpos + i])
860 return 0; /* FALSE */
862 for ( ; i < tsize; i++)
864 return 0; /* FALSE */
/* Report the positions of the first significant bytes. */
868 *toffset_plus = tpos;
871 *foffset_plus = fpos;
/*
 * flow_rule_match_hash_get(): translate an rte_flow item list into a softnic
 * HASH table rule match. Concatenates the spec/mask bytes of consecutive
 * items into a flat key, then requires the resulting key mask to be
 * equivalent to the table's configured key mask.
 * NOTE(review): interior status checks, loop increments and closing braces
 * are elided in this chunk; code kept byte-identical.
 */
877 flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused,
878 struct pipeline *pipeline __rte_unused,
879 struct softnic_table *table,
880 const struct rte_flow_attr *attr __rte_unused,
881 const struct rte_flow_item *item,
882 struct softnic_table_rule_match *rule_match,
883 struct rte_flow_error *error)
885 struct softnic_table_rule_match_hash key, key_mask;
886 struct softnic_table_hash_params *params = &table->params.match.hash;
887 size_t offset = 0, length = 0, tpos, fpos;
890 memset(&key, 0, sizeof(key));
891 memset(&key_mask, 0, sizeof(key_mask));
893 /* VOID or disabled protos only, if any. */
894 status = flow_item_skip_disabled_protos(&item, 0, &offset, error);
/* A hash match needs at least one enabled protocol item. */
898 if (item->type == RTE_FLOW_ITEM_TYPE_END)
899 return rte_flow_error_set(error,
901 RTE_FLOW_ERROR_TYPE_ITEM,
903 "HASH: END detected too early");
905 /* VOID or any protocols (enabled or disabled). */
906 for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
907 union flow_item spec, mask;
909 int disabled, status;
911 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
914 status = flow_item_proto_preprocess(item,
/* Accumulated key must fit within the flat key buffer. */
923 if (length + size > sizeof(key)) {
927 return rte_flow_error_set(error,
929 RTE_FLOW_ERROR_TYPE_ITEM,
931 "HASH: Item too big");
/* Append this item's spec/mask bytes to the flat key/key-mask. */
934 memcpy(&key.key[length], &spec, size);
935 memcpy(&key_mask.key[length], &mask, size);
939 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
940 /* VOID or disabled protos only, if any. */
941 status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
946 if (item->type != RTE_FLOW_ITEM_TYPE_END)
947 return rte_flow_error_set(error,
949 RTE_FLOW_ERROR_TYPE_ITEM,
951 "HASH: Expecting END item");
954 /* Compare flow key mask against table key mask. */
/* Table key offsets are relative to the mbuf start; rebase the flow
 * offset the same way before comparing.
 */
955 offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
957 if (!hash_key_mask_is_same(params->key_mask,
965 return rte_flow_error_set(error,
967 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
969 "HASH: Item list is not observing the match format");
/* Copy the significant key bytes into the rule match. */
972 memset(rule_match, 0, sizeof(*rule_match));
973 rule_match->match_type = TABLE_HASH;
974 memcpy(&rule_match->match.hash.key[tpos],
976 RTE_MIN(sizeof(rule_match->match.hash.key) - tpos,
/*
 * flow_rule_match_get(): dispatch rule-match translation on the target
 * table's match type (ACL or HASH); any other type is rejected.
 * NOTE(review): case labels and argument lists are partially elided in this
 * chunk; code kept byte-identical.
 */
983 flow_rule_match_get(struct pmd_internals *softnic,
984 struct pipeline *pipeline,
985 struct softnic_table *table,
986 const struct rte_flow_attr *attr,
987 const struct rte_flow_item *item,
988 struct softnic_table_rule_match *rule_match,
989 struct rte_flow_error *error)
991 switch (table->params.match_type) {
993 return flow_rule_match_acl_get(softnic,
1004 return flow_rule_match_hash_get(softnic,
1015 return rte_flow_error_set(error,
1017 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1019 "Unsupported pipeline table match type");
/*
 * flow_rule_action_get(): translate an rte_flow action list into a softnic
 * table rule action. JUMP/QUEUE/RSS/DROP are mutually exclusive terminating
 * actions (tracked by n_jump_queue_rss_drop); COUNT may accompany them.
 * Every requested action must be enabled in the table's action profile.
 * NOTE(review): many interior lines (breaks, closing braces, some condition
 * lines) are elided in this chunk; code kept byte-identical.
 */
1024 flow_rule_action_get(struct pmd_internals *softnic,
1025 struct pipeline *pipeline,
1026 struct softnic_table *table,
1027 const struct rte_flow_attr *attr,
1028 const struct rte_flow_action *action,
1029 struct softnic_table_rule_action *rule_action,
1030 struct rte_flow_error *error __rte_unused)
1032 struct softnic_table_action_profile *profile;
1033 struct softnic_table_action_profile_params *params;
1034 int n_jump_queue_rss_drop = 0;
1037 profile = softnic_table_action_profile_find(softnic,
1038 table->params.action_profile_name);
1039 if (profile == NULL)
1040 return rte_flow_error_set(error,
1042 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1044 "JUMP: Table action profile");
1046 params = &profile->params;
1048 for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
1049 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1052 switch (action->type) {
/* JUMP: forward to another table of the same pipeline. */
1053 case RTE_FLOW_ACTION_TYPE_JUMP:
1055 const struct rte_flow_action_jump *conf = action->conf;
1056 struct flow_attr_map *map;
1059 return rte_flow_error_set(error,
1061 RTE_FLOW_ERROR_TYPE_ACTION,
1063 "JUMP: Null configuration");
1065 if (n_jump_queue_rss_drop)
1066 return rte_flow_error_set(error,
1068 RTE_FLOW_ERROR_TYPE_ACTION,
1070 "Only one termination action is"
1071 " allowed per flow");
1073 if ((params->action_mask &
1074 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1075 return rte_flow_error_set(error,
1077 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1079 "JUMP action not enabled for this table");
1081 n_jump_queue_rss_drop = 1;
/* Resolve the jump target group; must stay within this pipeline. */
1083 map = flow_attr_map_get(softnic,
1086 if (map == NULL || map->valid == 0)
1087 return rte_flow_error_set(error,
1089 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1091 "JUMP: Invalid group mapping");
1093 if (strcmp(pipeline->name, map->pipeline_name) != 0)
1094 return rte_flow_error_set(error,
1096 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1098 "JUMP: Jump to table in different pipeline");
1100 /* RTE_TABLE_ACTION_FWD */
1101 rule_action->fwd.action = RTE_PIPELINE_ACTION_TABLE;
1102 rule_action->fwd.id = map->table_id;
1103 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1105 } /* RTE_FLOW_ACTION_TYPE_JUMP */
/* QUEUE: forward to the pipeline output port bound to an RX queue. */
1107 case RTE_FLOW_ACTION_TYPE_QUEUE:
1109 char name[NAME_SIZE];
1110 struct rte_eth_dev *dev;
1111 const struct rte_flow_action_queue *conf = action->conf;
1116 return rte_flow_error_set(error,
1118 RTE_FLOW_ERROR_TYPE_ACTION,
1120 "QUEUE: Null configuration");
1122 if (n_jump_queue_rss_drop)
1123 return rte_flow_error_set(error,
1125 RTE_FLOW_ERROR_TYPE_ACTION,
1127 "Only one termination action is allowed"
1130 if ((params->action_mask &
1131 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1132 return rte_flow_error_set(error,
1134 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1136 "QUEUE action not enabled for this table");
1138 n_jump_queue_rss_drop = 1;
1140 dev = ETHDEV(softnic);
1142 conf->index >= dev->data->nb_rx_queues)
1143 return rte_flow_error_set(error,
1145 RTE_FLOW_ERROR_TYPE_ACTION,
1147 "QUEUE: Invalid RX queue ID");
/* Pipeline output ports for RX queues are named "RXQ<n>". */
1149 sprintf(name, "RXQ%u", (uint32_t)conf->index);
1151 status = softnic_pipeline_port_out_find(softnic,
1156 return rte_flow_error_set(error,
1158 RTE_FLOW_ERROR_TYPE_ACTION,
1160 "QUEUE: RX queue not accessible from this pipeline");
1162 /* RTE_TABLE_ACTION_FWD */
1163 rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT;
1164 rule_action->fwd.id = port_id;
1165 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1167 } /*RTE_FLOW_ACTION_TYPE_QUEUE */
/* RSS: load-balance across a power-of-2 set of RX queues. */
1169 case RTE_FLOW_ACTION_TYPE_RSS:
1171 const struct rte_flow_action_rss *conf = action->conf;
1175 return rte_flow_error_set(error,
1177 RTE_FLOW_ERROR_TYPE_ACTION,
1179 "RSS: Null configuration");
1181 if (!rte_is_power_of_2(conf->queue_num))
1182 return rte_flow_error_set(error,
1184 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1186 "RSS: Number of queues must be a power of 2");
1188 if (conf->queue_num > RTE_DIM(rule_action->lb.out))
1189 return rte_flow_error_set(error,
1191 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1193 "RSS: Number of queues too big");
1195 if (n_jump_queue_rss_drop)
1196 return rte_flow_error_set(error,
1198 RTE_FLOW_ERROR_TYPE_ACTION,
1200 "Only one termination action is allowed per flow");
/* RSS needs both FWD and LB enabled in the action profile. */
1202 if (((params->action_mask &
1203 (1LLU << RTE_TABLE_ACTION_FWD)) == 0) ||
1204 ((params->action_mask &
1205 (1LLU << RTE_TABLE_ACTION_LB)) == 0))
1206 return rte_flow_error_set(error,
1208 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1210 "RSS action not supported by this table");
1212 if (params->lb.out_offset !=
1213 pipeline->params.offset_port_id)
1214 return rte_flow_error_set(error,
1216 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1218 "RSS action not supported by this pipeline");
1220 n_jump_queue_rss_drop = 1;
1222 /* RTE_TABLE_ACTION_LB */
1223 for (i = 0; i < conf->queue_num; i++) {
1224 char name[NAME_SIZE];
1225 struct rte_eth_dev *dev;
1229 dev = ETHDEV(softnic);
1232 dev->data->nb_rx_queues)
1233 return rte_flow_error_set(error,
1235 RTE_FLOW_ERROR_TYPE_ACTION,
1237 "RSS: Invalid RX queue ID");
1239 sprintf(name, "RXQ%u",
1240 (uint32_t)conf->queue[i]);
1242 status = softnic_pipeline_port_out_find(softnic,
1247 return rte_flow_error_set(error,
1249 RTE_FLOW_ERROR_TYPE_ACTION,
1251 "RSS: RX queue not accessible from this pipeline");
1253 rule_action->lb.out[i] = port_id;
/* Replicate the queue pattern across the whole LB output table. */
1256 for ( ; i < RTE_DIM(rule_action->lb.out); i++)
1257 rule_action->lb.out[i] =
1258 rule_action->lb.out[i % conf->queue_num];
1260 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_LB;
1262 /* RTE_TABLE_ACTION_FWD */
1263 rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
1264 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1266 } /* RTE_FLOW_ACTION_TYPE_RSS */
/* DROP: terminate the flow by dropping packets; takes no config. */
1268 case RTE_FLOW_ACTION_TYPE_DROP:
1270 const void *conf = action->conf;
1273 return rte_flow_error_set(error,
1275 RTE_FLOW_ERROR_TYPE_ACTION,
1277 "DROP: No configuration required");
1279 if (n_jump_queue_rss_drop)
1280 return rte_flow_error_set(error,
1282 RTE_FLOW_ERROR_TYPE_ACTION,
1284 "Only one termination action is allowed per flow");
1285 if ((params->action_mask &
1286 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1287 return rte_flow_error_set(error,
1289 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1291 "DROP action not supported by this table");
1293 n_jump_queue_rss_drop = 1;
1295 /* RTE_TABLE_ACTION_FWD */
1296 rule_action->fwd.action = RTE_PIPELINE_ACTION_DROP;
1297 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1299 } /* RTE_FLOW_ACTION_TYPE_DROP */
/* COUNT: attach per-rule statistics; non-terminating. */
1301 case RTE_FLOW_ACTION_TYPE_COUNT:
1303 const struct rte_flow_action_count *conf = action->conf;
1306 return rte_flow_error_set(error,
1308 RTE_FLOW_ERROR_TYPE_ACTION,
1310 "COUNT: Null configuration");
1313 return rte_flow_error_set(error,
1315 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1317 "COUNT: Shared counters not supported");
1320 return rte_flow_error_set(error,
1322 RTE_FLOW_ERROR_TYPE_ACTION,
1324 "Only one COUNT action per flow");
1326 if ((params->action_mask &
1327 (1LLU << RTE_TABLE_ACTION_STATS)) == 0)
1328 return rte_flow_error_set(error,
1330 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1332 "COUNT action not supported by this table");
1336 /* RTE_TABLE_ACTION_STATS */
1337 rule_action->stats.n_packets = 0;
1338 rule_action->stats.n_bytes = 0;
1339 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
1341 } /* RTE_FLOW_ACTION_TYPE_COUNT */
/* Every flow must contain exactly one terminating action. */
1348 if (n_jump_queue_rss_drop == 0)
1349 return rte_flow_error_set(error,
1351 RTE_FLOW_ERROR_TYPE_ACTION,
1353 "Flow does not have any terminating action");
/*
 * pmd_flow_validate(): rte_flow_ops validate callback. Checks attr/item/
 * action for NULL, resolves the target pipeline table from the attributes,
 * then dry-runs both the match and action translation without installing
 * anything.
 * NOTE(review): several condition lines and the trailing "return 0" are
 * elided in this chunk; code kept byte-identical.
 */
1359 pmd_flow_validate(struct rte_eth_dev *dev,
1360 const struct rte_flow_attr *attr,
1361 const struct rte_flow_item item[],
1362 const struct rte_flow_action action[],
1363 struct rte_flow_error *error)
1365 struct softnic_table_rule_match rule_match;
1366 struct softnic_table_rule_action rule_action;
1368 struct pmd_internals *softnic = dev->data->dev_private;
1369 struct pipeline *pipeline;
1370 struct softnic_table *table;
1371 const char *pipeline_name = NULL;
1372 uint32_t table_id = 0;
1375 /* Check input parameters. */
1377 return rte_flow_error_set(error,
1379 RTE_FLOW_ERROR_TYPE_ATTR,
1383 return rte_flow_error_set(error,
1385 RTE_FLOW_ERROR_TYPE_ITEM,
1390 return rte_flow_error_set(error,
1392 RTE_FLOW_ERROR_TYPE_ACTION,
1396 /* Identify the pipeline table to add this flow to. */
1397 status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
1402 pipeline = softnic_pipeline_find(softnic, pipeline_name);
1403 if (pipeline == NULL)
1404 return rte_flow_error_set(error,
1406 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1408 "Invalid pipeline name");
1410 if (table_id >= pipeline->n_tables)
1411 return rte_flow_error_set(error,
1413 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1415 "Invalid pipeline table ID");
1417 table = &pipeline->table[table_id];
/* Translate the match clause (result discarded; validation only). */
1420 memset(&rule_match, 0, sizeof(rule_match));
1421 status = flow_rule_match_get(softnic,
/* Translate the action clause (result discarded; validation only). */
1432 memset(&rule_action, 0, sizeof(rule_action));
1433 status = flow_rule_action_get(softnic,
1446 const struct rte_flow_ops pmd_flow_ops = {
1447 .validate = pmd_flow_validate,