1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include "rte_eth_softnic_internals.h"
6 #include "rte_eth_softnic.h"
/* Network byte order is big-endian: alias the classic ntohs/ntohl names
 * to the DPDK big-endian-to-CPU conversion macros.
 */
8 #define rte_ntohs rte_be_to_cpu_16
9 #define rte_ntohl rte_be_to_cpu_32
/*
 * flow_attr_map_set():
 * Record that rte_flow group @group_id (on the ingress or egress side,
 * per @ingress) is backed by table @table_id of the named pipeline.
 * Rejects out-of-range group ids, a NULL pipeline name, unknown
 * pipelines, and table ids beyond the pipeline's table count.
 * NOTE(review): this extract elides some original lines (return values
 * of the guard branches, braces); comments cover only visible code.
 */
12 flow_attr_map_set(struct pmd_internals *softnic,
15 	const char *pipeline_name,
18 	struct pipeline *pipeline;
19 	struct flow_attr_map *map;
/* Validate arguments before touching any state. */
21 	if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
22 		pipeline_name == NULL)
25 	pipeline = softnic_pipeline_find(softnic, pipeline_name);
26 	if (pipeline == NULL ||
27 		table_id >= pipeline->n_tables)
/* Ingress and egress groups are kept in separate map arrays. */
30 	map = (ingress) ? &softnic->flow.ingress_map[group_id] :
31 		&softnic->flow.egress_map[group_id];
32 	strcpy(map->pipeline_name, pipeline_name);
33 	map->table_id = table_id;
/*
 * flow_attr_map_get():
 * Return the attribute map entry for (@group_id, direction).
 * The guard on group_id has its return statement elided in this view —
 * presumably it returns NULL; TODO confirm against the full source.
 */
39 struct flow_attr_map *
40 flow_attr_map_get(struct pmd_internals *softnic,
44 	if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
/* Pick the per-direction array, mirroring flow_attr_map_set(). */
47 	return (ingress) ? &softnic->flow.ingress_map[group_id] :
48 		&softnic->flow.egress_map[group_id];
/*
 * flow_pipeline_table_get():
 * Translate the rte_flow attributes of a rule into the (pipeline name,
 * table id) pair that should receive it, using the attribute map.
 * Errors are reported through rte_flow_error_set(); exactly one of
 * attr->ingress / attr->egress must be set.
 * NOTE(review): several lines (NULL checks, error codes, map NULL test)
 * are elided from this extract.
 */
52 flow_pipeline_table_get(struct pmd_internals *softnic,
53 	const struct rte_flow_attr *attr,
54 	const char **pipeline_name,
56 	struct rte_flow_error *error)
58 	struct flow_attr_map *map;
61 		return rte_flow_error_set(error,
63 			RTE_FLOW_ERROR_TYPE_ATTR,
/* Direction must be specified ... */
67 	if (!attr->ingress && !attr->egress)
68 		return rte_flow_error_set(error,
70 			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
72 			"Ingress/egress not specified");
/* ... and must be unambiguous. */
74 	if (attr->ingress && attr->egress)
75 		return rte_flow_error_set(error,
77 			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
79 			"Setting both ingress and egress is not allowed");
/* Look up the destination registered via flow_attr_map_set(). */
81 	map = flow_attr_map_get(softnic,
86 		return rte_flow_error_set(error,
88 			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
/* Output parameters: where the flow rule should be installed. */
93 	*pipeline_name = map->pipeline_name;
96 	*table_id = map->table_id;
/* Members of union flow_item (the union's opening/closing braces fall
 * outside this extract): one member per supported rte_flow item type,
 * so a single object can hold the spec, mask or last of any item.
 * raw[] pins the union's size to TABLE_RULE_MATCH_SIZE_MAX bytes.
 */
102 	uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
103 	struct rte_flow_item_eth eth;
104 	struct rte_flow_item_vlan vlan;
105 	struct rte_flow_item_ipv4 ipv4;
106 	struct rte_flow_item_ipv6 ipv6;
107 	struct rte_flow_item_icmp icmp;
108 	struct rte_flow_item_udp udp;
109 	struct rte_flow_item_tcp tcp;
110 	struct rte_flow_item_sctp sctp;
111 	struct rte_flow_item_vxlan vxlan;
112 	struct rte_flow_item_e_tag e_tag;
113 	struct rte_flow_item_nvgre nvgre;
114 	struct rte_flow_item_mpls mpls;
115 	struct rte_flow_item_gre gre;
116 	struct rte_flow_item_gtp gtp;
117 	struct rte_flow_item_esp esp;
118 	struct rte_flow_item_geneve geneve;
119 	struct rte_flow_item_vxlan_gpe vxlan_gpe;
120 	struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
121 	struct rte_flow_item_ipv6_ext ipv6_ext;
122 	struct rte_flow_item_icmp6 icmp6;
123 	struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
124 	struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
125 	struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
126 	struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
127 	struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
/* Default mask for RAW items: static const with no initializer, hence
 * all-zeroes (every byte is "don't care").
 */
130 static const union flow_item flow_item_raw_mask;
/*
 * flow_item_is_proto():
 * Classify an rte_flow item type: for protocol items, report the
 * default mask (used when the rule supplies none) and the item struct
 * size via the output parameters; non-protocol items hit the default
 * case and return 0 (FALSE). The "return 1" lines of the individual
 * cases are elided from this extract.
 */
133 flow_item_is_proto(enum rte_flow_item_type type,
138 	case RTE_FLOW_ITEM_TYPE_RAW:
139 		*mask = &flow_item_raw_mask;
140 		*size = sizeof(flow_item_raw_mask);
143 	case RTE_FLOW_ITEM_TYPE_ETH:
144 		*mask = &rte_flow_item_eth_mask;
145 		*size = sizeof(struct rte_flow_item_eth);
148 	case RTE_FLOW_ITEM_TYPE_VLAN:
149 		*mask = &rte_flow_item_vlan_mask;
150 		*size = sizeof(struct rte_flow_item_vlan);
153 	case RTE_FLOW_ITEM_TYPE_IPV4:
154 		*mask = &rte_flow_item_ipv4_mask;
155 		*size = sizeof(struct rte_flow_item_ipv4);
158 	case RTE_FLOW_ITEM_TYPE_IPV6:
159 		*mask = &rte_flow_item_ipv6_mask;
160 		*size = sizeof(struct rte_flow_item_ipv6);
163 	case RTE_FLOW_ITEM_TYPE_ICMP:
164 		*mask = &rte_flow_item_icmp_mask;
165 		*size = sizeof(struct rte_flow_item_icmp);
168 	case RTE_FLOW_ITEM_TYPE_UDP:
169 		*mask = &rte_flow_item_udp_mask;
170 		*size = sizeof(struct rte_flow_item_udp);
173 	case RTE_FLOW_ITEM_TYPE_TCP:
174 		*mask = &rte_flow_item_tcp_mask;
175 		*size = sizeof(struct rte_flow_item_tcp);
178 	case RTE_FLOW_ITEM_TYPE_SCTP:
179 		*mask = &rte_flow_item_sctp_mask;
180 		*size = sizeof(struct rte_flow_item_sctp);
183 	case RTE_FLOW_ITEM_TYPE_VXLAN:
184 		*mask = &rte_flow_item_vxlan_mask;
185 		*size = sizeof(struct rte_flow_item_vxlan);
188 	case RTE_FLOW_ITEM_TYPE_E_TAG:
189 		*mask = &rte_flow_item_e_tag_mask;
190 		*size = sizeof(struct rte_flow_item_e_tag);
193 	case RTE_FLOW_ITEM_TYPE_NVGRE:
194 		*mask = &rte_flow_item_nvgre_mask;
195 		*size = sizeof(struct rte_flow_item_nvgre);
198 	case RTE_FLOW_ITEM_TYPE_MPLS:
199 		*mask = &rte_flow_item_mpls_mask;
200 		*size = sizeof(struct rte_flow_item_mpls);
203 	case RTE_FLOW_ITEM_TYPE_GRE:
204 		*mask = &rte_flow_item_gre_mask;
205 		*size = sizeof(struct rte_flow_item_gre);
/* GTP-C and GTP-U share the generic GTP item layout. */
208 	case RTE_FLOW_ITEM_TYPE_GTP:
209 	case RTE_FLOW_ITEM_TYPE_GTPC:
210 	case RTE_FLOW_ITEM_TYPE_GTPU:
211 		*mask = &rte_flow_item_gtp_mask;
212 		*size = sizeof(struct rte_flow_item_gtp);
215 	case RTE_FLOW_ITEM_TYPE_ESP:
216 		*mask = &rte_flow_item_esp_mask;
217 		*size = sizeof(struct rte_flow_item_esp);
220 	case RTE_FLOW_ITEM_TYPE_GENEVE:
221 		*mask = &rte_flow_item_geneve_mask;
222 		*size = sizeof(struct rte_flow_item_geneve);
225 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
226 		*mask = &rte_flow_item_vxlan_gpe_mask;
227 		*size = sizeof(struct rte_flow_item_vxlan_gpe);
230 	case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
231 		*mask = &rte_flow_item_arp_eth_ipv4_mask;
232 		*size = sizeof(struct rte_flow_item_arp_eth_ipv4);
235 	case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
236 		*mask = &rte_flow_item_ipv6_ext_mask;
237 		*size = sizeof(struct rte_flow_item_ipv6_ext);
240 	case RTE_FLOW_ITEM_TYPE_ICMP6:
241 		*mask = &rte_flow_item_icmp6_mask;
242 		*size = sizeof(struct rte_flow_item_icmp6);
245 	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
246 		*mask = &rte_flow_item_icmp6_nd_ns_mask;
247 		*size = sizeof(struct rte_flow_item_icmp6_nd_ns);
250 	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
251 		*mask = &rte_flow_item_icmp6_nd_na_mask;
252 		*size = sizeof(struct rte_flow_item_icmp6_nd_na);
255 	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
256 		*mask = &rte_flow_item_icmp6_nd_opt_mask;
257 		*size = sizeof(struct rte_flow_item_icmp6_nd_opt);
260 	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
261 		*mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
262 		*size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
265 	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
266 		*mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
267 		*size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
/* Anything else is not a protocol item for this driver. */
270 	default: return 0; /* FALSE */
/*
 * flow_item_proto_preprocess():
 * Normalize one protocol item into fixed-size (spec, mask) byte arrays:
 *  - reject unsupported item types;
 *  - NULL spec => zero spec/mask and mark the item disabled;
 *  - missing mask => substitute the item type's default mask;
 *  - a mask of all zero bytes also marks the item disabled;
 *  - apply the mask over the spec so disabled bits read as zero;
 *  - item->last (ranges) is only accepted when it adds nothing, i.e.
 *    masked last equals masked spec; true ranges are rejected.
 * NOTE(review): loop bodies and some branches are elided in this
 * extract; comments describe only the visible statements.
 */
275 flow_item_proto_preprocess(const struct rte_flow_item *item,
276 	union flow_item *item_spec,
277 	union flow_item *item_mask,
280 	struct rte_flow_error *error)
282 	const void *mask_default;
/* Byte views over the output unions for the per-byte loops below. */
283 	uint8_t *spec = (uint8_t *)item_spec;
284 	uint8_t *mask = (uint8_t *)item_mask;
287 	if (!flow_item_is_proto(item->type, &mask_default, &size))
288 		return rte_flow_error_set(error,
290 			RTE_FLOW_ERROR_TYPE_ITEM,
292 			"Item type not supported");
296 	/* If spec is NULL, then last and mask also have to be NULL. */
297 	if (item->last || item->mask)
298 		return rte_flow_error_set(error,
300 			RTE_FLOW_ERROR_TYPE_ITEM,
302 			"Invalid item (NULL spec with non-NULL last or mask)");
304 	memset(item_spec, 0, size);
305 	memset(item_mask, 0, size);
307 	*item_disabled = 1; /* TRUE */
311 	memcpy(spec, item->spec, size);
316 	memcpy(mask, item->mask, size);
/* No user mask: fall back to the item type's default mask. */
318 	memcpy(mask, mask_default, size);
/* Scan for a non-zero mask byte; an all-zero mask disables the item. */
321 	for (i = 0; i < size; i++)
324 	*item_disabled = (i == size) ? 1 : 0;
326 	/* Apply mask over spec. */
327 	for (i = 0; i < size; i++)
335 		memcpy(last, item->last, size);
336 		for (i = 0; i < size; i++)
339 		/* check for range */
340 		for (i = 0; i < size; i++)
341 			if (last[i] != spec[i])
342 				return rte_flow_error_set(error,
344 					RTE_FLOW_ERROR_TYPE_ITEM,
346 					"Range not supported");
/*
 * Skip disabled protocol items and VOID items
 * until any of the mutually exclusive conditions
 * from the list below takes place:
 * (A) A protocol present in the proto_mask
 * is met (either ENABLED or DISABLED);
 * (B) A protocol NOT present in the proto_mask is met in ENABLED state;
 * (C) The END item is met.
 * NOTE(review): parts of the loop body (continue for VOID, status
 * handling, length accumulation) are elided from this extract.
 */
362 flow_item_skip_disabled_protos(const struct rte_flow_item **item,
365 	struct rte_flow_error *error)
369 	for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
370 		union flow_item spec, mask;
372 		int disabled = 0, status;
374 		if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
/* Normalize the item to learn whether it is enabled. */
377 		status = flow_item_proto_preprocess(*item,
/* Stop on condition (A) or (B) from the comment above. */
386 		if ((proto_mask & (1LLU << (*item)->type)) ||
/* Bit set of the two L3 item types accepted as the outer header. */
399 #define FLOW_ITEM_PROTO_IP \
400 	((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
401 	(1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
/* Advance *item past consecutive VOID items (loop body elided here). */
404 flow_item_skip_void(const struct rte_flow_item **item)
407 		if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
/* IANA IP protocol numbers: TCP=6, UDP=17, SCTP=132. Used to verify
 * that the L4 item type matches the IP header's protocol field.
 */
411 #define IP_PROTOCOL_TCP 0x06
412 #define IP_PROTOCOL_UDP 0x11
413 #define IP_PROTOCOL_SCTP 0x84
/*
 * mask_to_depth():
 * Convert a CIDR-style 64-bit mask (contiguous high bits set) into a
 * prefix depth in [0, 64]. UINT64_MAX is the fully-specified case.
 * NOTE(review): a statement between the two if-checks is elided —
 * presumably "mask = ~mask;" so that (mask & (mask + 1)) rejects
 * non-contiguous masks — TODO confirm against the full source.
 */
416 mask_to_depth(uint64_t mask,
421 	if (mask == UINT64_MAX) {
/* Non-contiguous bit pattern => not a valid prefix mask. */
430 	if (mask & (mask + 1))
/* popcount of the don't-care bits gives 64 - depth. */
433 	n = __builtin_popcountll(mask);
435 		*depth = (uint32_t)(64 - n);
/*
 * ipv4_mask_to_depth():
 * Reuse mask_to_depth() for a 32-bit IPv4 mask by forcing the upper
 * 32 bits to all-ones, making the value a valid 64-bit prefix mask.
 */
441 ipv4_mask_to_depth(uint32_t mask,
447 	status = mask_to_depth(mask | (UINT64_MAX << 32), &d);
/*
 * ipv6_mask_to_depth():
 * Treat the 16-byte IPv6 mask as two big-endian 64-bit halves and
 * validate each half with mask_to_depth(); the elided tail presumably
 * requires the high half to be fully specified before the low half
 * contributes depth — TODO confirm.
 * NOTE(review): the (uint64_t *) cast assumes 8-byte alignment of the
 * mask buffer and relies on type punning — worth confirming.
 */
459 ipv6_mask_to_depth(uint8_t *mask,
462 	uint64_t *m = (uint64_t *)mask;
463 	uint64_t m0 = rte_be_to_cpu_64(m[0]);
464 	uint64_t m1 = rte_be_to_cpu_64(m[1]);
468 	status = mask_to_depth(m0, &d0);
472 	status = mask_to_depth(m1, &d1);
/*
 * port_mask_to_range():
 * Convert (port, prefix-style port_mask) into the inclusive range
 * [p0, p1]: p0 clears the don't-care bits, p1 sets them. The mask is
 * validated by widening it to 64 bits and probing mask_to_depth()
 * (depth output unused, hence NULL).
 */
486 port_mask_to_range(uint16_t port,
494 	status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL);
498 	p0 = port & port_mask;
499 	p1 = p0 | ~port_mask;
/*
 * flow_rule_match_acl_get():
 * Build a softnic TABLE_ACL rule match from an rte_flow item list.
 * Expected item sequence: [VOID/disabled]* IPv4|IPv6 [VOID]*
 * TCP|UDP|SCTP [VOID/disabled]* END. The IP item supplies the
 * source/destination prefixes and the L4 protocol (which must be
 * exact-match); the L4 item supplies the port ranges; the attr
 * priority becomes the ACL priority.
 * NOTE(review): status checks, braces and some returns are elided
 * from this extract; comments cover only the visible statements.
 */
511 flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
512 	struct pipeline *pipeline __rte_unused,
513 	struct softnic_table *table __rte_unused,
514 	const struct rte_flow_attr *attr,
515 	const struct rte_flow_item *item,
516 	struct softnic_table_rule_match *rule_match,
517 	struct rte_flow_error *error)
519 	union flow_item spec, mask;
520 	size_t size, length = 0;
521 	int disabled = 0, status;
522 	uint8_t ip_proto, ip_proto_mask;
524 	memset(rule_match, 0, sizeof(*rule_match));
525 	rule_match->match_type = TABLE_ACL;
526 	rule_match->match.acl.priority = attr->priority;
528 	/* VOID or disabled protos only, if any. */
529 	status = flow_item_skip_disabled_protos(&item,
530 		FLOW_ITEM_PROTO_IP, &length, error);
/* Normalize the L3 item before dispatching on its type. */
535 	status = flow_item_proto_preprocess(item, &spec, &mask,
536 		&size, &disabled, error);
540 	switch (item->type) {
541 	case RTE_FLOW_ITEM_TYPE_IPV4:
543 		uint32_t sa_depth, da_depth;
/* Addresses come in network byte order; masks must be CIDR-style. */
545 		status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr),
548 			return rte_flow_error_set(error,
550 				RTE_FLOW_ERROR_TYPE_ITEM,
552 				"ACL: Illegal IPv4 header source address mask");
554 		status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr),
557 			return rte_flow_error_set(error,
559 				RTE_FLOW_ERROR_TYPE_ITEM,
561 				"ACL: Illegal IPv4 header destination address mask");
563 		ip_proto = spec.ipv4.hdr.next_proto_id;
564 		ip_proto_mask = mask.ipv4.hdr.next_proto_id;
/* ip_version: 1 selects IPv4 in the softnic ACL match. */
566 		rule_match->match.acl.ip_version = 1;
567 		rule_match->match.acl.ipv4.sa =
568 			rte_ntohl(spec.ipv4.hdr.src_addr);
569 		rule_match->match.acl.ipv4.da =
570 			rte_ntohl(spec.ipv4.hdr.dst_addr);
571 		rule_match->match.acl.sa_depth = sa_depth;
572 		rule_match->match.acl.da_depth = da_depth;
573 		rule_match->match.acl.proto = ip_proto;
574 		rule_match->match.acl.proto_mask = ip_proto_mask;
576 	} /* RTE_FLOW_ITEM_TYPE_IPV4 */
578 	case RTE_FLOW_ITEM_TYPE_IPV6:
580 		uint32_t sa_depth, da_depth;
582 		status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth);
584 			return rte_flow_error_set(error,
586 				RTE_FLOW_ERROR_TYPE_ITEM,
588 				"ACL: Illegal IPv6 header source address mask");
590 		status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth);
592 			return rte_flow_error_set(error,
594 				RTE_FLOW_ERROR_TYPE_ITEM,
596 				"ACL: Illegal IPv6 header destination address mask");
598 		ip_proto = spec.ipv6.hdr.proto;
599 		ip_proto_mask = mask.ipv6.hdr.proto;
/* ip_version: 0 selects IPv6 in the softnic ACL match. */
601 		rule_match->match.acl.ip_version = 0;
602 		memcpy(rule_match->match.acl.ipv6.sa,
603 			spec.ipv6.hdr.src_addr,
604 			sizeof(spec.ipv6.hdr.src_addr));
605 		memcpy(rule_match->match.acl.ipv6.da,
606 			spec.ipv6.hdr.dst_addr,
607 			sizeof(spec.ipv6.hdr.dst_addr));
608 		rule_match->match.acl.sa_depth = sa_depth;
609 		rule_match->match.acl.da_depth = da_depth;
610 		rule_match->match.acl.proto = ip_proto;
611 		rule_match->match.acl.proto_mask = ip_proto_mask;
613 	} /* RTE_FLOW_ITEM_TYPE_IPV6 */
/* Any other item type here: the ACL format demands an IP header. */
616 		return rte_flow_error_set(error,
618 			RTE_FLOW_ERROR_TYPE_ITEM,
620 			"ACL: IP protocol required");
/* The L4 protocol field must be exact-match for the checks below. */
623 	if (ip_proto_mask != UINT8_MAX)
624 		return rte_flow_error_set(error,
626 			RTE_FLOW_ERROR_TYPE_ITEM,
628 			"ACL: Illegal IP protocol mask");
632 	/* VOID only, if any. */
633 	flow_item_skip_void(&item);
635 	/* TCP/UDP/SCTP only. */
636 	status = flow_item_proto_preprocess(item, &spec, &mask,
637 		&size, &disabled, error);
641 	switch (item->type) {
642 	case RTE_FLOW_ITEM_TYPE_TCP:
644 		uint16_t sp0, sp1, dp0, dp1;
646 		if (ip_proto != IP_PROTOCOL_TCP)
647 			return rte_flow_error_set(error,
649 				RTE_FLOW_ERROR_TYPE_ITEM,
651 				"ACL: Item type is TCP, but IP protocol is not");
653 		status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port),
654 			rte_ntohs(mask.tcp.hdr.src_port),
659 			return rte_flow_error_set(error,
661 				RTE_FLOW_ERROR_TYPE_ITEM,
663 				"ACL: Illegal TCP source port mask");
665 		status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port),
666 			rte_ntohs(mask.tcp.hdr.dst_port),
671 			return rte_flow_error_set(error,
673 				RTE_FLOW_ERROR_TYPE_ITEM,
675 				"ACL: Illegal TCP destination port mask");
677 		rule_match->match.acl.sp0 = sp0;
678 		rule_match->match.acl.sp1 = sp1;
679 		rule_match->match.acl.dp0 = dp0;
680 		rule_match->match.acl.dp1 = dp1;
683 	} /* RTE_FLOW_ITEM_TYPE_TCP */
685 	case RTE_FLOW_ITEM_TYPE_UDP:
687 		uint16_t sp0, sp1, dp0, dp1;
689 		if (ip_proto != IP_PROTOCOL_UDP)
690 			return rte_flow_error_set(error,
692 				RTE_FLOW_ERROR_TYPE_ITEM,
694 				"ACL: Item type is UDP, but IP protocol is not");
696 		status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port),
697 			rte_ntohs(mask.udp.hdr.src_port),
701 			return rte_flow_error_set(error,
703 				RTE_FLOW_ERROR_TYPE_ITEM,
705 				"ACL: Illegal UDP source port mask");
707 		status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port),
708 			rte_ntohs(mask.udp.hdr.dst_port),
712 			return rte_flow_error_set(error,
714 				RTE_FLOW_ERROR_TYPE_ITEM,
716 				"ACL: Illegal UDP destination port mask");
718 		rule_match->match.acl.sp0 = sp0;
719 		rule_match->match.acl.sp1 = sp1;
720 		rule_match->match.acl.dp0 = dp0;
721 		rule_match->match.acl.dp1 = dp1;
724 	} /* RTE_FLOW_ITEM_TYPE_UDP */
726 	case RTE_FLOW_ITEM_TYPE_SCTP:
728 		uint16_t sp0, sp1, dp0, dp1;
730 		if (ip_proto != IP_PROTOCOL_SCTP)
731 			return rte_flow_error_set(error,
733 				RTE_FLOW_ERROR_TYPE_ITEM,
735 				"ACL: Item type is SCTP, but IP protocol is not");
737 		status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port),
738 			rte_ntohs(mask.sctp.hdr.src_port),
743 			return rte_flow_error_set(error,
745 				RTE_FLOW_ERROR_TYPE_ITEM,
747 				"ACL: Illegal SCTP source port mask");
749 		status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port),
750 			rte_ntohs(mask.sctp.hdr.dst_port),
754 			return rte_flow_error_set(error,
756 				RTE_FLOW_ERROR_TYPE_ITEM,
758 				"ACL: Illegal SCTP destination port mask");
760 		rule_match->match.acl.sp0 = sp0;
761 		rule_match->match.acl.sp1 = sp1;
762 		rule_match->match.acl.dp0 = dp0;
763 		rule_match->match.acl.dp1 = dp1;
766 	} /* RTE_FLOW_ITEM_TYPE_SCTP */
/* Any other item type here: the ACL format demands an L4 header. */
769 		return rte_flow_error_set(error,
771 			RTE_FLOW_ERROR_TYPE_ITEM,
773 			"ACL: TCP/UDP/SCTP required");
778 	/* VOID or disabled protos only, if any. */
779 	status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
/* Nothing but END may follow the L4 header. */
784 	if (item->type != RTE_FLOW_ITEM_TYPE_END)
785 		return rte_flow_error_set(error,
787 			RTE_FLOW_ERROR_TYPE_ITEM,
789 			"ACL: Expecting END item");
795 * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
797 * They are located within a larger buffer at offsets *toffset* and *foffset*
798 * respectively. Both *tmask* and *fmask* represent bitmasks for the larger
800 * Question: are the two masks equivalent?
803 * 1. Offset basically indicates that the first offset bytes in the buffer
804 * are "don't care", so offset is equivalent to pre-pending an "all-zeros"
805 * array of *offset* bytes to the *mask*.
806 * 2. Each *mask* might contain a number of zero bytes at the beginning or
808 * 3. Bytes in the larger buffer after the end of the *mask* are also considered
809 * "don't care", so they are equivalent to appending an "all-zeros" array of
810 * bytes to the *mask*.
813 * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes
814 * tmask = [00 22 00 33 00], toffset = 2, tsize = 5
815 * => buffer mask = [00 00 00 22 00 33 00 00]
816 * fmask = [22 00 33], foffset = 3, fsize = 3 =>
817 * => buffer mask = [00 00 00 22 00 33 00 00]
818 * Therefore, the tmask and fmask from this example are equivalent.
/*
 * hash_key_mask_is_same():
 * Decide whether two byte masks (table mask vs. flow mask), each with
 * its own offset into a common buffer, denote the same effective
 * buffer mask (see the equivalence rules in the comment block above).
 * Returns 0 (FALSE) on any mismatch; on success the elided tail
 * returns TRUE after exporting the adjusted offsets. *toffset_plus /
 * *foffset_plus receive the positions of the first non-zero mask
 * byte (assignments of the absolute offsets are elided here).
 * NOTE(review): the leading-zero scan loops have no upper bound in
 * view — an all-zero mask would read past its buffer; worth checking
 * against the full source.
 */
821 hash_key_mask_is_same(uint8_t *tmask,
827 	size_t *toffset_plus,
828 	size_t *foffset_plus)
830 	size_t tpos; /* Position of first non-zero byte in the tmask buffer. */
831 	size_t fpos; /* Position of first non-zero byte in the fmask buffer. */
833 	/* Compute tpos and fpos. */
834 	for (tpos = 0; tmask[tpos] == 0; tpos++)
836 	for (fpos = 0; fmask[fpos] == 0; fpos++)
/* The first meaningful byte must land at the same buffer position. */
839 	if (toffset + tpos != foffset + fpos)
840 		return 0; /* FALSE */
/* Compare overlapping bytes; any excess bytes must be zero. */
848 		for (i = 0; i < tsize; i++)
849 			if (tmask[tpos + i] != fmask[fpos + i])
850 				return 0; /* FALSE */
852 		for ( ; i < fsize; i++)
854 				return 0; /* FALSE */
/* Symmetric case: fmask is the shorter of the two. */
858 		for (i = 0; i < fsize; i++)
859 			if (tmask[tpos + i] != fmask[fpos + i])
860 				return 0; /* FALSE */
862 		for ( ; i < tsize; i++)
864 				return 0; /* FALSE */
868 	*toffset_plus = tpos;
871 	*foffset_plus = fpos;
/*
 * flow_rule_match_hash_get():
 * Build a softnic TABLE_HASH rule match: concatenate the spec/mask
 * bytes of all enabled items into a flat key, then verify that the
 * accumulated key mask is equivalent to the table's configured key
 * mask (hash_key_mask_is_same()) before copying the key into
 * rule_match at the table's key position.
 * NOTE(review): status checks, length accumulation and several
 * arguments are elided from this extract.
 */
877 flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused,
878 	struct pipeline *pipeline __rte_unused,
879 	struct softnic_table *table,
880 	const struct rte_flow_attr *attr __rte_unused,
881 	const struct rte_flow_item *item,
882 	struct softnic_table_rule_match *rule_match,
883 	struct rte_flow_error *error)
885 	struct softnic_table_rule_match_hash key, key_mask;
886 	struct softnic_table_hash_params *params = &table->params.match.hash;
887 	size_t offset = 0, length = 0, tpos, fpos;
890 	memset(&key, 0, sizeof(key));
891 	memset(&key_mask, 0, sizeof(key_mask));
893 	/* VOID or disabled protos only, if any. */
894 	status = flow_item_skip_disabled_protos(&item, 0, &offset, error);
898 	if (item->type == RTE_FLOW_ITEM_TYPE_END)
899 		return rte_flow_error_set(error,
901 			RTE_FLOW_ERROR_TYPE_ITEM,
903 			"HASH: END detected too early");
905 	/* VOID or any protocols (enabled or disabled). */
906 	for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
907 		union flow_item spec, mask;
909 		int disabled, status;
911 		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
914 		status = flow_item_proto_preprocess(item,
/* Reject item lists whose flattened key would overflow the buffer. */
923 		if (length + size > sizeof(key)) {
927 			return rte_flow_error_set(error,
929 				RTE_FLOW_ERROR_TYPE_ITEM,
931 				"HASH: Item too big");
/* Append this item's bytes to the flat key and key mask. */
934 		memcpy(&key.key[length], &spec, size);
935 		memcpy(&key_mask.key[length], &mask, size);
939 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
940 		/* VOID or disabled protos only, if any. */
941 		status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
946 		if (item->type != RTE_FLOW_ITEM_TYPE_END)
947 			return rte_flow_error_set(error,
949 				RTE_FLOW_ERROR_TYPE_ITEM,
951 				"HASH: Expecting END item");
954 	/* Compare flow key mask against table key mask. */
/* Table key offsets are relative to the mbuf start; rebase the
 * packet-relative offset accordingly.
 */
955 	offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
957 	if (!hash_key_mask_is_same(params->key_mask,
965 		return rte_flow_error_set(error,
967 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
969 			"HASH: Item list is not observing the match format");
972 	memset(rule_match, 0, sizeof(*rule_match));
973 	rule_match->match_type = TABLE_HASH;
/* Place the key at the table-mask position, clamped to the buffer. */
974 	memcpy(&rule_match->match.hash.key[tpos],
976 		RTE_MIN(sizeof(rule_match->match.hash.key) - tpos,
/*
 * flow_rule_match_get():
 * Dispatch rule-match construction on the target table's match type:
 * ACL and HASH tables are supported; anything else is reported as an
 * unsupported match type through rte_flow_error_set().
 */
983 flow_rule_match_get(struct pmd_internals *softnic,
984 	struct pipeline *pipeline,
985 	struct softnic_table *table,
986 	const struct rte_flow_attr *attr,
987 	const struct rte_flow_item *item,
988 	struct softnic_table_rule_match *rule_match,
989 	struct rte_flow_error *error)
991 	switch (table->params.match_type) {
993 		return flow_rule_match_acl_get(softnic,
1004 		return flow_rule_match_hash_get(softnic,
1013 		return rte_flow_error_set(error,
1015 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1017 			"Unsupported pipeline table match type");
/*
 * pmd_flow_validate():
 * rte_flow_ops.validate handler. Checks the attr/item/action pointers,
 * resolves the target pipeline table from the flow attributes, and
 * dry-runs the rule-match construction; errors are reported via
 * rte_flow_error_set(). (The action-side validation and the final
 * return are beyond this extract.)
 * NOTE(review): the NULL-pointer guard conditions and several status
 * checks are elided from this view.
 */
1022 pmd_flow_validate(struct rte_eth_dev *dev,
1023 	const struct rte_flow_attr *attr,
1024 	const struct rte_flow_item item[],
1025 	const struct rte_flow_action action[],
1026 	struct rte_flow_error *error)
1028 	struct softnic_table_rule_match rule_match;
1030 	struct pmd_internals *softnic = dev->data->dev_private;
1031 	struct pipeline *pipeline;
1032 	struct softnic_table *table;
1033 	const char *pipeline_name = NULL;
1034 	uint32_t table_id = 0;
1037 	/* Check input parameters. */
1039 		return rte_flow_error_set(error,
1041 			RTE_FLOW_ERROR_TYPE_ATTR,
1045 		return rte_flow_error_set(error,
1047 			RTE_FLOW_ERROR_TYPE_ITEM,
1052 		return rte_flow_error_set(error,
1054 			RTE_FLOW_ERROR_TYPE_ACTION,
1058 	/* Identify the pipeline table to add this flow to. */
1059 	status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
/* Re-validate the resolved pipeline/table against current state. */
1064 	pipeline = softnic_pipeline_find(softnic, pipeline_name);
1065 	if (pipeline == NULL)
1066 		return rte_flow_error_set(error,
1068 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1070 			"Invalid pipeline name");
1072 	if (table_id >= pipeline->n_tables)
1073 		return rte_flow_error_set(error,
1075 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1077 			"Invalid pipeline table ID");
1079 	table = &pipeline->table[table_id];
/* Dry-run the item-list-to-rule-match conversion. */
1082 	memset(&rule_match, 0, sizeof(rule_match));
1083 	status = flow_rule_match_get(softnic,
1096 const struct rte_flow_ops pmd_flow_ops = {
1097 .validate = pmd_flow_validate,