net/softnic: map flow match to hash table
[dpdk.git] / drivers / net / softnic / rte_eth_softnic_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
#include <string.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"
7
8 #define rte_ntohs rte_be_to_cpu_16
9 #define rte_ntohl rte_be_to_cpu_32
10
11 int
12 flow_attr_map_set(struct pmd_internals *softnic,
13                 uint32_t group_id,
14                 int ingress,
15                 const char *pipeline_name,
16                 uint32_t table_id)
17 {
18         struct pipeline *pipeline;
19         struct flow_attr_map *map;
20
21         if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
22                         pipeline_name == NULL)
23                 return -1;
24
25         pipeline = softnic_pipeline_find(softnic, pipeline_name);
26         if (pipeline == NULL ||
27                         table_id >= pipeline->n_tables)
28                 return -1;
29
30         map = (ingress) ? &softnic->flow.ingress_map[group_id] :
31                 &softnic->flow.egress_map[group_id];
32         strcpy(map->pipeline_name, pipeline_name);
33         map->table_id = table_id;
34         map->valid = 1;
35
36         return 0;
37 }
38
39 struct flow_attr_map *
40 flow_attr_map_get(struct pmd_internals *softnic,
41                 uint32_t group_id,
42                 int ingress)
43 {
44         if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
45                 return NULL;
46
47         return (ingress) ? &softnic->flow.ingress_map[group_id] :
48                 &softnic->flow.egress_map[group_id];
49 }
50
51 static int
52 flow_pipeline_table_get(struct pmd_internals *softnic,
53                 const struct rte_flow_attr *attr,
54                 const char **pipeline_name,
55                 uint32_t *table_id,
56                 struct rte_flow_error *error)
57 {
58         struct flow_attr_map *map;
59
60         if (attr == NULL)
61                 return rte_flow_error_set(error,
62                                 EINVAL,
63                                 RTE_FLOW_ERROR_TYPE_ATTR,
64                                 NULL,
65                                 "Null attr");
66
67         if (!attr->ingress && !attr->egress)
68                 return rte_flow_error_set(error,
69                                 EINVAL,
70                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
71                                 attr,
72                                 "Ingress/egress not specified");
73
74         if (attr->ingress && attr->egress)
75                 return rte_flow_error_set(error,
76                                 EINVAL,
77                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
78                                 attr,
79                                 "Setting both ingress and egress is not allowed");
80
81         map = flow_attr_map_get(softnic,
82                         attr->group,
83                         attr->ingress);
84         if (map == NULL ||
85                         map->valid == 0)
86                 return rte_flow_error_set(error,
87                                 EINVAL,
88                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
89                                 attr,
90                                 "Invalid group ID");
91
92         if (pipeline_name)
93                 *pipeline_name = map->pipeline_name;
94
95         if (table_id)
96                 *table_id = map->table_id;
97
98         return 0;
99 }
100
/*
 * Scratch storage for one rte_flow item's spec/mask/last value.
 * The raw[] view makes the buffer byte-addressable for generic mask/spec
 * processing; its size (TABLE_RULE_MATCH_SIZE_MAX) bounds every supported
 * protocol header below.
 */
union flow_item {
	uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
	struct rte_flow_item_eth eth;
	struct rte_flow_item_vlan vlan;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_icmp icmp;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_sctp sctp;
	struct rte_flow_item_vxlan vxlan;
	struct rte_flow_item_e_tag e_tag;
	struct rte_flow_item_nvgre nvgre;
	struct rte_flow_item_mpls mpls;
	struct rte_flow_item_gre gre;
	struct rte_flow_item_gtp gtp;
	struct rte_flow_item_esp esp;
	struct rte_flow_item_geneve geneve;
	struct rte_flow_item_vxlan_gpe vxlan_gpe;
	struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
	struct rte_flow_item_ipv6_ext ipv6_ext;
	struct rte_flow_item_icmp6 icmp6;
	struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
	struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
	struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
	struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
	struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
};
129
/* Default mask for RAW items: all-zero (zero-initialized static), so a RAW
 * item without an explicit mask is treated as disabled by
 * flow_item_proto_preprocess().
 */
static const union flow_item flow_item_raw_mask;
131
/*
 * Report whether *type* is a protocol item this driver can process.
 * On success (return 1) also provide the protocol's default mask pointer
 * and the size in bytes of its rte_flow item structure; returns 0 for
 * unsupported item types (outputs untouched).
 */
static int
flow_item_is_proto(enum rte_flow_item_type type,
	const void **mask,
	size_t *size)
{
	switch (type) {
	/* RAW uses the local all-zero mask rather than an rte_flow default. */
	case RTE_FLOW_ITEM_TYPE_RAW:
		*mask = &flow_item_raw_mask;
		*size = sizeof(flow_item_raw_mask);
		return 1; /* TRUE */

	case RTE_FLOW_ITEM_TYPE_ETH:
		*mask = &rte_flow_item_eth_mask;
		*size = sizeof(struct rte_flow_item_eth);
		return 1; /* TRUE */

	case RTE_FLOW_ITEM_TYPE_VLAN:
		*mask = &rte_flow_item_vlan_mask;
		*size = sizeof(struct rte_flow_item_vlan);
		return 1;

	case RTE_FLOW_ITEM_TYPE_IPV4:
		*mask = &rte_flow_item_ipv4_mask;
		*size = sizeof(struct rte_flow_item_ipv4);
		return 1;

	case RTE_FLOW_ITEM_TYPE_IPV6:
		*mask = &rte_flow_item_ipv6_mask;
		*size = sizeof(struct rte_flow_item_ipv6);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP:
		*mask = &rte_flow_item_icmp_mask;
		*size = sizeof(struct rte_flow_item_icmp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_UDP:
		*mask = &rte_flow_item_udp_mask;
		*size = sizeof(struct rte_flow_item_udp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_TCP:
		*mask = &rte_flow_item_tcp_mask;
		*size = sizeof(struct rte_flow_item_tcp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_SCTP:
		*mask = &rte_flow_item_sctp_mask;
		*size = sizeof(struct rte_flow_item_sctp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_VXLAN:
		*mask = &rte_flow_item_vxlan_mask;
		*size = sizeof(struct rte_flow_item_vxlan);
		return 1;

	case RTE_FLOW_ITEM_TYPE_E_TAG:
		*mask = &rte_flow_item_e_tag_mask;
		*size = sizeof(struct rte_flow_item_e_tag);
		return 1;

	case RTE_FLOW_ITEM_TYPE_NVGRE:
		*mask = &rte_flow_item_nvgre_mask;
		*size = sizeof(struct rte_flow_item_nvgre);
		return 1;

	case RTE_FLOW_ITEM_TYPE_MPLS:
		*mask = &rte_flow_item_mpls_mask;
		*size = sizeof(struct rte_flow_item_mpls);
		return 1;

	case RTE_FLOW_ITEM_TYPE_GRE:
		*mask = &rte_flow_item_gre_mask;
		*size = sizeof(struct rte_flow_item_gre);
		return 1;

	/* GTP, GTP-C and GTP-U all share the generic GTP item layout. */
	case RTE_FLOW_ITEM_TYPE_GTP:
	case RTE_FLOW_ITEM_TYPE_GTPC:
	case RTE_FLOW_ITEM_TYPE_GTPU:
		*mask = &rte_flow_item_gtp_mask;
		*size = sizeof(struct rte_flow_item_gtp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ESP:
		*mask = &rte_flow_item_esp_mask;
		*size = sizeof(struct rte_flow_item_esp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_GENEVE:
		*mask = &rte_flow_item_geneve_mask;
		*size = sizeof(struct rte_flow_item_geneve);
		return 1;

	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		*mask = &rte_flow_item_vxlan_gpe_mask;
		*size = sizeof(struct rte_flow_item_vxlan_gpe);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
		*mask = &rte_flow_item_arp_eth_ipv4_mask;
		*size = sizeof(struct rte_flow_item_arp_eth_ipv4);
		return 1;

	case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
		*mask = &rte_flow_item_ipv6_ext_mask;
		*size = sizeof(struct rte_flow_item_ipv6_ext);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6:
		*mask = &rte_flow_item_icmp6_mask;
		*size = sizeof(struct rte_flow_item_icmp6);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
		*mask = &rte_flow_item_icmp6_nd_ns_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_ns);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
		*mask = &rte_flow_item_icmp6_nd_na_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_na);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
		*mask = &rte_flow_item_icmp6_nd_opt_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_opt);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
		*mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
		*mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
		return 1;

	default: return 0; /* FALSE */
	}
}
273
/*
 * Normalize one flow item into fixed-size spec/mask buffers:
 * - *item_spec gets the item spec AND-ed with the effective mask;
 * - *item_mask gets item->mask, or the protocol's default mask when NULL;
 * - *item_size gets the protocol item structure size;
 * - *item_disabled is set to 1 when the effective mask is all-zero;
 * - item->last (ranges) is rejected unless, under the mask, last == spec.
 * Returns 0 on success or the value produced by rte_flow_error_set().
 */
static int
flow_item_proto_preprocess(const struct rte_flow_item *item,
	union flow_item *item_spec,
	union flow_item *item_mask,
	size_t *item_size,
	int *item_disabled,
	struct rte_flow_error *error)
{
	const void *mask_default;
	uint8_t *spec = (uint8_t *)item_spec;
	uint8_t *mask = (uint8_t *)item_mask;
	size_t size, i;

	if (!flow_item_is_proto(item->type, &mask_default, &size))
		return rte_flow_error_set(error,
			ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"Item type not supported");

	/* spec */
	if (!item->spec) {
		/* If spec is NULL, then last and mask also have to be NULL. */
		if (item->last || item->mask)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Invalid item (NULL spec with non-NULL last or mask)");

		/* NULL spec: match-anything item, reported as disabled. */
		memset(item_spec, 0, size);
		memset(item_mask, 0, size);
		*item_size = size;
		*item_disabled = 1; /* TRUE */
		return 0;
	}

	memcpy(spec, item->spec, size);
	*item_size = size;

	/* mask */
	if (item->mask)
		memcpy(mask, item->mask, size);
	else
		memcpy(mask, mask_default, size);

	/* disabled: an all-zero effective mask matches every packet. */
	for (i = 0; i < size; i++)
		if (mask[i])
			break;
	*item_disabled = (i == size) ? 1 : 0;

	/* Apply mask over spec. */
	for (i = 0; i < size; i++)
		spec[i] &= mask[i];

	/* last */
	if (item->last) {
		/* VLA is bounded: size <= TABLE_RULE_MATCH_SIZE_MAX
		 * (every case in flow_item_is_proto fits the union).
		 */
		uint8_t last[size];

		/* init last */
		memcpy(last, item->last, size);
		for (i = 0; i < size; i++)
			last[i] &= mask[i];

		/* check for range: only degenerate ranges (last == spec
		 * under the mask) are accepted.
		 */
		for (i = 0; i < size; i++)
			if (last[i] != spec[i])
				return rte_flow_error_set(error,
					ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Range not supported");
	}

	return 0;
}
351
352 /***
353  * Skip disabled protocol items and VOID items
354  * until any of the mutually exclusive conditions
355  * from the list below takes place:
356  *    (A) A protocol present in the proto_mask
357  *        is met (either ENABLED or DISABLED);
358  *    (B) A protocol NOT present in the proto_mask is met in ENABLED state;
359  *    (C) The END item is met.
360  */
static int
flow_item_skip_disabled_protos(const struct rte_flow_item **item,
	uint64_t proto_mask,
	size_t *length,
	struct rte_flow_error *error)
{
	/* Running total of the item sizes that were skipped. */
	size_t len = 0;

	for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
		union flow_item spec, mask;
		size_t size;
		int disabled = 0, status;

		/* VOID items are transparent: skip without counting. */
		if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		status = flow_item_proto_preprocess(*item,
				&spec,
				&mask,
				&size,
				&disabled,
				error);
		if (status)
			return status;

		/* Stop on any proto listed in proto_mask (conditions A),
		 * or on any enabled proto (condition B).
		 */
		if ((proto_mask & (1LLU << (*item)->type)) ||
				!disabled)
			break;

		len += size;
	}

	/* Optionally report the total byte length of the skipped protos. */
	if (length)
		*length = len;

	return 0;
}
398
/* Bitmask of the IP protocol item types, for flow_item_skip_disabled_protos'
 * proto_mask argument (stop at IPv4/IPv6 whether enabled or disabled).
 */
#define FLOW_ITEM_PROTO_IP \
	((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
	 (1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
402
403 static void
404 flow_item_skip_void(const struct rte_flow_item **item)
405 {
406         for ( ; ; (*item)++)
407                 if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
408                         return;
409 }
410
/* IANA-assigned IP protocol numbers (TCP=6, UDP=17, SCTP=132). */
#define IP_PROTOCOL_TCP 0x06
#define IP_PROTOCOL_UDP 0x11
#define IP_PROTOCOL_SCTP 0x84
414
/*
 * Convert a 64-bit network mask into its prefix depth (number of leading
 * one bits). Returns 0 and writes *depth (when non-NULL), or -1 when the
 * mask is not a contiguous prefix.
 */
static int
mask_to_depth(uint64_t mask,
		uint32_t *depth)
{
	uint64_t zeroes = ~mask;

	/* A valid prefix mask has all of its clear bits contiguous at the
	 * bottom, i.e. the complement must have the form 2^k - 1. This also
	 * covers the all-ones mask (zeroes == 0 -> depth 64).
	 */
	if (zeroes & (zeroes + 1))
		return -1;

	if (depth)
		*depth = (uint32_t)(64 - __builtin_popcountll(zeroes));

	return 0;
}
439
/* Convert a host-order IPv4 mask into its prefix depth (0..32).
 * Returns 0 on success, -1 when the mask is not a contiguous prefix.
 */
static int
ipv4_mask_to_depth(uint32_t mask,
		uint32_t *depth)
{
	uint32_t d;

	/* Promote to 64 bits with the upper half all ones so the generic
	 * helper can validate prefix contiguity; subtract those 32 bits back.
	 */
	if (mask_to_depth(mask | (UINT64_MAX << 32), &d) != 0)
		return -1;

	if (depth)
		*depth = d - 32;

	return 0;
}
457
/* Convert a 16-byte big-endian IPv6 mask into its prefix depth (0..128).
 * Returns 0 on success, -1 when the mask is not a contiguous prefix.
 */
static int
ipv6_mask_to_depth(uint8_t *mask,
	uint32_t *depth)
{
	uint64_t m0, m1;
	uint32_t d0, d1;
	int status;

	/* Copy the two halves instead of casting mask to uint64_t *:
	 * the byte pointer is not guaranteed to be 8-byte aligned and the
	 * cast would also violate strict-aliasing rules (both are undefined
	 * behavior).
	 */
	memcpy(&m0, &mask[0], sizeof(m0));
	memcpy(&m1, &mask[8], sizeof(m1));
	m0 = rte_be_to_cpu_64(m0);
	m1 = rte_be_to_cpu_64(m1);

	status = mask_to_depth(m0, &d0);
	if (status)
		return status;

	status = mask_to_depth(m1, &d1);
	if (status)
		return status;

	/* The low half may only contribute when the high half is all ones. */
	if (d0 < 64 && d1)
		return -1;

	if (depth)
		*depth = d0 + d1;

	return 0;
}
484
485 static int
486 port_mask_to_range(uint16_t port,
487         uint16_t port_mask,
488         uint16_t *port0,
489         uint16_t *port1)
490 {
491         int status;
492         uint16_t p0, p1;
493
494         status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL);
495         if (status)
496                 return -1;
497
498         p0 = port & port_mask;
499         p1 = p0 | ~port_mask;
500
501         if (port0)
502                 *port0 = p0;
503
504         if (port1)
505                 *port1 = p1;
506
507         return 0;
508 }
509
/*
 * Translate an rte_flow item list into an ACL table rule match.
 * The accepted item sequence is: [VOID/disabled]* IP (VOID)* TCP|UDP|SCTP
 * [VOID/disabled]* END; the L4 item type must agree with the IP header's
 * protocol field, whose mask must be exact (0xFF).
 * Returns 0 on success or the value produced by rte_flow_error_set().
 */
static int
flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
		struct pipeline *pipeline __rte_unused,
		struct softnic_table *table __rte_unused,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item *item,
		struct softnic_table_rule_match *rule_match,
		struct rte_flow_error *error)
{
	union flow_item spec, mask;
	size_t size, length = 0; /* length of skipped protos; currently unused */
	int disabled = 0, status;
	uint8_t ip_proto, ip_proto_mask;

	memset(rule_match, 0, sizeof(*rule_match));
	rule_match->match_type = TABLE_ACL;
	rule_match->match.acl.priority = attr->priority;

	/* VOID or disabled protos only, if any. */
	status = flow_item_skip_disabled_protos(&item,
			FLOW_ITEM_PROTO_IP, &length, error);
	if (status)
		return status;

	/* IP only. */
	status = flow_item_proto_preprocess(item, &spec, &mask,
			&size, &disabled, error);
	if (status)
		return status;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
	{
		uint32_t sa_depth, da_depth;

		/* Address masks must be contiguous prefixes (LPM depths). */
		status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr),
				&sa_depth);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal IPv4 header source address mask");

		status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr),
				&da_depth);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal IPv4 header destination address mask");

		ip_proto = spec.ipv4.hdr.next_proto_id;
		ip_proto_mask = mask.ipv4.hdr.next_proto_id;

		/* ip_version: 1 = IPv4 (addresses stored in host order). */
		rule_match->match.acl.ip_version = 1;
		rule_match->match.acl.ipv4.sa =
			rte_ntohl(spec.ipv4.hdr.src_addr);
		rule_match->match.acl.ipv4.da =
			rte_ntohl(spec.ipv4.hdr.dst_addr);
		rule_match->match.acl.sa_depth = sa_depth;
		rule_match->match.acl.da_depth = da_depth;
		rule_match->match.acl.proto = ip_proto;
		rule_match->match.acl.proto_mask = ip_proto_mask;
		break;
	} /* RTE_FLOW_ITEM_TYPE_IPV4 */

	case RTE_FLOW_ITEM_TYPE_IPV6:
	{
		uint32_t sa_depth, da_depth;

		status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal IPv6 header source address mask");

		status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal IPv6 header destination address mask");

		ip_proto = spec.ipv6.hdr.proto;
		ip_proto_mask = mask.ipv6.hdr.proto;

		/* ip_version: 0 = IPv6 (addresses kept as 16-byte arrays). */
		rule_match->match.acl.ip_version = 0;
		memcpy(rule_match->match.acl.ipv6.sa,
			spec.ipv6.hdr.src_addr,
			sizeof(spec.ipv6.hdr.src_addr));
		memcpy(rule_match->match.acl.ipv6.da,
			spec.ipv6.hdr.dst_addr,
			sizeof(spec.ipv6.hdr.dst_addr));
		rule_match->match.acl.sa_depth = sa_depth;
		rule_match->match.acl.da_depth = da_depth;
		rule_match->match.acl.proto = ip_proto;
		rule_match->match.acl.proto_mask = ip_proto_mask;
		break;
	} /* RTE_FLOW_ITEM_TYPE_IPV6 */

	default:
		return rte_flow_error_set(error,
			ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"ACL: IP protocol required");
	} /* switch */

	/* The L4 item below is matched against ip_proto, so the protocol
	 * field must be fully specified.
	 */
	if (ip_proto_mask != UINT8_MAX)
		return rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"ACL: Illegal IP protocol mask");

	item++;

	/* VOID only, if any. */
	flow_item_skip_void(&item);

	/* TCP/UDP/SCTP only. */
	status = flow_item_proto_preprocess(item, &spec, &mask,
			&size, &disabled, error);
	if (status)
		return status;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_TCP:
	{
		uint16_t sp0, sp1, dp0, dp1;

		if (ip_proto != IP_PROTOCOL_TCP)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Item type is TCP, but IP protocol is not");

		/* Port masks must be contiguous prefixes; expand each
		 * (port, mask) pair to the [p0, p1] range the ACL stores.
		 */
		status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port),
				rte_ntohs(mask.tcp.hdr.src_port),
				&sp0,
				&sp1);

		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal TCP source port mask");

		status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port),
				rte_ntohs(mask.tcp.hdr.dst_port),
				&dp0,
				&dp1);

		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal TCP destination port mask");

		rule_match->match.acl.sp0 = sp0;
		rule_match->match.acl.sp1 = sp1;
		rule_match->match.acl.dp0 = dp0;
		rule_match->match.acl.dp1 = dp1;

		break;
	} /* RTE_FLOW_ITEM_TYPE_TCP */

	case RTE_FLOW_ITEM_TYPE_UDP:
	{
		uint16_t sp0, sp1, dp0, dp1;

		if (ip_proto != IP_PROTOCOL_UDP)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Item type is UDP, but IP protocol is not");

		status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port),
			rte_ntohs(mask.udp.hdr.src_port),
			&sp0,
			&sp1);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal UDP source port mask");

		status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port),
			rte_ntohs(mask.udp.hdr.dst_port),
			&dp0,
			&dp1);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal UDP destination port mask");

		rule_match->match.acl.sp0 = sp0;
		rule_match->match.acl.sp1 = sp1;
		rule_match->match.acl.dp0 = dp0;
		rule_match->match.acl.dp1 = dp1;

		break;
	} /* RTE_FLOW_ITEM_TYPE_UDP */

	case RTE_FLOW_ITEM_TYPE_SCTP:
	{
		uint16_t sp0, sp1, dp0, dp1;

		if (ip_proto != IP_PROTOCOL_SCTP)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Item type is SCTP, but IP protocol is not");

		status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port),
			rte_ntohs(mask.sctp.hdr.src_port),
			&sp0,
			&sp1);

		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal SCTP source port mask");

		status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port),
			rte_ntohs(mask.sctp.hdr.dst_port),
			&dp0,
			&dp1);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal SCTP destination port mask");

		rule_match->match.acl.sp0 = sp0;
		rule_match->match.acl.sp1 = sp1;
		rule_match->match.acl.dp0 = dp0;
		rule_match->match.acl.dp1 = dp1;

		break;
	} /* RTE_FLOW_ITEM_TYPE_SCTP */

	default:
		return rte_flow_error_set(error,
			ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"ACL: TCP/UDP/SCTP required");
	} /* switch */

	item++;

	/* VOID or disabled protos only, if any. */
	status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
	if (status)
		return status;

	/* END only. */
	if (item->type != RTE_FLOW_ITEM_TYPE_END)
		return rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"ACL: Expecting END item");

	return 0;
}
793
794 /***
795  * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
796  * respectively.
797  * They are located within a larger buffer at offsets *toffset* and *foffset*
798  * respectivelly. Both *tmask* and *fmask* represent bitmasks for the larger
799  * buffer.
800  * Question: are the two masks equivalent?
801  *
802  * Notes:
803  * 1. Offset basically indicates that the first offset bytes in the buffer
804  *    are "don't care", so offset is equivalent to pre-pending an "all-zeros"
805  *    array of *offset* bytes to the *mask*.
806  * 2. Each *mask* might contain a number of zero bytes at the beginning or
807  *    at the end.
808  * 3. Bytes in the larger buffer after the end of the *mask* are also considered
809  *    "don't care", so they are equivalent to appending an "all-zeros" array of
810  *    bytes to the *mask*.
811  *
812  * Example:
813  * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes
814  * tmask = [00 22 00 33 00], toffset = 2, tsize = 5
815  *    => buffer mask = [00 00 00 22 00 33 00 00]
816  * fmask = [22 00 33], foffset = 3, fsize = 3 =>
817  *    => buffer mask = [00 00 00 22 00 33 00 00]
818  * Therefore, the tmask and fmask from this example are equivalent.
819  */
static int
hash_key_mask_is_same(uint8_t *tmask,
	size_t toffset,
	size_t tsize,
	uint8_t *fmask,
	size_t foffset,
	size_t fsize,
	size_t *toffset_plus,
	size_t *foffset_plus)
{
	size_t tpos = 0; /* First non-zero byte within tmask. */
	size_t fpos = 0; /* First non-zero byte within fmask. */
	size_t common, rest, rest_pos, i;
	const uint8_t *rest_mask;

	/* Trim leading zero bytes from both masks. */
	while (tmask[tpos] == 0)
		tpos++;
	while (fmask[fpos] == 0)
		fpos++;

	/* The first meaningful byte must land on the same buffer position. */
	if (toffset + tpos != foffset + fpos)
		return 0; /* FALSE */

	tsize -= tpos;
	fsize -= fpos;

	/* Compare the overlapping part byte by byte. */
	common = (tsize < fsize) ? tsize : fsize;
	for (i = 0; i < common; i++)
		if (tmask[tpos + i] != fmask[fpos + i])
			return 0; /* FALSE */

	/* The tail of the longer mask must be all zeroes. */
	if (tsize < fsize) {
		rest_mask = fmask;
		rest_pos = fpos;
		rest = fsize;
	} else {
		rest_mask = tmask;
		rest_pos = tpos;
		rest = tsize;
	}
	for ( ; i < rest; i++)
		if (rest_mask[rest_pos + i])
			return 0; /* FALSE */

	if (toffset_plus)
		*toffset_plus = tpos;

	if (foffset_plus)
		*foffset_plus = fpos;

	return 1; /* TRUE */
}
875
876 static int
877 flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused,
878         struct pipeline *pipeline __rte_unused,
879         struct softnic_table *table,
880         const struct rte_flow_attr *attr __rte_unused,
881         const struct rte_flow_item *item,
882         struct softnic_table_rule_match *rule_match,
883         struct rte_flow_error *error)
884 {
885         struct softnic_table_rule_match_hash key, key_mask;
886         struct softnic_table_hash_params *params = &table->params.match.hash;
887         size_t offset = 0, length = 0, tpos, fpos;
888         int status;
889
890         memset(&key, 0, sizeof(key));
891         memset(&key_mask, 0, sizeof(key_mask));
892
893         /* VOID or disabled protos only, if any. */
894         status = flow_item_skip_disabled_protos(&item, 0, &offset, error);
895         if (status)
896                 return status;
897
898         if (item->type == RTE_FLOW_ITEM_TYPE_END)
899                 return rte_flow_error_set(error,
900                         EINVAL,
901                         RTE_FLOW_ERROR_TYPE_ITEM,
902                         item,
903                         "HASH: END detected too early");
904
905         /* VOID or any protocols (enabled or disabled). */
906         for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
907                 union flow_item spec, mask;
908                 size_t size;
909                 int disabled, status;
910
911                 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
912                         continue;
913
914                 status = flow_item_proto_preprocess(item,
915                         &spec,
916                         &mask,
917                         &size,
918                         &disabled,
919                         error);
920                 if (status)
921                         return status;
922
923                 if (length + size > sizeof(key)) {
924                         if (disabled)
925                                 break;
926
927                         return rte_flow_error_set(error,
928                                 ENOTSUP,
929                                 RTE_FLOW_ERROR_TYPE_ITEM,
930                                 item,
931                                 "HASH: Item too big");
932                 }
933
934                 memcpy(&key.key[length], &spec, size);
935                 memcpy(&key_mask.key[length], &mask, size);
936                 length += size;
937         }
938
939         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
940                 /* VOID or disabled protos only, if any. */
941                 status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
942                 if (status)
943                         return status;
944
945                 /* END only. */
946                 if (item->type != RTE_FLOW_ITEM_TYPE_END)
947                         return rte_flow_error_set(error,
948                                 EINVAL,
949                                 RTE_FLOW_ERROR_TYPE_ITEM,
950                                 item,
951                                 "HASH: Expecting END item");
952         }
953
954         /* Compare flow key mask against table key mask. */
955         offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
956
957         if (!hash_key_mask_is_same(params->key_mask,
958                 params->key_offset,
959                 params->key_size,
960                 key_mask.key,
961                 offset,
962                 length,
963                 &tpos,
964                 &fpos))
965                 return rte_flow_error_set(error,
966                         EINVAL,
967                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
968                         NULL,
969                         "HASH: Item list is not observing the match format");
970
971         /* Rule match. */
972         memset(rule_match, 0, sizeof(*rule_match));
973         rule_match->match_type = TABLE_HASH;
974         memcpy(&rule_match->match.hash.key[tpos],
975                 &key.key[fpos],
976                 RTE_MIN(sizeof(rule_match->match.hash.key) - tpos,
977                         length - fpos));
978
979         return 0;
980 }
981
982 static int
983 flow_rule_match_get(struct pmd_internals *softnic,
984                 struct pipeline *pipeline,
985                 struct softnic_table *table,
986                 const struct rte_flow_attr *attr,
987                 const struct rte_flow_item *item,
988                 struct softnic_table_rule_match *rule_match,
989                 struct rte_flow_error *error)
990 {
991         switch (table->params.match_type) {
992         case TABLE_ACL:
993                 return flow_rule_match_acl_get(softnic,
994                         pipeline,
995                         table,
996                         attr,
997                         item,
998                         rule_match,
999                         error);
1000
1001                 /* FALLTHROUGH */
1002
1003         case TABLE_HASH:
1004                 return flow_rule_match_hash_get(softnic,
1005                         pipeline,
1006                         table,
1007                         attr,
1008                         item,
1009                         rule_match,
1010                         error);
1011
1012         default:
1013                 return rte_flow_error_set(error,
1014                         ENOTSUP,
1015                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1016                         NULL,
1017                         "Unsupported pipeline table match type");
1018         }
1019 }
1020
1021 static int
1022 pmd_flow_validate(struct rte_eth_dev *dev,
1023                 const struct rte_flow_attr *attr,
1024                 const struct rte_flow_item item[],
1025                 const struct rte_flow_action action[],
1026                 struct rte_flow_error *error)
1027 {
1028         struct softnic_table_rule_match rule_match;
1029
1030         struct pmd_internals *softnic = dev->data->dev_private;
1031         struct pipeline *pipeline;
1032         struct softnic_table *table;
1033         const char *pipeline_name = NULL;
1034         uint32_t table_id = 0;
1035         int status;
1036
1037         /* Check input parameters. */
1038         if (attr == NULL)
1039                 return rte_flow_error_set(error,
1040                                 EINVAL,
1041                                 RTE_FLOW_ERROR_TYPE_ATTR,
1042                                 NULL, "Null attr");
1043
1044         if (item == NULL)
1045                 return rte_flow_error_set(error,
1046                                 EINVAL,
1047                                 RTE_FLOW_ERROR_TYPE_ITEM,
1048                                 NULL,
1049                                 "Null item");
1050
1051         if (action == NULL)
1052                 return rte_flow_error_set(error,
1053                                 EINVAL,
1054                                 RTE_FLOW_ERROR_TYPE_ACTION,
1055                                 NULL,
1056                                 "Null action");
1057
1058         /* Identify the pipeline table to add this flow to. */
1059         status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
1060                                         &table_id, error);
1061         if (status)
1062                 return status;
1063
1064         pipeline = softnic_pipeline_find(softnic, pipeline_name);
1065         if (pipeline == NULL)
1066                 return rte_flow_error_set(error,
1067                                 EINVAL,
1068                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1069                                 NULL,
1070                                 "Invalid pipeline name");
1071
1072         if (table_id >= pipeline->n_tables)
1073                 return rte_flow_error_set(error,
1074                                 EINVAL,
1075                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1076                                 NULL,
1077                                 "Invalid pipeline table ID");
1078
1079         table = &pipeline->table[table_id];
1080
1081         /* Rule match. */
1082         memset(&rule_match, 0, sizeof(rule_match));
1083         status = flow_rule_match_get(softnic,
1084                         pipeline,
1085                         table,
1086                         attr,
1087                         item,
1088                         &rule_match,
1089                         error);
1090         if (status)
1091                 return status;
1092
1093         return 0;
1094 }
1095
/* rte_flow operations exposed by the softnic PMD. Only the validate
 * callback is populated here; other callbacks are left NULL (unsupported).
 */
const struct rte_flow_ops pmd_flow_ops = {
	.validate = pmd_flow_validate,
};