net/softnic: parse raw flow item
[dpdk.git] drivers/net/softnic/rte_eth_softnic_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 #include <stdint.h>
5 #include <stdlib.h>
6 #include <string.h>
7
8 #include <rte_common.h>
9 #include <rte_byteorder.h>
10 #include <rte_malloc.h>
11 #include <rte_string_fns.h>
12 #include <rte_flow.h>
13 #include <rte_flow_driver.h>
14
15 #include "rte_eth_softnic_internals.h"
16 #include "rte_eth_softnic.h"
17
18 #define rte_htons rte_cpu_to_be_16
19 #define rte_htonl rte_cpu_to_be_32
20
21 #define rte_ntohs rte_be_to_cpu_16
22 #define rte_ntohl rte_be_to_cpu_32
23
24 static struct rte_flow *
25 softnic_flow_find(struct softnic_table *table,
26         struct softnic_table_rule_match *rule_match)
27 {
28         struct rte_flow *flow;
29
30         TAILQ_FOREACH(flow, &table->flows, node)
31                 if (memcmp(&flow->match, rule_match, sizeof(*rule_match)) == 0)
32                         return flow;
33
34         return NULL;
35 }
36
37 int
38 flow_attr_map_set(struct pmd_internals *softnic,
39                 uint32_t group_id,
40                 int ingress,
41                 const char *pipeline_name,
42                 uint32_t table_id)
43 {
44         struct pipeline *pipeline;
45         struct flow_attr_map *map;
46
47         if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
48                         pipeline_name == NULL)
49                 return -1;
50
51         pipeline = softnic_pipeline_find(softnic, pipeline_name);
52         if (pipeline == NULL ||
53                         table_id >= pipeline->n_tables)
54                 return -1;
55
56         map = (ingress) ? &softnic->flow.ingress_map[group_id] :
57                 &softnic->flow.egress_map[group_id];
58         strcpy(map->pipeline_name, pipeline_name);
59         map->table_id = table_id;
60         map->valid = 1;
61
62         return 0;
63 }
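/*
 * Editorial usage sketch (hypothetical names, assuming a pipeline called
 * "PIPELINE0" with at least two tables has already been created): map
 * ingress group 0 to table 1 of that pipeline, so that rte_flow rules
 * created with attr->group = 0 and attr->ingress = 1 land in that table:
 *
 *	flow_attr_map_set(softnic, 0, 1, "PIPELINE0", 1);
 *	map = flow_attr_map_get(softnic, 0, 1);
 *
 * On return, map->pipeline_name is "PIPELINE0", map->table_id is 1 and
 * map->valid is 1.
 */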
64
65 struct flow_attr_map *
66 flow_attr_map_get(struct pmd_internals *softnic,
67                 uint32_t group_id,
68                 int ingress)
69 {
70         if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
71                 return NULL;
72
73         return (ingress) ? &softnic->flow.ingress_map[group_id] :
74                 &softnic->flow.egress_map[group_id];
75 }
76
77 static int
78 flow_pipeline_table_get(struct pmd_internals *softnic,
79                 const struct rte_flow_attr *attr,
80                 const char **pipeline_name,
81                 uint32_t *table_id,
82                 struct rte_flow_error *error)
83 {
84         struct flow_attr_map *map;
85
86         if (attr == NULL)
87                 return rte_flow_error_set(error,
88                                 EINVAL,
89                                 RTE_FLOW_ERROR_TYPE_ATTR,
90                                 NULL,
91                                 "Null attr");
92
93         if (!attr->ingress && !attr->egress)
94                 return rte_flow_error_set(error,
95                                 EINVAL,
96                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
97                                 attr,
98                                 "Ingress/egress not specified");
99
100         if (attr->ingress && attr->egress)
101                 return rte_flow_error_set(error,
102                                 EINVAL,
103                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
104                                 attr,
105                                 "Setting both ingress and egress is not allowed");
106
107         map = flow_attr_map_get(softnic,
108                         attr->group,
109                         attr->ingress);
110         if (map == NULL ||
111                         map->valid == 0)
112                 return rte_flow_error_set(error,
113                                 EINVAL,
114                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
115                                 attr,
116                                 "Invalid group ID");
117
118         if (pipeline_name)
119                 *pipeline_name = map->pipeline_name;
120
121         if (table_id)
122                 *table_id = map->table_id;
123
124         return 0;
125 }
126
127 union flow_item {
128         uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
129         struct rte_flow_item_eth eth;
130         struct rte_flow_item_vlan vlan;
131         struct rte_flow_item_ipv4 ipv4;
132         struct rte_flow_item_ipv6 ipv6;
133         struct rte_flow_item_icmp icmp;
134         struct rte_flow_item_udp udp;
135         struct rte_flow_item_tcp tcp;
136         struct rte_flow_item_sctp sctp;
137         struct rte_flow_item_vxlan vxlan;
138         struct rte_flow_item_e_tag e_tag;
139         struct rte_flow_item_nvgre nvgre;
140         struct rte_flow_item_mpls mpls;
141         struct rte_flow_item_gre gre;
142         struct rte_flow_item_gtp gtp;
143         struct rte_flow_item_esp esp;
144         struct rte_flow_item_geneve geneve;
145         struct rte_flow_item_vxlan_gpe vxlan_gpe;
146         struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
147         struct rte_flow_item_ipv6_ext ipv6_ext;
148         struct rte_flow_item_icmp6 icmp6;
149         struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
150         struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
151         struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
152         struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
153         struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
154 };
155
156 static const union flow_item flow_item_raw_mask;
157
158 static int
159 flow_item_is_proto(enum rte_flow_item_type type,
160         const void **mask,
161         size_t *size)
162 {
163         switch (type) {
164         case RTE_FLOW_ITEM_TYPE_RAW:
165                 *mask = &flow_item_raw_mask;
166                 *size = sizeof(flow_item_raw_mask);
167                 return 1; /* TRUE */
168
169         case RTE_FLOW_ITEM_TYPE_ETH:
170                 *mask = &rte_flow_item_eth_mask;
171                 *size = sizeof(struct rte_flow_item_eth);
172                 return 1; /* TRUE */
173
174         case RTE_FLOW_ITEM_TYPE_VLAN:
175                 *mask = &rte_flow_item_vlan_mask;
176                 *size = sizeof(struct rte_flow_item_vlan);
177                 return 1;
178
179         case RTE_FLOW_ITEM_TYPE_IPV4:
180                 *mask = &rte_flow_item_ipv4_mask;
181                 *size = sizeof(struct rte_flow_item_ipv4);
182                 return 1;
183
184         case RTE_FLOW_ITEM_TYPE_IPV6:
185                 *mask = &rte_flow_item_ipv6_mask;
186                 *size = sizeof(struct rte_flow_item_ipv6);
187                 return 1;
188
189         case RTE_FLOW_ITEM_TYPE_ICMP:
190                 *mask = &rte_flow_item_icmp_mask;
191                 *size = sizeof(struct rte_flow_item_icmp);
192                 return 1;
193
194         case RTE_FLOW_ITEM_TYPE_UDP:
195                 *mask = &rte_flow_item_udp_mask;
196                 *size = sizeof(struct rte_flow_item_udp);
197                 return 1;
198
199         case RTE_FLOW_ITEM_TYPE_TCP:
200                 *mask = &rte_flow_item_tcp_mask;
201                 *size = sizeof(struct rte_flow_item_tcp);
202                 return 1;
203
204         case RTE_FLOW_ITEM_TYPE_SCTP:
205                 *mask = &rte_flow_item_sctp_mask;
206                 *size = sizeof(struct rte_flow_item_sctp);
207                 return 1;
208
209         case RTE_FLOW_ITEM_TYPE_VXLAN:
210                 *mask = &rte_flow_item_vxlan_mask;
211                 *size = sizeof(struct rte_flow_item_vxlan);
212                 return 1;
213
214         case RTE_FLOW_ITEM_TYPE_E_TAG:
215                 *mask = &rte_flow_item_e_tag_mask;
216                 *size = sizeof(struct rte_flow_item_e_tag);
217                 return 1;
218
219         case RTE_FLOW_ITEM_TYPE_NVGRE:
220                 *mask = &rte_flow_item_nvgre_mask;
221                 *size = sizeof(struct rte_flow_item_nvgre);
222                 return 1;
223
224         case RTE_FLOW_ITEM_TYPE_MPLS:
225                 *mask = &rte_flow_item_mpls_mask;
226                 *size = sizeof(struct rte_flow_item_mpls);
227                 return 1;
228
229         case RTE_FLOW_ITEM_TYPE_GRE:
230                 *mask = &rte_flow_item_gre_mask;
231                 *size = sizeof(struct rte_flow_item_gre);
232                 return 1;
233
234         case RTE_FLOW_ITEM_TYPE_GTP:
235         case RTE_FLOW_ITEM_TYPE_GTPC:
236         case RTE_FLOW_ITEM_TYPE_GTPU:
237                 *mask = &rte_flow_item_gtp_mask;
238                 *size = sizeof(struct rte_flow_item_gtp);
239                 return 1;
240
241         case RTE_FLOW_ITEM_TYPE_ESP:
242                 *mask = &rte_flow_item_esp_mask;
243                 *size = sizeof(struct rte_flow_item_esp);
244                 return 1;
245
246         case RTE_FLOW_ITEM_TYPE_GENEVE:
247                 *mask = &rte_flow_item_geneve_mask;
248                 *size = sizeof(struct rte_flow_item_geneve);
249                 return 1;
250
251         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
252                 *mask = &rte_flow_item_vxlan_gpe_mask;
253                 *size = sizeof(struct rte_flow_item_vxlan_gpe);
254                 return 1;
255
256         case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
257                 *mask = &rte_flow_item_arp_eth_ipv4_mask;
258                 *size = sizeof(struct rte_flow_item_arp_eth_ipv4);
259                 return 1;
260
261         case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
262                 *mask = &rte_flow_item_ipv6_ext_mask;
263                 *size = sizeof(struct rte_flow_item_ipv6_ext);
264                 return 1;
265
266         case RTE_FLOW_ITEM_TYPE_ICMP6:
267                 *mask = &rte_flow_item_icmp6_mask;
268                 *size = sizeof(struct rte_flow_item_icmp6);
269                 return 1;
270
271         case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
272                 *mask = &rte_flow_item_icmp6_nd_ns_mask;
273                 *size = sizeof(struct rte_flow_item_icmp6_nd_ns);
274                 return 1;
275
276         case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
277                 *mask = &rte_flow_item_icmp6_nd_na_mask;
278                 *size = sizeof(struct rte_flow_item_icmp6_nd_na);
279                 return 1;
280
281         case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
282                 *mask = &rte_flow_item_icmp6_nd_opt_mask;
283                 *size = sizeof(struct rte_flow_item_icmp6_nd_opt);
284                 return 1;
285
286         case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
287                 *mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
288                 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
289                 return 1;
290
291         case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
292                 *mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
293                 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
294                 return 1;
295
296         default: return 0; /* FALSE */
297         }
298 }
299
300 static int
301 flow_item_raw_preprocess(const struct rte_flow_item *item,
302         union flow_item *item_spec,
303         union flow_item *item_mask,
304         size_t *item_size,
305         int *item_disabled,
306         struct rte_flow_error *error)
307 {
308         const struct rte_flow_item_raw *item_raw_spec = item->spec;
309         const struct rte_flow_item_raw *item_raw_mask = item->mask;
310         const uint8_t *pattern;
311         const uint8_t *pattern_mask;
312         uint8_t *spec = (uint8_t *)item_spec;
313         uint8_t *mask = (uint8_t *)item_mask;
314         size_t pattern_length, pattern_offset, i;
315         int disabled;
316
317         if (!item->spec)
318                 return rte_flow_error_set(error,
319                         ENOTSUP,
320                         RTE_FLOW_ERROR_TYPE_ITEM,
321                         item,
322                         "RAW: Null specification");
323
324         if (item->last)
325                 return rte_flow_error_set(error,
326                         ENOTSUP,
327                         RTE_FLOW_ERROR_TYPE_ITEM,
328                         item,
329                         "RAW: Range not allowed (last must be NULL)");
330
331         if (item_raw_spec->relative == 0)
332                 return rte_flow_error_set(error,
333                         ENOTSUP,
334                         RTE_FLOW_ERROR_TYPE_ITEM,
335                         item,
336                         "RAW: Absolute offset not supported");
337
338         if (item_raw_spec->search)
339                 return rte_flow_error_set(error,
340                         ENOTSUP,
341                         RTE_FLOW_ERROR_TYPE_ITEM,
342                         item,
343                         "RAW: Search not supported");
344
345         if (item_raw_spec->offset < 0)
346                 return rte_flow_error_set(error,
347                         ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
348                         item,
349                         "RAW: Negative offset not supported");
350
351         if (item_raw_spec->length == 0)
352                 return rte_flow_error_set(error,
353                         ENOTSUP,
354                         RTE_FLOW_ERROR_TYPE_ITEM,
355                         item,
356                         "RAW: Zero pattern length");
357
358         if (item_raw_spec->offset + item_raw_spec->length >
359                 TABLE_RULE_MATCH_SIZE_MAX)
360                 return rte_flow_error_set(error,
361                         ENOTSUP,
362                         RTE_FLOW_ERROR_TYPE_ITEM,
363                         item,
364                         "RAW: Item too big");
365
366         if (!item_raw_spec->pattern && item_raw_mask && item_raw_mask->pattern)
367                 return rte_flow_error_set(error,
368                         ENOTSUP,
369                         RTE_FLOW_ERROR_TYPE_ITEM,
370                         item,
371                         "RAW: Non-NULL pattern mask not allowed with NULL pattern");
372
373         pattern = item_raw_spec->pattern;
374         pattern_mask = (item_raw_mask) ? item_raw_mask->pattern : NULL;
375         pattern_length = (size_t)item_raw_spec->length;
376         pattern_offset = (size_t)item_raw_spec->offset;
377
378         /* The item is disabled when its pattern mask is NULL or all-zeros;
379          * a disabled item only contributes to the offset of later items. */
380         disabled = 1;
381         if (pattern_mask != NULL)
382                 for (i = 0; i < pattern_length; i++)
383                         if (pattern_mask[i])
384                                 disabled = 0;
385
386         memset(spec, 0, TABLE_RULE_MATCH_SIZE_MAX);
387         if (pattern)
388                 memcpy(&spec[pattern_offset], pattern, pattern_length);
389
390         memset(mask, 0, TABLE_RULE_MATCH_SIZE_MAX);
391         if (pattern_mask)
392                 memcpy(&mask[pattern_offset], pattern_mask, pattern_length);
393
394         *item_size = pattern_offset + pattern_length;
395         *item_disabled = disabled;
396
397         return 0;
398 }
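/*
 * Editorial example (hypothetical values): a RAW item with relative = 1,
 * search = 0, offset = 6, length = 2, spec pattern {0xAA, 0xBB} and mask
 * pattern {0xFF, 0xFF} is flattened as:
 *    spec buffer = [00 00 00 00 00 00 AA BB 00 ... 00]
 *    mask buffer = [00 00 00 00 00 00 FF FF 00 ... 00]
 *    *item_size  = offset + length = 8
 */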
399
400 static int
401 flow_item_proto_preprocess(const struct rte_flow_item *item,
402         union flow_item *item_spec,
403         union flow_item *item_mask,
404         size_t *item_size,
405         int *item_disabled,
406         struct rte_flow_error *error)
407 {
408         const void *mask_default;
409         uint8_t *spec = (uint8_t *)item_spec;
410         uint8_t *mask = (uint8_t *)item_mask;
411         size_t size, i;
412
413         if (!flow_item_is_proto(item->type, &mask_default, &size))
414                 return rte_flow_error_set(error,
415                         ENOTSUP,
416                         RTE_FLOW_ERROR_TYPE_ITEM,
417                         item,
418                         "Item type not supported");
419
420         if (item->type == RTE_FLOW_ITEM_TYPE_RAW)
421                 return flow_item_raw_preprocess(item,
422                         item_spec,
423                         item_mask,
424                         item_size,
425                         item_disabled,
426                         error);
427
428         /* spec */
429         if (!item->spec) {
430                 /* If spec is NULL, then last and mask also have to be NULL. */
431                 if (item->last || item->mask)
432                         return rte_flow_error_set(error,
433                                 EINVAL,
434                                 RTE_FLOW_ERROR_TYPE_ITEM,
435                                 item,
436                                 "Invalid item (NULL spec with non-NULL last or mask)");
437
438                 memset(item_spec, 0, size);
439                 memset(item_mask, 0, size);
440                 *item_size = size;
441                 *item_disabled = 1; /* TRUE */
442                 return 0;
443         }
444
445         memcpy(spec, item->spec, size);
446         *item_size = size;
447
448         /* mask */
449         if (item->mask)
450                 memcpy(mask, item->mask, size);
451         else
452                 memcpy(mask, mask_default, size);
453
454         /* disabled */
455         for (i = 0; i < size; i++)
456                 if (mask[i])
457                         break;
458         *item_disabled = (i == size) ? 1 : 0;
459
460         /* Apply mask over spec. */
461         for (i = 0; i < size; i++)
462                 spec[i] &= mask[i];
463
464         /* last */
465         if (item->last) {
466                 uint8_t last[size];
467
468                 /* init last */
469                 memcpy(last, item->last, size);
470                 for (i = 0; i < size; i++)
471                         last[i] &= mask[i];
472
473                 /* check for range */
474                 for (i = 0; i < size; i++)
475                         if (last[i] != spec[i])
476                                 return rte_flow_error_set(error,
477                                         ENOTSUP,
478                                         RTE_FLOW_ERROR_TYPE_ITEM,
479                                         item,
480                                         "Range not supported");
481         }
482
483         return 0;
484 }
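/*
 * Editorial example: an IPV4 item whose spec sets hdr.next_proto_id = 6 and
 * whose mask covers only that field (0xFF) yields the spec with the mask
 * applied (all unmasked fields zeroed), *item_size =
 * sizeof(struct rte_flow_item_ipv4) and *item_disabled = 0. An item with a
 * NULL spec (and NULL last/mask) yields all-zero spec and mask buffers and
 * *item_disabled = 1.
 */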
485
486 /***
487  * Skip disabled protocol items and VOID items
488  * until any of the mutually exclusive conditions
489  * from the list below takes place:
490  *    (A) A protocol present in the proto_mask
491  *        is met (either ENABLED or DISABLED);
492  *    (B) A protocol NOT present in the proto_mask is met in ENABLED state;
493  *    (C) The END item is met.
494  */
495 static int
496 flow_item_skip_disabled_protos(const struct rte_flow_item **item,
497         uint64_t proto_mask,
498         size_t *length,
499         struct rte_flow_error *error)
500 {
501         size_t len = 0;
502
503         for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
504                 union flow_item spec, mask;
505                 size_t size;
506                 int disabled = 0, status;
507
508                 if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
509                         continue;
510
511                 status = flow_item_proto_preprocess(*item,
512                                 &spec,
513                                 &mask,
514                                 &size,
515                                 &disabled,
516                                 error);
517                 if (status)
518                         return status;
519
520                 if ((proto_mask & (1LLU << (*item)->type)) ||
521                                 !disabled)
522                         break;
523
524                 len += size;
525         }
526
527         if (length)
528                 *length = len;
529
530         return 0;
531 }
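/*
 * Editorial example: with proto_mask = FLOW_ITEM_PROTO_IP, the item list
 * [ ETH (NULL spec), VOID, IPV4 (non-zero mask), ... ] leaves *item pointing
 * at the IPV4 item (condition A above) and *length =
 * sizeof(struct rte_flow_item_eth), i.e. the size of the skipped ETH item.
 */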
532
533 #define FLOW_ITEM_PROTO_IP \
534         ((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
535          (1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
536
537 static void
538 flow_item_skip_void(const struct rte_flow_item **item)
539 {
540         for ( ; ; (*item)++)
541                 if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
542                         return;
543 }
544
545 #define IP_PROTOCOL_TCP 0x06
546 #define IP_PROTOCOL_UDP 0x11
547 #define IP_PROTOCOL_SCTP 0x84
548
549 static int
550 mask_to_depth(uint64_t mask,
551                 uint32_t *depth)
552 {
553         uint64_t n;
554
555         if (mask == UINT64_MAX) {
556                 if (depth)
557                         *depth = 64;
558
559                 return 0;
560         }
561
562         mask = ~mask;
563
564         if (mask & (mask + 1))
565                 return -1;
566
567         n = __builtin_popcountll(mask);
568         if (depth)
569                 *depth = (uint32_t)(64 - n);
570
571         return 0;
572 }
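/*
 * Editorial examples: mask 0xFFFFFFFFFFFFFF00 yields depth 56, UINT64_MAX
 * yields depth 64, and a non-contiguous mask such as 0xFF00FF0000000000 is
 * rejected (-1), since only prefix-style masks can be expressed as a depth.
 */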
573
574 static int
575 ipv4_mask_to_depth(uint32_t mask,
576                 uint32_t *depth)
577 {
578         uint32_t d;
579         int status;
580
581         status = mask_to_depth(mask | (UINT64_MAX << 32), &d);
582         if (status)
583                 return status;
584
585         d -= 32;
586         if (depth)
587                 *depth = d;
588
589         return 0;
590 }
591
592 static int
593 ipv6_mask_to_depth(uint8_t *mask,
594         uint32_t *depth)
595 {
596         uint64_t *m = (uint64_t *)mask;
597         uint64_t m0 = rte_be_to_cpu_64(m[0]);
598         uint64_t m1 = rte_be_to_cpu_64(m[1]);
599         uint32_t d0, d1;
600         int status;
601
602         status = mask_to_depth(m0, &d0);
603         if (status)
604                 return status;
605
606         status = mask_to_depth(m1, &d1);
607         if (status)
608                 return status;
609
610         if (d0 < 64 && d1)
611                 return -1;
612
613         if (depth)
614                 *depth = d0 + d1;
615
616         return 0;
617 }
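/*
 * Editorial example: the 128-bit mask ffff:ffff:ffff:ffff:ff00:0:0:0 (a /72
 * prefix) yields depth 72, while ffff:ffff:0:0:ffff:0:0:0 is rejected
 * because its set bits are not contiguous (d0 < 64 while d1 != 0).
 */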
618
619 static int
620 port_mask_to_range(uint16_t port,
621         uint16_t port_mask,
622         uint16_t *port0,
623         uint16_t *port1)
624 {
625         int status;
626         uint16_t p0, p1;
627
628         status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL);
629         if (status)
630                 return -1;
631
632         p0 = port & port_mask;
633         p1 = p0 | ~port_mask;
634
635         if (port0)
636                 *port0 = p0;
637
638         if (port1)
639                 *port1 = p1;
640
641         return 0;
642 }
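/*
 * Editorial example: port = 0x1234 with port_mask = 0xFFF0 produces the
 * range [0x1230, 0x123F], a full 0xFFFF mask produces the single-port range
 * [port, port], and a non-contiguous mask such as 0xFF0F is rejected.
 */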
643
644 static int
645 flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
646                 struct pipeline *pipeline __rte_unused,
647                 struct softnic_table *table __rte_unused,
648                 const struct rte_flow_attr *attr,
649                 const struct rte_flow_item *item,
650                 struct softnic_table_rule_match *rule_match,
651                 struct rte_flow_error *error)
652 {
653         union flow_item spec, mask;
654         size_t size, length = 0;
655         int disabled = 0, status;
656         uint8_t ip_proto, ip_proto_mask;
657
658         memset(rule_match, 0, sizeof(*rule_match));
659         rule_match->match_type = TABLE_ACL;
660         rule_match->match.acl.priority = attr->priority;
661
662         /* VOID or disabled protos only, if any. */
663         status = flow_item_skip_disabled_protos(&item,
664                         FLOW_ITEM_PROTO_IP, &length, error);
665         if (status)
666                 return status;
667
668         /* IP only. */
669         status = flow_item_proto_preprocess(item, &spec, &mask,
670                         &size, &disabled, error);
671         if (status)
672                 return status;
673
674         switch (item->type) {
675         case RTE_FLOW_ITEM_TYPE_IPV4:
676         {
677                 uint32_t sa_depth, da_depth;
678
679                 status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr),
680                                 &sa_depth);
681                 if (status)
682                         return rte_flow_error_set(error,
683                                 EINVAL,
684                                 RTE_FLOW_ERROR_TYPE_ITEM,
685                                 item,
686                                 "ACL: Illegal IPv4 header source address mask");
687
688                 status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr),
689                                 &da_depth);
690                 if (status)
691                         return rte_flow_error_set(error,
692                                 EINVAL,
693                                 RTE_FLOW_ERROR_TYPE_ITEM,
694                                 item,
695                                 "ACL: Illegal IPv4 header destination address mask");
696
697                 ip_proto = spec.ipv4.hdr.next_proto_id;
698                 ip_proto_mask = mask.ipv4.hdr.next_proto_id;
699
700                 rule_match->match.acl.ip_version = 1;
701                 rule_match->match.acl.ipv4.sa =
702                         rte_ntohl(spec.ipv4.hdr.src_addr);
703                 rule_match->match.acl.ipv4.da =
704                         rte_ntohl(spec.ipv4.hdr.dst_addr);
705                 rule_match->match.acl.sa_depth = sa_depth;
706                 rule_match->match.acl.da_depth = da_depth;
707                 rule_match->match.acl.proto = ip_proto;
708                 rule_match->match.acl.proto_mask = ip_proto_mask;
709                 break;
710         } /* RTE_FLOW_ITEM_TYPE_IPV4 */
711
712         case RTE_FLOW_ITEM_TYPE_IPV6:
713         {
714                 uint32_t sa_depth, da_depth;
715
716                 status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth);
717                 if (status)
718                         return rte_flow_error_set(error,
719                                 EINVAL,
720                                 RTE_FLOW_ERROR_TYPE_ITEM,
721                                 item,
722                                 "ACL: Illegal IPv6 header source address mask");
723
724                 status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth);
725                 if (status)
726                         return rte_flow_error_set(error,
727                                 EINVAL,
728                                 RTE_FLOW_ERROR_TYPE_ITEM,
729                                 item,
730                                 "ACL: Illegal IPv6 header destination address mask");
731
732                 ip_proto = spec.ipv6.hdr.proto;
733                 ip_proto_mask = mask.ipv6.hdr.proto;
734
735                 rule_match->match.acl.ip_version = 0;
736                 memcpy(rule_match->match.acl.ipv6.sa,
737                         spec.ipv6.hdr.src_addr,
738                         sizeof(spec.ipv6.hdr.src_addr));
739                 memcpy(rule_match->match.acl.ipv6.da,
740                         spec.ipv6.hdr.dst_addr,
741                         sizeof(spec.ipv6.hdr.dst_addr));
742                 rule_match->match.acl.sa_depth = sa_depth;
743                 rule_match->match.acl.da_depth = da_depth;
744                 rule_match->match.acl.proto = ip_proto;
745                 rule_match->match.acl.proto_mask = ip_proto_mask;
746                 break;
747         } /* RTE_FLOW_ITEM_TYPE_IPV6 */
748
749         default:
750                 return rte_flow_error_set(error,
751                         ENOTSUP,
752                         RTE_FLOW_ERROR_TYPE_ITEM,
753                         item,
754                         "ACL: IP protocol required");
755         } /* switch */
756
757         if (ip_proto_mask != UINT8_MAX)
758                 return rte_flow_error_set(error,
759                         EINVAL,
760                         RTE_FLOW_ERROR_TYPE_ITEM,
761                         item,
762                         "ACL: Illegal IP protocol mask");
763
764         item++;
765
766         /* VOID only, if any. */
767         flow_item_skip_void(&item);
768
769         /* TCP/UDP/SCTP only. */
770         status = flow_item_proto_preprocess(item, &spec, &mask,
771                         &size, &disabled, error);
772         if (status)
773                 return status;
774
775         switch (item->type) {
776         case RTE_FLOW_ITEM_TYPE_TCP:
777         {
778                 uint16_t sp0, sp1, dp0, dp1;
779
780                 if (ip_proto != IP_PROTOCOL_TCP)
781                         return rte_flow_error_set(error,
782                                 EINVAL,
783                                 RTE_FLOW_ERROR_TYPE_ITEM,
784                                 item,
785                                 "ACL: Item type is TCP, but IP protocol is not");
786
787                 status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port),
788                                 rte_ntohs(mask.tcp.hdr.src_port),
789                                 &sp0,
790                                 &sp1);
791
792                 if (status)
793                         return rte_flow_error_set(error,
794                                 EINVAL,
795                                 RTE_FLOW_ERROR_TYPE_ITEM,
796                                 item,
797                                 "ACL: Illegal TCP source port mask");
798
799                 status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port),
800                                 rte_ntohs(mask.tcp.hdr.dst_port),
801                                 &dp0,
802                                 &dp1);
803
804                 if (status)
805                         return rte_flow_error_set(error,
806                                 EINVAL,
807                                 RTE_FLOW_ERROR_TYPE_ITEM,
808                                 item,
809                                 "ACL: Illegal TCP destination port mask");
810
811                 rule_match->match.acl.sp0 = sp0;
812                 rule_match->match.acl.sp1 = sp1;
813                 rule_match->match.acl.dp0 = dp0;
814                 rule_match->match.acl.dp1 = dp1;
815
816                 break;
817         } /* RTE_FLOW_ITEM_TYPE_TCP */
818
819         case RTE_FLOW_ITEM_TYPE_UDP:
820         {
821                 uint16_t sp0, sp1, dp0, dp1;
822
823                 if (ip_proto != IP_PROTOCOL_UDP)
824                         return rte_flow_error_set(error,
825                                 EINVAL,
826                                 RTE_FLOW_ERROR_TYPE_ITEM,
827                                 item,
828                                 "ACL: Item type is UDP, but IP protocol is not");
829
830                 status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port),
831                         rte_ntohs(mask.udp.hdr.src_port),
832                         &sp0,
833                         &sp1);
834                 if (status)
835                         return rte_flow_error_set(error,
836                                 EINVAL,
837                                 RTE_FLOW_ERROR_TYPE_ITEM,
838                                 item,
839                                 "ACL: Illegal UDP source port mask");
840
841                 status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port),
842                         rte_ntohs(mask.udp.hdr.dst_port),
843                         &dp0,
844                         &dp1);
845                 if (status)
846                         return rte_flow_error_set(error,
847                                 EINVAL,
848                                 RTE_FLOW_ERROR_TYPE_ITEM,
849                                 item,
850                                 "ACL: Illegal UDP destination port mask");
851
852                 rule_match->match.acl.sp0 = sp0;
853                 rule_match->match.acl.sp1 = sp1;
854                 rule_match->match.acl.dp0 = dp0;
855                 rule_match->match.acl.dp1 = dp1;
856
857                 break;
858         } /* RTE_FLOW_ITEM_TYPE_UDP */
859
860         case RTE_FLOW_ITEM_TYPE_SCTP:
861         {
862                 uint16_t sp0, sp1, dp0, dp1;
863
864                 if (ip_proto != IP_PROTOCOL_SCTP)
865                         return rte_flow_error_set(error,
866                                 EINVAL,
867                                 RTE_FLOW_ERROR_TYPE_ITEM,
868                                 item,
869                                 "ACL: Item type is SCTP, but IP protocol is not");
870
871                 status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port),
872                         rte_ntohs(mask.sctp.hdr.src_port),
873                         &sp0,
874                         &sp1);
875
876                 if (status)
877                         return rte_flow_error_set(error,
878                                 EINVAL,
879                                 RTE_FLOW_ERROR_TYPE_ITEM,
880                                 item,
881                                 "ACL: Illegal SCTP source port mask");
882
883                 status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port),
884                         rte_ntohs(mask.sctp.hdr.dst_port),
885                         &dp0,
886                         &dp1);
887                 if (status)
888                         return rte_flow_error_set(error,
889                                 EINVAL,
890                                 RTE_FLOW_ERROR_TYPE_ITEM,
891                                 item,
892                                 "ACL: Illegal SCTP destination port mask");
893
894                 rule_match->match.acl.sp0 = sp0;
895                 rule_match->match.acl.sp1 = sp1;
896                 rule_match->match.acl.dp0 = dp0;
897                 rule_match->match.acl.dp1 = dp1;
898
899                 break;
900         } /* RTE_FLOW_ITEM_TYPE_SCTP */
901
902         default:
903                 return rte_flow_error_set(error,
904                         ENOTSUP,
905                         RTE_FLOW_ERROR_TYPE_ITEM,
906                         item,
907                         "ACL: TCP/UDP/SCTP required");
908         } /* switch */
909
910         item++;
911
912         /* VOID or disabled protos only, if any. */
913         status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
914         if (status)
915                 return status;
916
917         /* END only. */
918         if (item->type != RTE_FLOW_ITEM_TYPE_END)
919                 return rte_flow_error_set(error,
920                         EINVAL,
921                         RTE_FLOW_ERROR_TYPE_ITEM,
922                         item,
923                         "ACL: Expecting END item");
924
925         return 0;
926 }
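/*
 * Editorial example (hypothetical rule): for an ACL table, the item list
 *    ETH (NULL spec) / IPV4 / TCP / END
 * where the IPV4 mask covers src_addr = 255.255.255.0 and
 * next_proto_id = 0xFF, the IPV4 spec carries src_addr = 10.0.0.0 and
 * next_proto_id = 6, and the TCP spec/mask set dst_port = 80 / 0xFFFF,
 * is translated to:
 *    ip_version = 1, sa = 0x0A000000, sa_depth = 24, da = 0, da_depth = 0,
 *    proto = 6, proto_mask = 0xFF, sp0 = 0, sp1 = 65535, dp0 = dp1 = 80.
 */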
927
928 /***
929  * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
930  * respectively.
931  * They are located within a larger buffer at offsets *toffset* and *foffset*
932  * respectively. Both *tmask* and *fmask* represent bitmasks for the larger
933  * buffer.
934  * Question: are the two masks equivalent?
935  *
936  * Notes:
937  * 1. Offset basically indicates that the first offset bytes in the buffer
938  *    are "don't care", so offset is equivalent to pre-pending an "all-zeros"
939  *    array of *offset* bytes to the *mask*.
940  * 2. Each *mask* might contain a number of zero bytes at the beginning or
941  *    at the end.
942  * 3. Bytes in the larger buffer after the end of the *mask* are also considered
943  *    "don't care", so they are equivalent to appending an "all-zeros" array of
944  *    bytes to the *mask*.
945  *
946  * Example:
947  * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes
948  * tmask = [00 22 00 33 00], toffset = 2, tsize = 5
949  *    => buffer mask = [00 00 00 22 00 33 00 00]
950  * fmask = [22 00 33], foffset = 3, fsize = 3
951  *    => buffer mask = [00 00 00 22 00 33 00 00]
952  * Therefore, the tmask and fmask from this example are equivalent.
953  */
954 static int
955 hash_key_mask_is_same(uint8_t *tmask,
956         size_t toffset,
957         size_t tsize,
958         uint8_t *fmask,
959         size_t foffset,
960         size_t fsize,
961         size_t *toffset_plus,
962         size_t *foffset_plus)
963 {
964         size_t tpos; /* Position of first non-zero byte in the tmask buffer. */
965         size_t fpos; /* Position of first non-zero byte in the fmask buffer. */
966
967         /* Compute tpos and fpos. */
968         for (tpos = 0; tmask[tpos] == 0; tpos++)
969                 ;
970         for (fpos = 0; fmask[fpos] == 0; fpos++)
971                 ;
972
973         if (toffset + tpos != foffset + fpos)
974                 return 0; /* FALSE */
975
976         tsize -= tpos;
977         fsize -= fpos;
978
979         if (tsize < fsize) {
980                 size_t i;
981
982                 for (i = 0; i < tsize; i++)
983                         if (tmask[tpos + i] != fmask[fpos + i])
984                                 return 0; /* FALSE */
985
986                 for ( ; i < fsize; i++)
987                         if (fmask[fpos + i])
988                                 return 0; /* FALSE */
989         } else {
990                 size_t i;
991
992                 for (i = 0; i < fsize; i++)
993                         if (tmask[tpos + i] != fmask[fpos + i])
994                                 return 0; /* FALSE */
995
996                 for ( ; i < tsize; i++)
997                         if (tmask[tpos + i])
998                                 return 0; /* FALSE */
999         }
1000
1001         if (toffset_plus)
1002                 *toffset_plus = tpos;
1003
1004         if (foffset_plus)
1005                 *foffset_plus = fpos;
1006
1007         return 1; /* TRUE */
1008 }
1009
1010 static int
1011 flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused,
1012         struct pipeline *pipeline __rte_unused,
1013         struct softnic_table *table,
1014         const struct rte_flow_attr *attr __rte_unused,
1015         const struct rte_flow_item *item,
1016         struct softnic_table_rule_match *rule_match,
1017         struct rte_flow_error *error)
1018 {
1019         struct softnic_table_rule_match_hash key, key_mask;
1020         struct softnic_table_hash_params *params = &table->params.match.hash;
1021         size_t offset = 0, length = 0, tpos, fpos;
1022         int status;
1023
1024         memset(&key, 0, sizeof(key));
1025         memset(&key_mask, 0, sizeof(key_mask));
1026
1027         /* VOID or disabled protos only, if any. */
1028         status = flow_item_skip_disabled_protos(&item, 0, &offset, error);
1029         if (status)
1030                 return status;
1031
1032         if (item->type == RTE_FLOW_ITEM_TYPE_END)
1033                 return rte_flow_error_set(error,
1034                         EINVAL,
1035                         RTE_FLOW_ERROR_TYPE_ITEM,
1036                         item,
1037                         "HASH: END detected too early");
1038
1039         /* VOID or any protocols (enabled or disabled). */
1040         for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1041                 union flow_item spec, mask;
1042                 size_t size;
1043                 int disabled, status;
1044
1045                 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1046                         continue;
1047
1048                 status = flow_item_proto_preprocess(item,
1049                         &spec,
1050                         &mask,
1051                         &size,
1052                         &disabled,
1053                         error);
1054                 if (status)
1055                         return status;
1056
1057                 if (length + size > sizeof(key)) {
1058                         if (disabled)
1059                                 break;
1060
1061                         return rte_flow_error_set(error,
1062                                 ENOTSUP,
1063                                 RTE_FLOW_ERROR_TYPE_ITEM,
1064                                 item,
1065                                 "HASH: Item too big");
1066                 }
1067
1068                 memcpy(&key.key[length], &spec, size);
1069                 memcpy(&key_mask.key[length], &mask, size);
1070                 length += size;
1071         }
1072
1073         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1074                 /* VOID or disabled protos only, if any. */
1075                 status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
1076                 if (status)
1077                         return status;
1078
1079                 /* END only. */
1080                 if (item->type != RTE_FLOW_ITEM_TYPE_END)
1081                         return rte_flow_error_set(error,
1082                                 EINVAL,
1083                                 RTE_FLOW_ERROR_TYPE_ITEM,
1084                                 item,
1085                                 "HASH: Expecting END item");
1086         }
1087
1088         /* Compare flow key mask against table key mask. */
1089         offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
1090
1091         if (!hash_key_mask_is_same(params->key_mask,
1092                 params->key_offset,
1093                 params->key_size,
1094                 key_mask.key,
1095                 offset,
1096                 length,
1097                 &tpos,
1098                 &fpos))
1099                 return rte_flow_error_set(error,
1100                         EINVAL,
1101                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1102                         NULL,
1103                         "HASH: Item list does not match the table key mask");
1104
1105         /* Rule match. */
1106         memset(rule_match, 0, sizeof(*rule_match));
1107         rule_match->match_type = TABLE_HASH;
1108         memcpy(&rule_match->match.hash.key[tpos],
1109                 &key.key[fpos],
1110                 RTE_MIN(sizeof(rule_match->match.hash.key) - tpos,
1111                         length - fpos));
1112
1113         return 0;
1114 }
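/*
 * Editorial example (hypothetical table configuration): a hash table whose
 * key_mask selects only the IPv4 source and destination addresses (8 bytes
 * of 0xFF at key_offset = sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM +
 * 14 + 12, i.e. past the Ethernet header and 12 bytes into the IPv4 header)
 * is matched by the item list
 *    ETH (NULL spec) / IPV4 (mask: src_addr and dst_addr all-ones) / END
 * The disabled ETH item only contributes its size to the key offset, the
 * IPV4 item supplies the key bytes, and any other mask layout fails the
 * hash_key_mask_is_same() check above.
 */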
1115
1116 static int
1117 flow_rule_match_get(struct pmd_internals *softnic,
1118                 struct pipeline *pipeline,
1119                 struct softnic_table *table,
1120                 const struct rte_flow_attr *attr,
1121                 const struct rte_flow_item *item,
1122                 struct softnic_table_rule_match *rule_match,
1123                 struct rte_flow_error *error)
1124 {
1125         switch (table->params.match_type) {
1126         case TABLE_ACL:
1127                 return flow_rule_match_acl_get(softnic,
1128                         pipeline,
1129                         table,
1130                         attr,
1131                         item,
1132                         rule_match,
1133                         error);
1134
1135                 /* FALLTHROUGH */
1136
1137         case TABLE_HASH:
1138                 return flow_rule_match_hash_get(softnic,
1139                         pipeline,
1140                         table,
1141                         attr,
1142                         item,
1143                         rule_match,
1144                         error);
1145
1146                 /* FALLTHROUGH */
1147
1148         default:
1149                 return rte_flow_error_set(error,
1150                         ENOTSUP,
1151                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1152                         NULL,
1153                         "Unsupported pipeline table match type");
1154         }
1155 }
1156
1157 static int
1158 flow_rule_action_get(struct pmd_internals *softnic,
1159         struct pipeline *pipeline,
1160         struct softnic_table *table,
1161         const struct rte_flow_attr *attr,
1162         const struct rte_flow_action *action,
1163         struct softnic_table_rule_action *rule_action,
1164         struct rte_flow_error *error)
1165 {
1166         struct softnic_table_action_profile *profile;
1167         struct softnic_table_action_profile_params *params;
1168         int n_jump_queue_rss_drop = 0;
1169         int n_count = 0;
1170
1171         profile = softnic_table_action_profile_find(softnic,
1172                 table->params.action_profile_name);
1173         if (profile == NULL)
1174                 return rte_flow_error_set(error,
1175                         EINVAL,
1176                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1177                         action,
1178                         "Table action profile not found");
1179
1180         params = &profile->params;
1181
1182         for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
1183                 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1184                         continue;
1185
1186                 switch (action->type) {
1187                 case RTE_FLOW_ACTION_TYPE_JUMP:
1188                 {
1189                         const struct rte_flow_action_jump *conf = action->conf;
1190                         struct flow_attr_map *map;
1191
1192                         if (conf == NULL)
1193                                 return rte_flow_error_set(error,
1194                                         EINVAL,
1195                                         RTE_FLOW_ERROR_TYPE_ACTION,
1196                                         action,
1197                                         "JUMP: Null configuration");
1198
1199                         if (n_jump_queue_rss_drop)
1200                                 return rte_flow_error_set(error,
1201                                         EINVAL,
1202                                         RTE_FLOW_ERROR_TYPE_ACTION,
1203                                         action,
1204                                         "Only one termination action is"
1205                                         " allowed per flow");
1206
1207                         if ((params->action_mask &
1208                                 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1209                                 return rte_flow_error_set(error,
1210                                         EINVAL,
1211                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1212                                         NULL,
1213                                         "JUMP action not enabled for this table");
1214
1215                         n_jump_queue_rss_drop = 1;
1216
1217                         map = flow_attr_map_get(softnic,
1218                                 conf->group,
1219                                 attr->ingress);
1220                         if (map == NULL || map->valid == 0)
1221                                 return rte_flow_error_set(error,
1222                                         EINVAL,
1223                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1224                                         NULL,
1225                                         "JUMP: Invalid group mapping");
1226
1227                         if (strcmp(pipeline->name, map->pipeline_name) != 0)
1228                                 return rte_flow_error_set(error,
1229                                         ENOTSUP,
1230                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1231                                         NULL,
1232                                         "JUMP: Jump to table in different pipeline");
1233
1234                         /* RTE_TABLE_ACTION_FWD */
1235                         rule_action->fwd.action = RTE_PIPELINE_ACTION_TABLE;
1236                         rule_action->fwd.id = map->table_id;
1237                         rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1238                         break;
1239                 } /* RTE_FLOW_ACTION_TYPE_JUMP */
1240
1241                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1242                 {
1243                         char name[NAME_SIZE];
1244                         struct rte_eth_dev *dev;
1245                         const struct rte_flow_action_queue *conf = action->conf;
1246                         uint32_t port_id;
1247                         int status;
1248
1249                         if (conf == NULL)
1250                                 return rte_flow_error_set(error,
1251                                         EINVAL,
1252                                         RTE_FLOW_ERROR_TYPE_ACTION,
1253                                         action,
1254                                         "QUEUE: Null configuration");
1255
1256                         if (n_jump_queue_rss_drop)
1257                                 return rte_flow_error_set(error,
1258                                         EINVAL,
1259                                         RTE_FLOW_ERROR_TYPE_ACTION,
1260                                         action,
1261                                         "Only one termination action is allowed"
1262                                         " per flow");
1263
1264                         if ((params->action_mask &
1265                                 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1266                                 return rte_flow_error_set(error,
1267                                         EINVAL,
1268                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1269                                         NULL,
1270                                         "QUEUE action not enabled for this table");
1271
1272                         n_jump_queue_rss_drop = 1;
1273
1274                         dev = ETHDEV(softnic);
1275                         if (dev == NULL ||
1276                                 conf->index >= dev->data->nb_rx_queues)
1277                                 return rte_flow_error_set(error,
1278                                         EINVAL,
1279                                         RTE_FLOW_ERROR_TYPE_ACTION,
1280                                         action,
1281                                         "QUEUE: Invalid RX queue ID");
1282
1283                         sprintf(name, "RXQ%u", (uint32_t)conf->index);
1284
1285                         status = softnic_pipeline_port_out_find(softnic,
1286                                 pipeline->name,
1287                                 name,
1288                                 &port_id);
1289                         if (status)
1290                                 return rte_flow_error_set(error,
1291                                         ENOTSUP,
1292                                         RTE_FLOW_ERROR_TYPE_ACTION,
1293                                         action,
1294                                         "QUEUE: RX queue not accessible from this pipeline");
1295
1296                         /* RTE_TABLE_ACTION_FWD */
1297                         rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT;
1298                         rule_action->fwd.id = port_id;
1299                         rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1300                         break;
1301                 } /*RTE_FLOW_ACTION_TYPE_QUEUE */
1302
1303                 case RTE_FLOW_ACTION_TYPE_RSS:
1304                 {
1305                         const struct rte_flow_action_rss *conf = action->conf;
1306                         uint32_t i;
1307
1308                         if (conf == NULL)
1309                                 return rte_flow_error_set(error,
1310                                         EINVAL,
1311                                         RTE_FLOW_ERROR_TYPE_ACTION,
1312                                         action,
1313                                         "RSS: Null configuration");
1314
1315                         if (!rte_is_power_of_2(conf->queue_num))
1316                                 return rte_flow_error_set(error,
1317                                         EINVAL,
1318                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1319                                         conf,
1320                                         "RSS: Number of queues must be a power of 2");
1321
1322                         if (conf->queue_num > RTE_DIM(rule_action->lb.out))
1323                                 return rte_flow_error_set(error,
1324                                         EINVAL,
1325                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1326                                         conf,
1327                                         "RSS: Number of queues too big");
1328
1329                         if (n_jump_queue_rss_drop)
1330                                 return rte_flow_error_set(error,
1331                                         EINVAL,
1332                                         RTE_FLOW_ERROR_TYPE_ACTION,
1333                                         action,
1334                                         "Only one termination action is allowed per flow");
1335
1336                         if (((params->action_mask &
1337                                 (1LLU << RTE_TABLE_ACTION_FWD)) == 0) ||
1338                                 ((params->action_mask &
1339                                 (1LLU << RTE_TABLE_ACTION_LB)) == 0))
1340                                 return rte_flow_error_set(error,
1341                                         ENOTSUP,
1342                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1343                                         NULL,
1344                                         "RSS action not supported by this table");
1345
1346                         if (params->lb.out_offset !=
1347                                 pipeline->params.offset_port_id)
1348                                 return rte_flow_error_set(error,
1349                                         EINVAL,
1350                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1351                                         NULL,
1352                                         "RSS action not supported by this pipeline");
1353
1354                         n_jump_queue_rss_drop = 1;
1355
1356                         /* RTE_TABLE_ACTION_LB */
1357                         for (i = 0; i < conf->queue_num; i++) {
1358                                 char name[NAME_SIZE];
1359                                 struct rte_eth_dev *dev;
1360                                 uint32_t port_id;
1361                                 int status;
1362
1363                                 dev = ETHDEV(softnic);
1364                                 if (dev == NULL ||
1365                                         conf->queue[i] >=
1366                                                 dev->data->nb_rx_queues)
1367                                         return rte_flow_error_set(error,
1368                                                 EINVAL,
1369                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1370                                                 action,
1371                                                 "RSS: Invalid RX queue ID");
1372
1373                                 sprintf(name, "RXQ%u",
1374                                         (uint32_t)conf->queue[i]);
1375
1376                                 status = softnic_pipeline_port_out_find(softnic,
1377                                         pipeline->name,
1378                                         name,
1379                                         &port_id);
1380                                 if (status)
1381                                         return rte_flow_error_set(error,
1382                                                 ENOTSUP,
1383                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1384                                                 action,
1385                                                 "RSS: RX queue not accessible from this pipeline");
1386
1387                                 rule_action->lb.out[i] = port_id;
1388                         }
1389
1390                         for ( ; i < RTE_DIM(rule_action->lb.out); i++)
1391                                 rule_action->lb.out[i] =
1392                                 rule_action->lb.out[i % conf->queue_num];
1393
1394                         rule_action->action_mask |= 1 << RTE_TABLE_ACTION_LB;
1395
1396                         /* RTE_TABLE_ACTION_FWD */
1397                         rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
1398                         rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1399                         break;
1400                 } /* RTE_FLOW_ACTION_TYPE_RSS */
1401
1402                 case RTE_FLOW_ACTION_TYPE_DROP:
1403                 {
1404                         const void *conf = action->conf;
1405
1406                         if (conf != NULL)
1407                                 return rte_flow_error_set(error,
1408                                         EINVAL,
1409                                         RTE_FLOW_ERROR_TYPE_ACTION,
1410                                         action,
1411                                         "DROP: No configuration required");
1412
1413                         if (n_jump_queue_rss_drop)
1414                                 return rte_flow_error_set(error,
1415                                         EINVAL,
1416                                         RTE_FLOW_ERROR_TYPE_ACTION,
1417                                         action,
1418                                         "Only one termination action is allowed per flow");
1419                         if ((params->action_mask &
1420                                 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1421                                 return rte_flow_error_set(error,
1422                                         ENOTSUP,
1423                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1424                                         NULL,
1425                                         "DROP action not supported by this table");
1426
1427                         n_jump_queue_rss_drop = 1;
1428
1429                         /* RTE_TABLE_ACTION_FWD */
1430                         rule_action->fwd.action = RTE_PIPELINE_ACTION_DROP;
1431                         rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1432                         break;
1433                 } /* RTE_FLOW_ACTION_TYPE_DROP */
1434
1435                 case RTE_FLOW_ACTION_TYPE_COUNT:
1436                 {
1437                         const struct rte_flow_action_count *conf = action->conf;
1438
1439                         if (conf == NULL)
1440                                 return rte_flow_error_set(error,
1441                                         EINVAL,
1442                                         RTE_FLOW_ERROR_TYPE_ACTION,
1443                                         action,
1444                                         "COUNT: Null configuration");
1445
1446                         if (conf->shared)
1447                                 return rte_flow_error_set(error,
1448                                         ENOTSUP,
1449                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1450                                         conf,
1451                                         "COUNT: Shared counters not supported");
1452
1453                         if (n_count)
1454                                 return rte_flow_error_set(error,
1455                                         ENOTSUP,
1456                                         RTE_FLOW_ERROR_TYPE_ACTION,
1457                                         action,
1458                                         "Only one COUNT action per flow");
1459
1460                         if ((params->action_mask &
1461                                 (1LLU << RTE_TABLE_ACTION_STATS)) == 0)
1462                                 return rte_flow_error_set(error,
1463                                         ENOTSUP,
1464                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1465                                         NULL,
1466                                         "COUNT action not supported by this table");
1467
1468                         n_count = 1;
1469
1470                         /* RTE_TABLE_ACTION_STATS */
1471                         rule_action->stats.n_packets = 0;
1472                         rule_action->stats.n_bytes = 0;
1473                         rule_action->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
1474                         break;
1475                 } /* RTE_FLOW_ACTION_TYPE_COUNT */
1476
1477                 default:
1478                         return -ENOTSUP;
1479                 }
1480         }
1481
1482         if (n_jump_queue_rss_drop == 0)
1483                 return rte_flow_error_set(error,
1484                         EINVAL,
1485                         RTE_FLOW_ERROR_TYPE_ACTION,
1486                         action,
1487                         "Flow does not have any terminating action");
1488
1489         return 0;
1490 }
1491
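/*
 * Illustrative only: an rte_flow action list accepted by the parser above,
 * assuming RX queues 0-3 exist on the soft NIC port and the target table
 * profile enables the LB, FWD and STATS table actions. The RSS action is
 * translated into a load balancer rule spread over the pipeline output ports
 * wired to those RX queues, with the forwarding decision taken from the
 * packet metadata written by the load balancer.
 */
static const uint16_t app_rss_queues[] = {0, 1, 2, 3};

static const struct rte_flow_action_rss app_rss_conf = {
	.queue_num = RTE_DIM(app_rss_queues),
	.queue = app_rss_queues,
};

static const struct rte_flow_action_count app_count_conf = {
	.shared = 0,
	.id = 0,
};

static const struct rte_flow_action app_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &app_rss_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &app_count_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
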
1492 static int
1493 pmd_flow_validate(struct rte_eth_dev *dev,
1494                 const struct rte_flow_attr *attr,
1495                 const struct rte_flow_item item[],
1496                 const struct rte_flow_action action[],
1497                 struct rte_flow_error *error)
1498 {
1499         struct softnic_table_rule_match rule_match;
1500         struct softnic_table_rule_action rule_action;
1501
1502         struct pmd_internals *softnic = dev->data->dev_private;
1503         struct pipeline *pipeline;
1504         struct softnic_table *table;
1505         const char *pipeline_name = NULL;
1506         uint32_t table_id = 0;
1507         int status;
1508
1509         /* Check input parameters. */
1510         if (attr == NULL)
1511                 return rte_flow_error_set(error,
1512                                 EINVAL,
1513                                 RTE_FLOW_ERROR_TYPE_ATTR,
1514                                 NULL, "Null attr");
1515
1516         if (item == NULL)
1517                 return rte_flow_error_set(error,
1518                                 EINVAL,
1519                                 RTE_FLOW_ERROR_TYPE_ITEM,
1520                                 NULL,
1521                                 "Null item");
1522
1523         if (action == NULL)
1524                 return rte_flow_error_set(error,
1525                                 EINVAL,
1526                                 RTE_FLOW_ERROR_TYPE_ACTION,
1527                                 NULL,
1528                                 "Null action");
1529
1530         /* Identify the pipeline table to add this flow to. */
1531         status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
1532                                         &table_id, error);
1533         if (status)
1534                 return status;
1535
1536         pipeline = softnic_pipeline_find(softnic, pipeline_name);
1537         if (pipeline == NULL)
1538                 return rte_flow_error_set(error,
1539                                 EINVAL,
1540                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1541                                 NULL,
1542                                 "Invalid pipeline name");
1543
1544         if (table_id >= pipeline->n_tables)
1545                 return rte_flow_error_set(error,
1546                                 EINVAL,
1547                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1548                                 NULL,
1549                                 "Invalid pipeline table ID");
1550
1551         table = &pipeline->table[table_id];
1552
1553         /* Rule match. */
1554         memset(&rule_match, 0, sizeof(rule_match));
1555         status = flow_rule_match_get(softnic,
1556                         pipeline,
1557                         table,
1558                         attr,
1559                         item,
1560                         &rule_match,
1561                         error);
1562         if (status)
1563                 return status;
1564
1565         /* Rule action. */
1566         memset(&rule_action, 0, sizeof(rule_action));
1567         status = flow_rule_action_get(softnic,
1568                 pipeline,
1569                 table,
1570                 attr,
1571                 action,
1572                 &rule_action,
1573                 error);
1574         if (status)
1575                 return status;
1576
1577         return 0;
1578 }
1579
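/*
 * Illustrative only: attr->group of a validated/created flow selects a
 * (pipeline, table) pair that must have been registered beforehand with
 * flow_attr_map_set(); on the soft NIC this mapping is typically driven
 * from its CLI. The pipeline name and table ID below are hypothetical.
 */
static int
app_map_ingress_group0(struct pmd_internals *softnic)
{
	/* Ingress group 0 -> table 0 of pipeline "PIPELINE0". */
	return flow_attr_map_set(softnic, 0, 1, "PIPELINE0", 0);
}
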
1580 static struct rte_flow *
1581 pmd_flow_create(struct rte_eth_dev *dev,
1582         const struct rte_flow_attr *attr,
1583         const struct rte_flow_item item[],
1584         const struct rte_flow_action action[],
1585         struct rte_flow_error *error)
1586 {
1587         struct softnic_table_rule_match rule_match;
1588         struct softnic_table_rule_action rule_action;
1589         void *rule_data;
1590
1591         struct pmd_internals *softnic = dev->data->dev_private;
1592         struct pipeline *pipeline;
1593         struct softnic_table *table;
1594         struct rte_flow *flow;
1595         const char *pipeline_name = NULL;
1596         uint32_t table_id = 0;
1597         int new_flow, status;
1598
1599         /* Check input parameters. */
1600         if (attr == NULL) {
1601                 rte_flow_error_set(error,
1602                         EINVAL,
1603                         RTE_FLOW_ERROR_TYPE_ATTR,
1604                         NULL,
1605                         "Null attr");
1606                 return NULL;
1607         }
1608
1609         if (item == NULL) {
1610                 rte_flow_error_set(error,
1611                         EINVAL,
1612                         RTE_FLOW_ERROR_TYPE_ITEM,
1613                         NULL,
1614                         "Null item");
1615                 return NULL;
1616         }
1617
1618         if (action == NULL) {
1619                 rte_flow_error_set(error,
1620                         EINVAL,
1621                         RTE_FLOW_ERROR_TYPE_ACTION,
1622                         NULL,
1623                         "Null action");
1624                 return NULL;
1625         }
1626
1627         /* Identify the pipeline table to add this flow to. */
1628         status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
1629                                         &table_id, error);
1630         if (status)
1631                 return NULL;
1632
1633         pipeline = softnic_pipeline_find(softnic, pipeline_name);
1634         if (pipeline == NULL) {
1635                 rte_flow_error_set(error,
1636                         EINVAL,
1637                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1638                         NULL,
1639                         "Invalid pipeline name");
1640                 return NULL;
1641         }
1642
1643         if (table_id >= pipeline->n_tables) {
1644                 rte_flow_error_set(error,
1645                         EINVAL,
1646                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1647                         NULL,
1648                         "Invalid pipeline table ID");
1649                 return NULL;
1650         }
1651
1652         table = &pipeline->table[table_id];
1653
1654         /* Rule match. */
1655         memset(&rule_match, 0, sizeof(rule_match));
1656         status = flow_rule_match_get(softnic,
1657                 pipeline,
1658                 table,
1659                 attr,
1660                 item,
1661                 &rule_match,
1662                 error);
1663         if (status)
1664                 return NULL;
1665
1666         /* Rule action. */
1667         memset(&rule_action, 0, sizeof(rule_action));
1668         status = flow_rule_action_get(softnic,
1669                 pipeline,
1670                 table,
1671                 attr,
1672                 action,
1673                 &rule_action,
1674                 error);
1675         if (status)
1676                 return NULL;
1677
1678         /* Flow find/allocate. */
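	/*
	 * Flows are keyed by their match fields: creating a flow whose match
	 * is identical to an existing one reuses that flow object and updates
	 * its table entry in place instead of adding a duplicate.
	 */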
1679         new_flow = 0;
1680         flow = softnic_flow_find(table, &rule_match);
1681         if (flow == NULL) {
1682                 new_flow = 1;
1683                 flow = calloc(1, sizeof(struct rte_flow));
1684                 if (flow == NULL) {
1685                         rte_flow_error_set(error,
1686                                 ENOMEM,
1687                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1688                                 NULL,
1689                                 "Not enough memory for new flow");
1690                         return NULL;
1691                 }
1692         }
1693
1694         /* Rule add. */
1695         status = softnic_pipeline_table_rule_add(softnic,
1696                 pipeline_name,
1697                 table_id,
1698                 &rule_match,
1699                 &rule_action,
1700                 &rule_data);
1701         if (status) {
1702                 if (new_flow)
1703                         free(flow);
1704
1705                 rte_flow_error_set(error,
1706                         EINVAL,
1707                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1708                         NULL,
1709                         "Pipeline table rule add failed");
1710                 return NULL;
1711         }
1712
1713         /* Flow fill in. */
1714         memcpy(&flow->match, &rule_match, sizeof(rule_match));
1715         memcpy(&flow->action, &rule_action, sizeof(rule_action));
1716         flow->data = rule_data;
1717         flow->pipeline = pipeline;
1718         flow->table_id = table_id;
1719
1720         /* Flow add to list. */
1721         if (new_flow)
1722                 TAILQ_INSERT_TAIL(&table->flows, flow, node);
1723
1724         return flow;
1725 }
1726
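/*
 * Illustrative only: creating a DROP flow from an application, assuming
 * ingress group 0 is mapped to a pipeline table whose match type covers
 * this pattern (e.g. an ACL/5-tuple table) and whose action profile
 * includes the FWD table action; port_id is the soft NIC ethdev port and
 * the helper name is hypothetical.
 */
static struct rte_flow *
app_create_drop_flow(uint16_t port_id)
{
	static const struct rte_flow_attr attr = {
		.group = 0,
		.ingress = 1,
	};
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.dst_addr = rte_htonl(0xc0a80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr.dst_addr = rte_htonl(0xffffffff),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{
			.type = RTE_FLOW_ITEM_TYPE_IPV4,
			.spec = &ipv4_spec,
			.mask = &ipv4_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
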
1727 static int
1728 pmd_flow_destroy(struct rte_eth_dev *dev,
1729         struct rte_flow *flow,
1730         struct rte_flow_error *error)
1731 {
1732         struct pmd_internals *softnic = dev->data->dev_private;
1733         struct softnic_table *table;
1734         int status;
1735
1736         /* Check input parameters. */
1737         if (flow == NULL)
1738                 return rte_flow_error_set(error,
1739                         EINVAL,
1740                         RTE_FLOW_ERROR_TYPE_HANDLE,
1741                         NULL,
1742                         "Null flow");
1743
1744         table = &flow->pipeline->table[flow->table_id];
1745
1746         /* Rule delete. */
1747         status = softnic_pipeline_table_rule_delete(softnic,
1748                 flow->pipeline->name,
1749                 flow->table_id,
1750                 &flow->match);
1751         if (status)
1752                 return rte_flow_error_set(error,
1753                         EINVAL,
1754                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1755                         NULL,
1756                         "Pipeline table rule delete failed");
1757
1758         /* Flow delete. */
1759         TAILQ_REMOVE(&table->flows, flow, node);
1760         free(flow);
1761
1762         return 0;
1763 }
1764
1765 static int
1766 pmd_flow_query(struct rte_eth_dev *dev __rte_unused,
1767         struct rte_flow *flow,
1768         const struct rte_flow_action *action __rte_unused,
1769         void *data,
1770         struct rte_flow_error *error)
1771 {
1772         struct rte_table_action_stats_counters stats;
1773         struct softnic_table *table;
1774         struct rte_flow_query_count *flow_stats = data;
1775         int status;
1776
1777         /* Check input parameters. */
1778         if (flow == NULL)
1779                 return rte_flow_error_set(error,
1780                         EINVAL,
1781                         RTE_FLOW_ERROR_TYPE_HANDLE,
1782                         NULL,
1783                         "Null flow");
1784
1785         if (data == NULL)
1786                 return rte_flow_error_set(error,
1787                         EINVAL,
1788                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1789                         NULL,
1790                         "Null data");
1791
1792         table = &flow->pipeline->table[flow->table_id];
1793
1794         /* Rule stats read. */
1795         status = rte_table_action_stats_read(table->a,
1796                 flow->data,
1797                 &stats,
1798                 flow_stats->reset);
1799         if (status)
1800                 return rte_flow_error_set(error,
1801                         EINVAL,
1802                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1803                         NULL,
1804                         "Pipeline table rule stats read failed");
1805
1806         /* Fill in flow stats. */
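	/*
	 * hits_set/bytes_set indicate whether the table action profile was
	 * configured with packet/byte counters enabled; only the counters
	 * flagged as set are meaningful to the caller.
	 */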
1807         flow_stats->hits_set =
1808                 (table->ap->params.stats.n_packets_enabled) ? 1 : 0;
1809         flow_stats->bytes_set =
1810                 (table->ap->params.stats.n_bytes_enabled) ? 1 : 0;
1811         flow_stats->hits = stats.n_packets;
1812         flow_stats->bytes = stats.n_bytes;
1813
1814         return 0;
1815 }
1816
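/*
 * Illustrative only: reading the COUNT statistics of a flow that was created
 * with a COUNT action. The helper name and its out-parameter are hypothetical.
 */
static int
app_flow_hit_count(uint16_t port_id, struct rte_flow *flow, uint64_t *hits)
{
	struct rte_flow_query_count counters = { .reset = 0 };
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_error err;
	int status;

	status = rte_flow_query(port_id, flow, &count_action, &counters, &err);
	if (status)
		return status;

	*hits = counters.hits_set ? counters.hits : 0;
	return 0;
}
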
1817 const struct rte_flow_ops pmd_flow_ops = {
1818         .validate = pmd_flow_validate,
1819         .create = pmd_flow_create,
1820         .destroy = pmd_flow_destroy,
1821         .flush = NULL,
1822         .query = pmd_flow_query,
1823         .isolate = NULL,
1824 };
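
/*
 * Illustrative only: .flush and .isolate are left unimplemented above, so the
 * corresponding generic rte_flow calls are expected to fail on soft NIC
 * ports. An application can emulate a flush by tracking its own flow handles
 * and destroying them one by one; the helper below is hypothetical.
 */
static void
app_flush_flows(uint16_t port_id, struct rte_flow *flows[], uint32_t n_flows)
{
	struct rte_flow_error err;
	uint32_t i;

	for (i = 0; i < n_flows; i++) {
		if (flows[i] == NULL)
			continue;
		if (rte_flow_destroy(port_id, flows[i], &err) == 0)
			flows[i] = NULL;
	}
}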