net/sfc: support IPV6 in flow API filters
[dpdk.git] drivers/net/sfc/sfc_flow.c
/*-
 * Copyright (c) 2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * At the moment, the flow API is implemented in such a manner that each
 * flow rule is converted to a hardware filter.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */
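/*
 * For illustration only (a sketch, not part of the driver): an
 * application-side rule that the parsers below can convert might look
 * roughly as follows; "port_id" stands for the port identifier and all
 * other names are arbitrary. The rule matches IPv6 packets destined to
 * 2001:db8::1 and directs them to Rx queue 1.
 *
 *	static const struct rte_flow_attr attr = { .ingress = 1 };
 *	static const struct rte_flow_item_ipv6 ipv6_spec = {
 *		.hdr.dst_addr = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0,
 *				  0, 0, 0, 0, 0, 0, 0, 0x01 },
 *	};
 *	static const struct rte_flow_item_ipv6 ipv6_mask = {
 *		.hdr.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 *				  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 *				  0xff, 0xff },
 *	};
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV6,
 *		  .spec = &ipv6_spec, .mask = &ipv6_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	static const struct rte_flow_action_queue queue = { .index = 1 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *
 * sfc_flow_parse() maps such a rule to a single efx_filter_spec_t.
 */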

enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" pointers for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t match;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, the default mask is used,
	 * but if the default mask is NULL, "mask" must be set explicitly
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = (const uint8_t *)def_mask;
	} else {
		mask = (const uint8_t *)item->mask;
	}

	spec = (const uint8_t *)item->spec;
	last = (const uint8_t *)item->last;

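	/*
	 * A NULL "spec" is valid: the check above guarantees that "last"
	 * and "mask" are then absent from the item too, so the item
	 * constrains nothing and callers treat it as matching any value.
	 */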
	if (spec == NULL)
		goto exit;

	/*
	 * If the field values in "last" are either all zeros or equal to the
	 * corresponding values in "spec", then "last" is ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask and spec do not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		match = spec[i] | mask[i];
		supp = ((const uint8_t *)supp_mask)[i];

		if ((match | supp) != supp) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}

/*
 * Protocol parsers.
 * Partial masking is not supported: each field mask in an item must be
 * either completely filled or completely zeroed, and may be set only for
 * the fields covered by the corresponding supp_mask.
 */
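/*
 * For example, in the Ethernet item a destination MAC mask of
 * ff:ff:ff:ff:ff:ff adds EFX_FILTER_MATCH_LOC_MAC to the filter match,
 * an all-zero mask leaves the destination address out of the match
 * entirely, and any other mask is rejected as a bad mask.
 */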

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, the item matches any Ethernet frame */
	if (spec == NULL)
		return 0;

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * The Ethernet type is in big-endian byte order in the item and
	 * in little-endian in efx_spec, so a byte swap is needed
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
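	/*
	 * For example, in the pattern ETH / VLAN / VLAN the VID of the first
	 * VLAN item becomes the outer VID and the VID of the second one the
	 * inner VID; a third VLAN item is rejected below.
	 */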
	if (mask->tci == supp_mask.tci) {
		vid = rte_bswap16(spec->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

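/*
 * Table of supported pattern items. Together with the layer checks in
 * sfc_flow_parse_pattern(), it allows patterns of the form
 * [ETH [VLAN [VLAN]]] [IPV4 | IPV6], where leading items may be omitted
 * and VOID items may appear at any position.
 */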
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv6,
	},
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;

	return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of the pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		rc = item->parse(pattern, &flow->spec, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sa->rxq_count)
		return -EINVAL;

	rxq = sa->rxq_info[queue->index].rxq;
	flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	boolean_t is_specified = B_FALSE;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	if (!is_specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
				   "Action is unspecified");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	memset(&flow->spec, 0, sizeof(flow->spec));

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Flow rule pattern is not supported");
		return -rte_errno;
	}

fail_bad_value:
	return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	sfc_adapter_unlock(sa);

fail_bad_value:
	rte_free(flow);

fail_no_mem:
	return NULL;
}

static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_remove(sa->nic, &flow->spec);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}

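/*
 * Generic rte_flow callbacks exposed by the driver: they back
 * rte_flow_validate(), rte_flow_create(), rte_flow_destroy() and
 * rte_flow_flush() for the port; rte_flow_query() is not supported.
 */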
const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
};

void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		efx_filter_remove(sa->nic, &flow->spec);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}