net/sfc: generalise flow parsing
[dpdk.git] / drivers / net / sfc / sfc_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2017-2018 Solarflare Communications Inc.
4  * All rights reserved.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9
10 #include <rte_byteorder.h>
11 #include <rte_tailq.h>
12 #include <rte_common.h>
13 #include <rte_ethdev_driver.h>
14 #include <rte_ether.h>
15 #include <rte_flow.h>
16 #include <rte_flow_driver.h>
17
18 #include "efx.h"
19
20 #include "sfc.h"
21 #include "sfc_rx.h"
22 #include "sfc_filter.h"
23 #include "sfc_flow.h"
24 #include "sfc_log.h"
25 #include "sfc_dp_rx.h"
26
/*
 * Operations selected by the type of the flow specification.
 * Only parsing is generalised so far; sfc_flow_parse_cb_t is
 * presumably declared in sfc_flow.h — confirm there.
 */
struct sfc_flow_ops_by_spec {
	sfc_flow_parse_cb_t	*parse;		/* Convert rte_flow rule to spec */
};
30
/* Parse callback for filter-based (VNIC) flow specifications */
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;

/* Operations bound to SFC_FLOW_SPEC_FILTER specifications */
static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
	.parse = sfc_flow_parse_rte_to_filter,
};
36
37 static const struct sfc_flow_ops_by_spec *
38 sfc_flow_get_ops_by_spec(struct rte_flow *flow)
39 {
40         struct sfc_flow_spec *spec = &flow->spec;
41         const struct sfc_flow_ops_by_spec *ops = NULL;
42
43         switch (spec->type) {
44         case SFC_FLOW_SPEC_FILTER:
45                 ops = &sfc_flow_ops_filter;
46                 break;
47         default:
48                 SFC_ASSERT(false);
49                 break;
50         }
51
52         return ops;
53 }
54
55 /*
56  * Currently, filter-based (VNIC) flow API is implemented in such a manner
57  * that each flow rule is converted to one or more hardware filters.
58  * All elements of flow rule (attributes, pattern items, actions)
59  * correspond to one or more fields in the efx_filter_spec_s structure
60  * that is responsible for the hardware filter.
61  * If some required field is unset in the flow rule, then a handful
62  * of filter copies will be created to cover all possible values
63  * of such a field.
64  */
65
/*
 * Network layer a pattern item belongs to. The layer/prev_layer pair in
 * struct sfc_flow_item suggests this is used to validate item ordering
 * during pattern parsing — the checking code is outside this chunk.
 */
enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,	/* Valid at any position */
	SFC_FLOW_ITEM_START_LAYER,	/* Start of the pattern */
	SFC_FLOW_ITEM_L2,		/* Ethernet / VLAN */
	SFC_FLOW_ITEM_L3,		/* IPv4 / IPv6 */
	SFC_FLOW_ITEM_L4,		/* TCP / UDP */
};
73
/*
 * Parse callback: convert one flow pattern item into fields of the
 * EFX filter specification being built. Returns 0 or negative errno
 * with the rte_flow error filled in.
 */
typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

/* Descriptor of one supported pattern item type */
struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};
84
/* Forward declarations of the per-item parse callbacks defined below */
static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
/*
 * Callback: populate the filter copies created for an unset field with
 * all possible values of that field (see the "handful of filter copies"
 * note above); filters_count_for_one_val copies exist per value.
 */
typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

/*
 * Callback: report whether the given match flag may be added to the
 * specification (B_TRUE when suitable).
 */
typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

/* Descriptor of a match flag whose unset field requires filter copies */
struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};
117
/* Handlers (defined below) for the match flags that need filter copies */
static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
126 static boolean_t
127 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
128 {
129         uint8_t sum = 0;
130         unsigned int i;
131
132         for (i = 0; i < size; i++)
133                 sum |= buf[i];
134
135         return (sum == 0) ? B_TRUE : B_FALSE;
136 }
137
138 /*
139  * Validate item and prepare structures spec and mask for parsing
140  */
141 static int
142 sfc_flow_parse_init(const struct rte_flow_item *item,
143                     const void **spec_ptr,
144                     const void **mask_ptr,
145                     const void *supp_mask,
146                     const void *def_mask,
147                     unsigned int size,
148                     struct rte_flow_error *error)
149 {
150         const uint8_t *spec;
151         const uint8_t *mask;
152         const uint8_t *last;
153         uint8_t supp;
154         unsigned int i;
155
156         if (item == NULL) {
157                 rte_flow_error_set(error, EINVAL,
158                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
159                                    "NULL item");
160                 return -rte_errno;
161         }
162
163         if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
164                 rte_flow_error_set(error, EINVAL,
165                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
166                                    "Mask or last is set without spec");
167                 return -rte_errno;
168         }
169
170         /*
171          * If "mask" is not set, default mask is used,
172          * but if default mask is NULL, "mask" should be set
173          */
174         if (item->mask == NULL) {
175                 if (def_mask == NULL) {
176                         rte_flow_error_set(error, EINVAL,
177                                 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
178                                 "Mask should be specified");
179                         return -rte_errno;
180                 }
181
182                 mask = def_mask;
183         } else {
184                 mask = item->mask;
185         }
186
187         spec = item->spec;
188         last = item->last;
189
190         if (spec == NULL)
191                 goto exit;
192
193         /*
194          * If field values in "last" are either 0 or equal to the corresponding
195          * values in "spec" then they are ignored
196          */
197         if (last != NULL &&
198             !sfc_flow_is_zero(last, size) &&
199             memcmp(last, spec, size) != 0) {
200                 rte_flow_error_set(error, ENOTSUP,
201                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
202                                    "Ranging is not supported");
203                 return -rte_errno;
204         }
205
206         if (supp_mask == NULL) {
207                 rte_flow_error_set(error, EINVAL,
208                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
209                         "Supported mask for item should be specified");
210                 return -rte_errno;
211         }
212
213         /* Check that mask does not ask for more match than supp_mask */
214         for (i = 0; i < size; i++) {
215                 supp = ((const uint8_t *)supp_mask)[i];
216
217                 if (~supp & mask[i]) {
218                         rte_flow_error_set(error, ENOTSUP,
219                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
220                                            "Item's field is not supported");
221                         return -rte_errno;
222                 }
223         }
224
225 exit:
226         *spec_ptr = spec;
227         *mask_ptr = mask;
228         return 0;
229 }
230
231 /*
232  * Protocol parsers.
233  * Masking is not supported, so masks in items should be either
234  * full or empty (zeroed) and set only for supported fields which
235  * are specified in the supp_mask.
236  */
237
/*
 * VOID pattern items carry no match criteria, so there is nothing to
 * convert: accept the item and leave the filter specification intact.
 */
static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}
245
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return 0 on success, negative errno (with @p error set) on failure.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	/* Inner frames: only the destination MAC may be matched */
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	/* Individual/group bit of the destination MAC address */
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	/* A previously parsed tunnel item switches us to the inner frame */
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		/* Fully masked destination: match the exact MAC address */
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		/*
		 * Only the I/G bit masked: match all unknown unicast or all
		 * unknown multicast destinations depending on spec's bit
		 */
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
360
/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask can not be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return 0 on success, negative errno (with @p error set) on failure.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	/* NULL def_mask: the item must carry an explicit mask */
	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	/*
	 * If EtherType was already matched by a preceding item, this VLAN
	 * item would effectively constrain the TPID, which is unsupported
	 */
	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	/* inner_type is big-endian in item, little-endian in efx_spec */
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}
445
/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return 0 on success, negative errno (with @p error set) on failure.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	/* efs_ether_type is little-endian, hence the cpu_to_le conversion */
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	/* If "spec" is not set, could be any IPv4 packet */
	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}
535
/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return 0 on success, negative errno (with @p error set) on failure.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	/* efs_ether_type is little-endian, hence the cpu_to_le conversion */
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	/* If "spec" is not set, could be any IPv6 packet */
	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}
643
644 /**
645  * Convert TCP item to EFX filter specification.
646  *
647  * @param item[in]
648  *   Item specification. Only source and destination ports fields
649  *   are supported. If the mask is NULL, default mask will be used.
650  *   Ranging is not supported.
651  * @param efx_spec[in, out]
652  *   EFX filter specification to update.
653  * @param[out] error
654  *   Perform verbose error reporting if not NULL.
655  */
656 static int
657 sfc_flow_parse_tcp(const struct rte_flow_item *item,
658                    efx_filter_spec_t *efx_spec,
659                    struct rte_flow_error *error)
660 {
661         int rc;
662         const struct rte_flow_item_tcp *spec = NULL;
663         const struct rte_flow_item_tcp *mask = NULL;
664         const struct rte_flow_item_tcp supp_mask = {
665                 .hdr = {
666                         .src_port = 0xffff,
667                         .dst_port = 0xffff,
668                 }
669         };
670
671         rc = sfc_flow_parse_init(item,
672                                  (const void **)&spec,
673                                  (const void **)&mask,
674                                  &supp_mask,
675                                  &rte_flow_item_tcp_mask,
676                                  sizeof(struct rte_flow_item_tcp),
677                                  error);
678         if (rc != 0)
679                 return rc;
680
681         /*
682          * Filtering by TCP source and destination ports requires
683          * the appropriate IP_PROTO in hardware filters
684          */
685         if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
686                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
687                 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
688         } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
689                 rte_flow_error_set(error, EINVAL,
690                         RTE_FLOW_ERROR_TYPE_ITEM, item,
691                         "IP proto in pattern with TCP item should be appropriate");
692                 return -rte_errno;
693         }
694
695         if (spec == NULL)
696                 return 0;
697
698         /*
699          * Source and destination ports are in big-endian byte order in item and
700          * in little-endian in efx_spec, so byte swap is used
701          */
702         if (mask->hdr.src_port == supp_mask.hdr.src_port) {
703                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
704                 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
705         } else if (mask->hdr.src_port != 0) {
706                 goto fail_bad_mask;
707         }
708
709         if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
710                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
711                 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
712         } else if (mask->hdr.dst_port != 0) {
713                 goto fail_bad_mask;
714         }
715
716         return 0;
717
718 fail_bad_mask:
719         rte_flow_error_set(error, EINVAL,
720                            RTE_FLOW_ERROR_TYPE_ITEM, item,
721                            "Bad mask in the TCP pattern item");
722         return -rte_errno;
723 }
724
725 /**
726  * Convert UDP item to EFX filter specification.
727  *
728  * @param item[in]
729  *   Item specification. Only source and destination ports fields
730  *   are supported. If the mask is NULL, default mask will be used.
731  *   Ranging is not supported.
732  * @param efx_spec[in, out]
733  *   EFX filter specification to update.
734  * @param[out] error
735  *   Perform verbose error reporting if not NULL.
736  */
737 static int
738 sfc_flow_parse_udp(const struct rte_flow_item *item,
739                    efx_filter_spec_t *efx_spec,
740                    struct rte_flow_error *error)
741 {
742         int rc;
743         const struct rte_flow_item_udp *spec = NULL;
744         const struct rte_flow_item_udp *mask = NULL;
745         const struct rte_flow_item_udp supp_mask = {
746                 .hdr = {
747                         .src_port = 0xffff,
748                         .dst_port = 0xffff,
749                 }
750         };
751
752         rc = sfc_flow_parse_init(item,
753                                  (const void **)&spec,
754                                  (const void **)&mask,
755                                  &supp_mask,
756                                  &rte_flow_item_udp_mask,
757                                  sizeof(struct rte_flow_item_udp),
758                                  error);
759         if (rc != 0)
760                 return rc;
761
762         /*
763          * Filtering by UDP source and destination ports requires
764          * the appropriate IP_PROTO in hardware filters
765          */
766         if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
767                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
768                 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
769         } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
770                 rte_flow_error_set(error, EINVAL,
771                         RTE_FLOW_ERROR_TYPE_ITEM, item,
772                         "IP proto in pattern with UDP item should be appropriate");
773                 return -rte_errno;
774         }
775
776         if (spec == NULL)
777                 return 0;
778
779         /*
780          * Source and destination ports are in big-endian byte order in item and
781          * in little-endian in efx_spec, so byte swap is used
782          */
783         if (mask->hdr.src_port == supp_mask.hdr.src_port) {
784                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
785                 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
786         } else if (mask->hdr.src_port != 0) {
787                 goto fail_bad_mask;
788         }
789
790         if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
791                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
792                 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
793         } else if (mask->hdr.dst_port != 0) {
794                 goto fail_bad_mask;
795         }
796
797         return 0;
798
799 fail_bad_mask:
800         rte_flow_error_set(error, EINVAL,
801                            RTE_FLOW_ERROR_TYPE_ITEM, item,
802                            "Bad mask in the UDP pattern item");
803         return -rte_errno;
804 }
805
806 /*
807  * Filters for encapsulated packets match based on the EtherType and IP
808  * protocol in the outer frame.
809  */
810 static int
811 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
812                                         efx_filter_spec_t *efx_spec,
813                                         uint8_t ip_proto,
814                                         struct rte_flow_error *error)
815 {
816         if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
817                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
818                 efx_spec->efs_ip_proto = ip_proto;
819         } else if (efx_spec->efs_ip_proto != ip_proto) {
820                 switch (ip_proto) {
821                 case EFX_IPPROTO_UDP:
822                         rte_flow_error_set(error, EINVAL,
823                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
824                                 "Outer IP header protocol must be UDP "
825                                 "in VxLAN/GENEVE pattern");
826                         return -rte_errno;
827
828                 case EFX_IPPROTO_GRE:
829                         rte_flow_error_set(error, EINVAL,
830                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
831                                 "Outer IP header protocol must be GRE "
832                                 "in NVGRE pattern");
833                         return -rte_errno;
834
835                 default:
836                         rte_flow_error_set(error, EINVAL,
837                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
838                                 "Only VxLAN/GENEVE/NVGRE tunneling patterns "
839                                 "are supported");
840                         return -rte_errno;
841                 }
842         }
843
844         if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
845             efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
846             efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
847                 rte_flow_error_set(error, EINVAL,
848                         RTE_FLOW_ERROR_TYPE_ITEM, item,
849                         "Outer frame EtherType in pattern with tunneling "
850                         "must be IPv4 or IPv6");
851                 return -rte_errno;
852         }
853
854         return 0;
855 }
856
857 static int
858 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
859                                   const uint8_t *vni_or_vsid_val,
860                                   const uint8_t *vni_or_vsid_mask,
861                                   const struct rte_flow_item *item,
862                                   struct rte_flow_error *error)
863 {
864         const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
865                 0xff, 0xff, 0xff
866         };
867
868         if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
869                    EFX_VNI_OR_VSID_LEN) == 0) {
870                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
871                 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
872                            EFX_VNI_OR_VSID_LEN);
873         } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
874                 rte_flow_error_set(error, EINVAL,
875                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
876                                    "Unsupported VNI/VSID mask");
877                 return -rte_errno;
878         }
879
880         return 0;
881 }
882
883 /**
884  * Convert VXLAN item to EFX filter specification.
885  *
886  * @param item[in]
887  *   Item specification. Only VXLAN network identifier field is supported.
888  *   If the mask is NULL, default mask will be used.
889  *   Ranging is not supported.
890  * @param efx_spec[in, out]
891  *   EFX filter specification to update.
892  * @param[out] error
893  *   Perform verbose error reporting if not NULL.
894  */
895 static int
896 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
897                      efx_filter_spec_t *efx_spec,
898                      struct rte_flow_error *error)
899 {
900         int rc;
901         const struct rte_flow_item_vxlan *spec = NULL;
902         const struct rte_flow_item_vxlan *mask = NULL;
903         const struct rte_flow_item_vxlan supp_mask = {
904                 .vni = { 0xff, 0xff, 0xff }
905         };
906
907         rc = sfc_flow_parse_init(item,
908                                  (const void **)&spec,
909                                  (const void **)&mask,
910                                  &supp_mask,
911                                  &rte_flow_item_vxlan_mask,
912                                  sizeof(struct rte_flow_item_vxlan),
913                                  error);
914         if (rc != 0)
915                 return rc;
916
917         rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
918                                                      EFX_IPPROTO_UDP, error);
919         if (rc != 0)
920                 return rc;
921
922         efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
923         efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
924
925         if (spec == NULL)
926                 return 0;
927
928         rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
929                                                mask->vni, item, error);
930
931         return rc;
932 }
933
934 /**
935  * Convert GENEVE item to EFX filter specification.
936  *
937  * @param item[in]
938  *   Item specification. Only Virtual Network Identifier and protocol type
939  *   fields are supported. But protocol type can be only Ethernet (0x6558).
940  *   If the mask is NULL, default mask will be used.
941  *   Ranging is not supported.
942  * @param efx_spec[in, out]
943  *   EFX filter specification to update.
944  * @param[out] error
945  *   Perform verbose error reporting if not NULL.
946  */
947 static int
948 sfc_flow_parse_geneve(const struct rte_flow_item *item,
949                       efx_filter_spec_t *efx_spec,
950                       struct rte_flow_error *error)
951 {
952         int rc;
953         const struct rte_flow_item_geneve *spec = NULL;
954         const struct rte_flow_item_geneve *mask = NULL;
955         const struct rte_flow_item_geneve supp_mask = {
956                 .protocol = RTE_BE16(0xffff),
957                 .vni = { 0xff, 0xff, 0xff }
958         };
959
960         rc = sfc_flow_parse_init(item,
961                                  (const void **)&spec,
962                                  (const void **)&mask,
963                                  &supp_mask,
964                                  &rte_flow_item_geneve_mask,
965                                  sizeof(struct rte_flow_item_geneve),
966                                  error);
967         if (rc != 0)
968                 return rc;
969
970         rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
971                                                      EFX_IPPROTO_UDP, error);
972         if (rc != 0)
973                 return rc;
974
975         efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
976         efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
977
978         if (spec == NULL)
979                 return 0;
980
981         if (mask->protocol == supp_mask.protocol) {
982                 if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
983                         rte_flow_error_set(error, EINVAL,
984                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
985                                 "GENEVE encap. protocol must be Ethernet "
986                                 "(0x6558) in the GENEVE pattern item");
987                         return -rte_errno;
988                 }
989         } else if (mask->protocol != 0) {
990                 rte_flow_error_set(error, EINVAL,
991                         RTE_FLOW_ERROR_TYPE_ITEM, item,
992                         "Unsupported mask for GENEVE encap. protocol");
993                 return -rte_errno;
994         }
995
996         rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
997                                                mask->vni, item, error);
998
999         return rc;
1000 }
1001
1002 /**
1003  * Convert NVGRE item to EFX filter specification.
1004  *
1005  * @param item[in]
1006  *   Item specification. Only virtual subnet ID field is supported.
1007  *   If the mask is NULL, default mask will be used.
1008  *   Ranging is not supported.
1009  * @param efx_spec[in, out]
1010  *   EFX filter specification to update.
1011  * @param[out] error
1012  *   Perform verbose error reporting if not NULL.
1013  */
1014 static int
1015 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
1016                      efx_filter_spec_t *efx_spec,
1017                      struct rte_flow_error *error)
1018 {
1019         int rc;
1020         const struct rte_flow_item_nvgre *spec = NULL;
1021         const struct rte_flow_item_nvgre *mask = NULL;
1022         const struct rte_flow_item_nvgre supp_mask = {
1023                 .tni = { 0xff, 0xff, 0xff }
1024         };
1025
1026         rc = sfc_flow_parse_init(item,
1027                                  (const void **)&spec,
1028                                  (const void **)&mask,
1029                                  &supp_mask,
1030                                  &rte_flow_item_nvgre_mask,
1031                                  sizeof(struct rte_flow_item_nvgre),
1032                                  error);
1033         if (rc != 0)
1034                 return rc;
1035
1036         rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
1037                                                      EFX_IPPROTO_GRE, error);
1038         if (rc != 0)
1039                 return rc;
1040
1041         efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1042         efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
1043
1044         if (spec == NULL)
1045                 return 0;
1046
1047         rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
1048                                                mask->tni, item, error);
1049
1050         return rc;
1051 }
1052
/*
 * Table of supported pattern items. Looked up linearly by item type in
 * sfc_flow_get_item(); order of entries does not matter. The prev_layer
 * and layer fields encode the protocol ordering that
 * sfc_flow_parse_pattern() enforces.
 */
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		/* VOID matches nothing and is allowed at any position */
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		/* VLAN follows ETH and stays at L2 */
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_udp,
	},
	{
		/*
		 * Tunnel items set layer back to START_LAYER: the layer
		 * tracking restarts for the inner (encapsulated) frame.
		 * VXLAN is carried over UDP, hence prev_layer is L4.
		 */
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		/* GENEVE is carried over UDP as well */
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		/* NVGRE sits directly on IP (GRE), hence prev_layer is L3 */
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_nvgre,
	},
};
1115
1116 /*
1117  * Protocol-independent flow API support
1118  */
1119 static int
1120 sfc_flow_parse_attr(const struct rte_flow_attr *attr,
1121                     struct rte_flow *flow,
1122                     struct rte_flow_error *error)
1123 {
1124         struct sfc_flow_spec *spec = &flow->spec;
1125         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1126
1127         if (attr == NULL) {
1128                 rte_flow_error_set(error, EINVAL,
1129                                    RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1130                                    "NULL attribute");
1131                 return -rte_errno;
1132         }
1133         if (attr->group != 0) {
1134                 rte_flow_error_set(error, ENOTSUP,
1135                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1136                                    "Groups are not supported");
1137                 return -rte_errno;
1138         }
1139         if (attr->egress != 0) {
1140                 rte_flow_error_set(error, ENOTSUP,
1141                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1142                                    "Egress is not supported");
1143                 return -rte_errno;
1144         }
1145         if (attr->ingress == 0) {
1146                 rte_flow_error_set(error, ENOTSUP,
1147                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1148                                    "Ingress is compulsory");
1149                 return -rte_errno;
1150         }
1151         if (attr->transfer == 0) {
1152                 if (attr->priority != 0) {
1153                         rte_flow_error_set(error, ENOTSUP,
1154                                            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1155                                            attr, "Priorities are unsupported");
1156                         return -rte_errno;
1157                 }
1158                 spec->type = SFC_FLOW_SPEC_FILTER;
1159                 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
1160                 spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1161         } else {
1162                 rte_flow_error_set(error, ENOTSUP,
1163                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
1164                                    "Transfer is not supported");
1165                 return -rte_errno;
1166         }
1167
1168         return 0;
1169 }
1170
1171 /* Get item from array sfc_flow_items */
1172 static const struct sfc_flow_item *
1173 sfc_flow_get_item(enum rte_flow_item_type type)
1174 {
1175         unsigned int i;
1176
1177         for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
1178                 if (sfc_flow_items[i].type == type)
1179                         return &sfc_flow_items[i];
1180
1181         return NULL;
1182 }
1183
1184 static int
1185 sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
1186                        struct rte_flow *flow,
1187                        struct rte_flow_error *error)
1188 {
1189         int rc;
1190         unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1191         boolean_t is_ifrm = B_FALSE;
1192         const struct sfc_flow_item *item;
1193         struct sfc_flow_spec *spec = &flow->spec;
1194         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1195
1196         if (pattern == NULL) {
1197                 rte_flow_error_set(error, EINVAL,
1198                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1199                                    "NULL pattern");
1200                 return -rte_errno;
1201         }
1202
1203         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1204                 item = sfc_flow_get_item(pattern->type);
1205                 if (item == NULL) {
1206                         rte_flow_error_set(error, ENOTSUP,
1207                                            RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1208                                            "Unsupported pattern item");
1209                         return -rte_errno;
1210                 }
1211
1212                 /*
1213                  * Omitting one or several protocol layers at the beginning
1214                  * of pattern is supported
1215                  */
1216                 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1217                     prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1218                     item->prev_layer != prev_layer) {
1219                         rte_flow_error_set(error, ENOTSUP,
1220                                            RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1221                                            "Unexpected sequence of pattern items");
1222                         return -rte_errno;
1223                 }
1224
1225                 /*
1226                  * Allow only VOID and ETH pattern items in the inner frame.
1227                  * Also check that there is only one tunneling protocol.
1228                  */
1229                 switch (item->type) {
1230                 case RTE_FLOW_ITEM_TYPE_VOID:
1231                 case RTE_FLOW_ITEM_TYPE_ETH:
1232                         break;
1233
1234                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1235                 case RTE_FLOW_ITEM_TYPE_GENEVE:
1236                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1237                         if (is_ifrm) {
1238                                 rte_flow_error_set(error, EINVAL,
1239                                         RTE_FLOW_ERROR_TYPE_ITEM,
1240                                         pattern,
1241                                         "More than one tunneling protocol");
1242                                 return -rte_errno;
1243                         }
1244                         is_ifrm = B_TRUE;
1245                         break;
1246
1247                 default:
1248                         if (is_ifrm) {
1249                                 rte_flow_error_set(error, EINVAL,
1250                                         RTE_FLOW_ERROR_TYPE_ITEM,
1251                                         pattern,
1252                                         "There is an unsupported pattern item "
1253                                         "in the inner frame");
1254                                 return -rte_errno;
1255                         }
1256                         break;
1257                 }
1258
1259                 rc = item->parse(pattern, &spec_filter->template, error);
1260                 if (rc != 0)
1261                         return rc;
1262
1263                 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1264                         prev_layer = item->layer;
1265         }
1266
1267         return 0;
1268 }
1269
1270 static int
1271 sfc_flow_parse_queue(struct sfc_adapter *sa,
1272                      const struct rte_flow_action_queue *queue,
1273                      struct rte_flow *flow)
1274 {
1275         struct sfc_flow_spec *spec = &flow->spec;
1276         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1277         struct sfc_rxq *rxq;
1278
1279         if (queue->index >= sfc_sa2shared(sa)->rxq_count)
1280                 return -EINVAL;
1281
1282         rxq = &sa->rxq_ctrl[queue->index];
1283         spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1284
1285         return 0;
1286 }
1287
1288 static int
1289 sfc_flow_parse_rss(struct sfc_adapter *sa,
1290                    const struct rte_flow_action_rss *action_rss,
1291                    struct rte_flow *flow)
1292 {
1293         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1294         struct sfc_rss *rss = &sas->rss;
1295         unsigned int rxq_sw_index;
1296         struct sfc_rxq *rxq;
1297         unsigned int rxq_hw_index_min;
1298         unsigned int rxq_hw_index_max;
1299         efx_rx_hash_type_t efx_hash_types;
1300         const uint8_t *rss_key;
1301         struct sfc_flow_spec *spec = &flow->spec;
1302         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1303         struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
1304         unsigned int i;
1305
1306         if (action_rss->queue_num == 0)
1307                 return -EINVAL;
1308
1309         rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
1310         rxq = &sa->rxq_ctrl[rxq_sw_index];
1311         rxq_hw_index_min = rxq->hw_index;
1312         rxq_hw_index_max = 0;
1313
1314         for (i = 0; i < action_rss->queue_num; ++i) {
1315                 rxq_sw_index = action_rss->queue[i];
1316
1317                 if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
1318                         return -EINVAL;
1319
1320                 rxq = &sa->rxq_ctrl[rxq_sw_index];
1321
1322                 if (rxq->hw_index < rxq_hw_index_min)
1323                         rxq_hw_index_min = rxq->hw_index;
1324
1325                 if (rxq->hw_index > rxq_hw_index_max)
1326                         rxq_hw_index_max = rxq->hw_index;
1327         }
1328
1329         switch (action_rss->func) {
1330         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1331         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1332                 break;
1333         default:
1334                 return -EINVAL;
1335         }
1336
1337         if (action_rss->level)
1338                 return -EINVAL;
1339
1340         /*
1341          * Dummy RSS action with only one queue and no specific settings
1342          * for hash types and key does not require dedicated RSS context
1343          * and may be simplified to single queue action.
1344          */
1345         if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1346             action_rss->key_len == 0) {
1347                 spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
1348                 return 0;
1349         }
1350
1351         if (action_rss->types) {
1352                 int rc;
1353
1354                 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1355                                           &efx_hash_types);
1356                 if (rc != 0)
1357                         return -rc;
1358         } else {
1359                 unsigned int i;
1360
1361                 efx_hash_types = 0;
1362                 for (i = 0; i < rss->hf_map_nb_entries; ++i)
1363                         efx_hash_types |= rss->hf_map[i].efx;
1364         }
1365
1366         if (action_rss->key_len) {
1367                 if (action_rss->key_len != sizeof(rss->key))
1368                         return -EINVAL;
1369
1370                 rss_key = action_rss->key;
1371         } else {
1372                 rss_key = rss->key;
1373         }
1374
1375         spec_filter->rss = B_TRUE;
1376
1377         sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1378         sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1379         sfc_rss_conf->rss_hash_types = efx_hash_types;
1380         rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1381
1382         for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1383                 unsigned int nb_queues = action_rss->queue_num;
1384                 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
1385                 struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
1386
1387                 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1388         }
1389
1390         return 0;
1391 }
1392
1393 static int
1394 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1395                     unsigned int filters_count)
1396 {
1397         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1398         unsigned int i;
1399         int ret = 0;
1400
1401         for (i = 0; i < filters_count; i++) {
1402                 int rc;
1403
1404                 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1405                 if (ret == 0 && rc != 0) {
1406                         sfc_err(sa, "failed to remove filter specification "
1407                                 "(rc = %d)", rc);
1408                         ret = rc;
1409                 }
1410         }
1411
1412         return ret;
1413 }
1414
1415 static int
1416 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1417 {
1418         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1419         unsigned int i;
1420         int rc = 0;
1421
1422         for (i = 0; i < spec_filter->count; i++) {
1423                 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1424                 if (rc != 0) {
1425                         sfc_flow_spec_flush(sa, spec, i);
1426                         break;
1427                 }
1428         }
1429
1430         return rc;
1431 }
1432
1433 static int
1434 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1435 {
1436         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1437
1438         return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1439 }
1440
/*
 * Apply a parsed flow rule to the hardware.
 *
 * For an RSS rule, an exclusive RSS context is allocated and configured
 * (mode and key) first, every filter copy is pointed at it, the filters
 * are inserted, and the indirection table is programmed last. Any
 * failure unwinds all partially applied state via the goto chain.
 */
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
	/* DEFAULT doubles as "no context allocated" in the cleanup path */
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	unsigned int i;
	int rc = 0;

	if (spec_filter->rss) {
		/*
		 * Spread covers the span of hardware RxQ indices used by
		 * the rule, capped at EFX_MAXRSS.
		 */
		unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
					      flow_rss->rxq_hw_index_min + 1,
					      EFX_MAXRSS);

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   rss->hash_alg,
					   flow_rss->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  flow_rss->rss_key,
					  sizeof(rss->key));
		if (rc != 0)
			goto fail_scale_key_set;

		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < spec_filter->count; i++) {
			efx_filter_spec_t *spec = &spec_filter->filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (spec_filter->rss) {
		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  flow_rss->rss_tbl,
					  RTE_DIM(flow_rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	return 0;

fail_scale_tbl_set:
	/* Filters are already in the HW; take them back out */
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	/* Free the RSS context only if one was actually allocated */
	if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
}
1526
1527 static int
1528 sfc_flow_filter_remove(struct sfc_adapter *sa,
1529                        struct rte_flow *flow)
1530 {
1531         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1532         int rc = 0;
1533
1534         rc = sfc_flow_spec_remove(sa, &flow->spec);
1535         if (rc != 0)
1536                 return rc;
1537
1538         if (spec_filter->rss) {
1539                 /*
1540                  * All specifications for a given flow rule have the same RSS
1541                  * context, so that RSS context value is taken from the first
1542                  * filter specification
1543                  */
1544                 efx_filter_spec_t *spec = &spec_filter->filters[0];
1545
1546                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1547         }
1548
1549         return rc;
1550 }
1551
1552 static int
1553 sfc_flow_parse_mark(struct sfc_adapter *sa,
1554                     const struct rte_flow_action_mark *mark,
1555                     struct rte_flow *flow)
1556 {
1557         struct sfc_flow_spec *spec = &flow->spec;
1558         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1559         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1560
1561         if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1562                 return EINVAL;
1563
1564         spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1565         spec_filter->template.efs_mark = mark->id;
1566
1567         return 0;
1568 }
1569
/*
 * Convert the list of flow rule actions into settings in the filter
 * specification template (flow->spec.filter.template).
 *
 * At most one fate action (QUEUE, RSS or DROP) and at most one mark
 * action (MARK or FLAG) may be present; if no fate action is given,
 * matching traffic is dropped.
 *
 * @param sa[in]
 *   SFC adapter.
 * @param actions[in]
 *   Array of actions terminated by RTE_FLOW_ACTION_TYPE_END.
 * @param flow[in, out]
 *   Flow whose filter specification template is updated.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
	/* Bit per already-seen action type, to detect conflicts */
	uint32_t actions_set = 0;
	/* Actions that decide the fate of matching packets */
	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
	/* Actions that attach metadata to matching packets */
	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

/* Compile-time check that the action type fits into the bit mask */
#define SFC_BUILD_SET_OVERFLOW(_action, _set) \
	RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
					       actions_set);
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			/* NOTE: parse_rss appears to return a negative code */
			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			spec_filter->template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			/* The Rx datapath must be able to deliver the flag */
			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"FLAG action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			spec_filter->template.efs_flags |=
				EFX_FILTER_FLAG_ACTION_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			/* The Rx datapath must be able to deliver the mark */
			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"MARK action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad MARK action");
				return -rte_errno;
			}
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}

		actions_set |= (1UL << actions->type);
	}
#undef SFC_BUILD_SET_OVERFLOW

	/* When fate is unknown, drop traffic. */
	if ((actions_set & fate_actions_mask) == 0) {
		spec_filter->template.efs_dmaq_id =
			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
	}

	return 0;

fail_fate_actions:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Cannot combine several fate-deciding actions, "
			   "choose between QUEUE, RSS or DROP");
	return -rte_errno;

fail_actions_overlap:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Overlapping actions are not supported");
	return -rte_errno;
}
1713
1714 /**
1715  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1716  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1717  * specifications after copying.
1718  *
1719  * @param spec[in, out]
1720  *   SFC flow specification to update.
1721  * @param filters_count_for_one_val[in]
1722  *   How many specifications should have the same match flag, what is the
1723  *   number of specifications before copying.
1724  * @param error[out]
1725  *   Perform verbose error reporting if not NULL.
1726  */
1727 static int
1728 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1729                                unsigned int filters_count_for_one_val,
1730                                struct rte_flow_error *error)
1731 {
1732         unsigned int i;
1733         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1734         static const efx_filter_match_flags_t vals[] = {
1735                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1736                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1737         };
1738
1739         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1740                 rte_flow_error_set(error, EINVAL,
1741                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1742                         "Number of specifications is incorrect while copying "
1743                         "by unknown destination flags");
1744                 return -rte_errno;
1745         }
1746
1747         for (i = 0; i < spec_filter->count; i++) {
1748                 /* The check above ensures that divisor can't be zero here */
1749                 spec_filter->filters[i].efs_match_flags |=
1750                         vals[i / filters_count_for_one_val];
1751         }
1752
1753         return 0;
1754 }
1755
1756 /**
1757  * Check that the following conditions are met:
1758  * - the list of supported filters has a filter
1759  *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1760  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1761  *   be inserted.
1762  *
1763  * @param match[in]
1764  *   The match flags of filter.
1765  * @param spec[in]
1766  *   Specification to be supplemented.
1767  * @param filter[in]
1768  *   SFC filter with list of supported filters.
1769  */
1770 static boolean_t
1771 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1772                                  __rte_unused efx_filter_spec_t *spec,
1773                                  struct sfc_filter *filter)
1774 {
1775         unsigned int i;
1776         efx_filter_match_flags_t match_mcast_dst;
1777
1778         match_mcast_dst =
1779                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1780                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1781         for (i = 0; i < filter->supported_match_num; i++) {
1782                 if (match_mcast_dst == filter->supported_match[i])
1783                         return B_TRUE;
1784         }
1785
1786         return B_FALSE;
1787 }
1788
1789 /**
1790  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1791  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1792  * specifications after copying.
1793  *
1794  * @param spec[in, out]
1795  *   SFC flow specification to update.
1796  * @param filters_count_for_one_val[in]
1797  *   How many specifications should have the same EtherType value, what is the
1798  *   number of specifications before copying.
1799  * @param error[out]
1800  *   Perform verbose error reporting if not NULL.
1801  */
1802 static int
1803 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1804                         unsigned int filters_count_for_one_val,
1805                         struct rte_flow_error *error)
1806 {
1807         unsigned int i;
1808         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1809         static const uint16_t vals[] = {
1810                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1811         };
1812
1813         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1814                 rte_flow_error_set(error, EINVAL,
1815                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1816                         "Number of specifications is incorrect "
1817                         "while copying by Ethertype");
1818                 return -rte_errno;
1819         }
1820
1821         for (i = 0; i < spec_filter->count; i++) {
1822                 spec_filter->filters[i].efs_match_flags |=
1823                         EFX_FILTER_MATCH_ETHER_TYPE;
1824
1825                 /*
1826                  * The check above ensures that
1827                  * filters_count_for_one_val is not 0
1828                  */
1829                 spec_filter->filters[i].efs_ether_type =
1830                         vals[i / filters_count_for_one_val];
1831         }
1832
1833         return 0;
1834 }
1835
1836 /**
1837  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1838  * in the same specifications after copying.
1839  *
1840  * @param spec[in, out]
1841  *   SFC flow specification to update.
1842  * @param filters_count_for_one_val[in]
1843  *   How many specifications should have the same match flag, what is the
1844  *   number of specifications before copying.
1845  * @param error[out]
1846  *   Perform verbose error reporting if not NULL.
1847  */
1848 static int
1849 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1850                             unsigned int filters_count_for_one_val,
1851                             struct rte_flow_error *error)
1852 {
1853         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1854         unsigned int i;
1855
1856         if (filters_count_for_one_val != spec_filter->count) {
1857                 rte_flow_error_set(error, EINVAL,
1858                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1859                         "Number of specifications is incorrect "
1860                         "while copying by outer VLAN ID");
1861                 return -rte_errno;
1862         }
1863
1864         for (i = 0; i < spec_filter->count; i++) {
1865                 spec_filter->filters[i].efs_match_flags |=
1866                         EFX_FILTER_MATCH_OUTER_VID;
1867
1868                 spec_filter->filters[i].efs_outer_vid = 0;
1869         }
1870
1871         return 0;
1872 }
1873
1874 /**
1875  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1876  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1877  * specifications after copying.
1878  *
1879  * @param spec[in, out]
1880  *   SFC flow specification to update.
1881  * @param filters_count_for_one_val[in]
1882  *   How many specifications should have the same match flag, what is the
1883  *   number of specifications before copying.
1884  * @param error[out]
1885  *   Perform verbose error reporting if not NULL.
1886  */
1887 static int
1888 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1889                                     unsigned int filters_count_for_one_val,
1890                                     struct rte_flow_error *error)
1891 {
1892         unsigned int i;
1893         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1894         static const efx_filter_match_flags_t vals[] = {
1895                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1896                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1897         };
1898
1899         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1900                 rte_flow_error_set(error, EINVAL,
1901                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1902                         "Number of specifications is incorrect while copying "
1903                         "by inner frame unknown destination flags");
1904                 return -rte_errno;
1905         }
1906
1907         for (i = 0; i < spec_filter->count; i++) {
1908                 /* The check above ensures that divisor can't be zero here */
1909                 spec_filter->filters[i].efs_match_flags |=
1910                         vals[i / filters_count_for_one_val];
1911         }
1912
1913         return 0;
1914 }
1915
1916 /**
1917  * Check that the following conditions are met:
1918  * - the specification corresponds to a filter for encapsulated traffic
1919  * - the list of supported filters has a filter
1920  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1921  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1922  *   be inserted.
1923  *
1924  * @param match[in]
1925  *   The match flags of filter.
1926  * @param spec[in]
1927  *   Specification to be supplemented.
1928  * @param filter[in]
1929  *   SFC filter with list of supported filters.
1930  */
1931 static boolean_t
1932 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1933                                       efx_filter_spec_t *spec,
1934                                       struct sfc_filter *filter)
1935 {
1936         unsigned int i;
1937         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1938         efx_filter_match_flags_t match_mcast_dst;
1939
1940         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1941                 return B_FALSE;
1942
1943         match_mcast_dst =
1944                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1945                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1946         for (i = 0; i < filter->supported_match_num; i++) {
1947                 if (match_mcast_dst == filter->supported_match[i])
1948                         return B_TRUE;
1949         }
1950
1951         return B_FALSE;
1952 }
1953
1954 /**
1955  * Check that the list of supported filters has a filter that differs
1956  * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID
1957  * in this case that filter will be used and the flag
1958  * EFX_FILTER_MATCH_OUTER_VID is not needed.
1959  *
1960  * @param match[in]
1961  *   The match flags of filter.
1962  * @param spec[in]
1963  *   Specification to be supplemented.
1964  * @param filter[in]
1965  *   SFC filter with list of supported filters.
1966  */
1967 static boolean_t
1968 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
1969                               __rte_unused efx_filter_spec_t *spec,
1970                               struct sfc_filter *filter)
1971 {
1972         unsigned int i;
1973         efx_filter_match_flags_t match_without_vid =
1974                 match & ~EFX_FILTER_MATCH_OUTER_VID;
1975
1976         for (i = 0; i < filter->supported_match_num; i++) {
1977                 if (match_without_vid == filter->supported_match[i])
1978                         return B_FALSE;
1979         }
1980
1981         return B_TRUE;
1982 }
1983
1984 /*
1985  * Match flags that can be automatically added to filters.
1986  * Selecting the last minimum when searching for the copy flag ensures that the
1987  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
1988  * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
1989  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
1990  * filters.
1991  */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	/* Two copies: unknown unicast / unknown multicast destination */
	{
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	/* Two copies: IPv4 / IPv6 EtherType values */
	{
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	/* Inner frame counterpart of the unknown destination flags */
	{
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
	/* Single copy with outer VLAN ID fixed to 0 */
	{
		.flag = EFX_FILTER_MATCH_OUTER_VID,
		.vals_count = 1,
		.set_vals = sfc_flow_set_outer_vid_flag,
		.spec_check = sfc_flow_check_outer_vid_flag,
	},
};
2018
2019 /* Get item from array sfc_flow_copy_flags */
2020 static const struct sfc_flow_copy_flag *
2021 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2022 {
2023         unsigned int i;
2024
2025         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2026                 if (sfc_flow_copy_flags[i].flag == flag)
2027                         return &sfc_flow_copy_flags[i];
2028         }
2029
2030         return NULL;
2031 }
2032
2033 /**
2034  * Make copies of the specifications, set match flag and values
2035  * of the field that corresponds to it.
2036  *
2037  * @param spec[in, out]
2038  *   SFC flow specification to update.
2039  * @param flag[in]
2040  *   The match flag to add.
2041  * @param error[out]
2042  *   Perform verbose error reporting if not NULL.
2043  */
2044 static int
2045 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2046                              efx_filter_match_flags_t flag,
2047                              struct rte_flow_error *error)
2048 {
2049         unsigned int i;
2050         unsigned int new_filters_count;
2051         unsigned int filters_count_for_one_val;
2052         const struct sfc_flow_copy_flag *copy_flag;
2053         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2054         int rc;
2055
2056         copy_flag = sfc_flow_get_copy_flag(flag);
2057         if (copy_flag == NULL) {
2058                 rte_flow_error_set(error, ENOTSUP,
2059                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2060                                    "Unsupported spec field for copying");
2061                 return -rte_errno;
2062         }
2063
2064         new_filters_count = spec_filter->count * copy_flag->vals_count;
2065         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2066                 rte_flow_error_set(error, EINVAL,
2067                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2068                         "Too much EFX specifications in the flow rule");
2069                 return -rte_errno;
2070         }
2071
2072         /* Copy filters specifications */
2073         for (i = spec_filter->count; i < new_filters_count; i++) {
2074                 spec_filter->filters[i] =
2075                         spec_filter->filters[i - spec_filter->count];
2076         }
2077
2078         filters_count_for_one_val = spec_filter->count;
2079         spec_filter->count = new_filters_count;
2080
2081         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2082         if (rc != 0)
2083                 return rc;
2084
2085         return 0;
2086 }
2087
2088 /**
2089  * Check that the given set of match flags missing in the original filter spec
2090  * could be covered by adding spec copies which specify the corresponding
2091  * flags and packet field values to match.
2092  *
2093  * @param miss_flags[in]
2094  *   Flags that are missing until the supported filter.
2095  * @param spec[in]
2096  *   Specification to be supplemented.
2097  * @param filter[in]
2098  *   SFC filter.
2099  *
2100  * @return
2101  *   Number of specifications after copy or 0, if the flags can not be added.
2102  */
2103 static unsigned int
2104 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2105                              efx_filter_spec_t *spec,
2106                              struct sfc_filter *filter)
2107 {
2108         unsigned int i;
2109         efx_filter_match_flags_t copy_flags = 0;
2110         efx_filter_match_flags_t flag;
2111         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2112         sfc_flow_spec_check *check;
2113         unsigned int multiplier = 1;
2114
2115         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2116                 flag = sfc_flow_copy_flags[i].flag;
2117                 check = sfc_flow_copy_flags[i].spec_check;
2118                 if ((flag & miss_flags) == flag) {
2119                         if (check != NULL && (!check(match, spec, filter)))
2120                                 continue;
2121
2122                         copy_flags |= flag;
2123                         multiplier *= sfc_flow_copy_flags[i].vals_count;
2124                 }
2125         }
2126
2127         if (copy_flags == miss_flags)
2128                 return multiplier;
2129
2130         return 0;
2131 }
2132
2133 /**
2134  * Attempt to supplement the specification template to the minimally
2135  * supported set of match flags. To do this, it is necessary to copy
2136  * the specifications, filling them with the values of fields that
2137  * correspond to the missing flags.
2138  * The necessary and sufficient filter is built from the fewest number
2139  * of copies which could be made to cover the minimally required set
2140  * of flags.
2141  *
2142  * @param sa[in]
2143  *   SFC adapter.
2144  * @param spec[in, out]
2145  *   SFC flow specification to update.
2146  * @param error[out]
2147  *   Perform verbose error reporting if not NULL.
2148  */
static int
sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
			       struct sfc_flow_spec *spec,
			       struct rte_flow_error *error)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_filter *filter = &sa->filter;
	efx_filter_match_flags_t miss_flags;
	efx_filter_match_flags_t min_miss_flags = 0;
	efx_filter_match_flags_t match;
	/* UINT_MAX means no suitable supported filter found yet */
	unsigned int min_multiplier = UINT_MAX;
	unsigned int multiplier;
	unsigned int i;
	int rc;

	/*
	 * Search supported filters whose match flags are a superset of the
	 * template's flags and pick the one requiring the fewest copies.
	 */
	match = spec_filter->template.efs_match_flags;
	for (i = 0; i < filter->supported_match_num; i++) {
		if ((match & filter->supported_match[i]) == match) {
			miss_flags = filter->supported_match[i] & (~match);
			multiplier = sfc_flow_check_missing_flags(miss_flags,
				&spec_filter->template, filter);
			if (multiplier > 0) {
				/*
				 * '<=' keeps the LAST minimum so that flags
				 * of later entries in the supported list take
				 * priority; see the comment above
				 * sfc_flow_copy_flags.
				 */
				if (multiplier <= min_multiplier) {
					min_multiplier = multiplier;
					min_miss_flags = miss_flags;
				}
			}
		}
	}

	if (min_multiplier == UINT_MAX) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "The flow rule pattern is unsupported");
		return -rte_errno;
	}

	/* Add each missing flag of the chosen filter by copying the specs */
	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;

		if ((flag & min_miss_flags) == flag) {
			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}
2198
2199 /**
2200  * Check that set of match flags is referred to by a filter. Filter is
2201  * described by match flags with the ability to add OUTER_VID and INNER_VID
2202  * flags.
2203  *
2204  * @param match_flags[in]
2205  *   Set of match flags.
2206  * @param flags_pattern[in]
2207  *   Pattern of filter match flags.
2208  */
2209 static boolean_t
2210 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2211                             efx_filter_match_flags_t flags_pattern)
2212 {
2213         if ((match_flags & flags_pattern) != flags_pattern)
2214                 return B_FALSE;
2215
2216         switch (match_flags & ~flags_pattern) {
2217         case 0:
2218         case EFX_FILTER_MATCH_OUTER_VID:
2219         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2220                 return B_TRUE;
2221         default:
2222                 return B_FALSE;
2223         }
2224 }
2225
2226 /**
2227  * Check whether the spec maps to a hardware filter which is known to be
2228  * ineffective despite being valid.
2229  *
2230  * @param filter[in]
2231  *   SFC filter with list of supported filters.
2232  * @param spec[in]
2233  *   SFC flow specification.
2234  */
2235 static boolean_t
2236 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2237                                   struct sfc_flow_spec *spec)
2238 {
2239         unsigned int i;
2240         uint16_t ether_type;
2241         uint8_t ip_proto;
2242         efx_filter_match_flags_t match_flags;
2243         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2244
2245         for (i = 0; i < spec_filter->count; i++) {
2246                 match_flags = spec_filter->filters[i].efs_match_flags;
2247
2248                 if (sfc_flow_is_match_with_vids(match_flags,
2249                                                 EFX_FILTER_MATCH_ETHER_TYPE) ||
2250                     sfc_flow_is_match_with_vids(match_flags,
2251                                                 EFX_FILTER_MATCH_ETHER_TYPE |
2252                                                 EFX_FILTER_MATCH_LOC_MAC)) {
2253                         ether_type = spec_filter->filters[i].efs_ether_type;
2254                         if (filter->supports_ip_proto_or_addr_filter &&
2255                             (ether_type == EFX_ETHER_TYPE_IPV4 ||
2256                              ether_type == EFX_ETHER_TYPE_IPV6))
2257                                 return B_TRUE;
2258                 } else if (sfc_flow_is_match_with_vids(match_flags,
2259                                 EFX_FILTER_MATCH_ETHER_TYPE |
2260                                 EFX_FILTER_MATCH_IP_PROTO) ||
2261                            sfc_flow_is_match_with_vids(match_flags,
2262                                 EFX_FILTER_MATCH_ETHER_TYPE |
2263                                 EFX_FILTER_MATCH_IP_PROTO |
2264                                 EFX_FILTER_MATCH_LOC_MAC)) {
2265                         ip_proto = spec_filter->filters[i].efs_ip_proto;
2266                         if (filter->supports_rem_or_local_port_filter &&
2267                             (ip_proto == EFX_IPPROTO_TCP ||
2268                              ip_proto == EFX_IPPROTO_UDP))
2269                                 return B_TRUE;
2270                 }
2271         }
2272
2273         return B_FALSE;
2274 }
2275
2276 static int
2277 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2278                               struct rte_flow *flow,
2279                               struct rte_flow_error *error)
2280 {
2281         struct sfc_flow_spec *spec = &flow->spec;
2282         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2283         efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2284         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2285         int rc;
2286
2287         /* Initialize the first filter spec with template */
2288         spec_filter->filters[0] = *spec_tmpl;
2289         spec_filter->count = 1;
2290
2291         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2292                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2293                 if (rc != 0)
2294                         return rc;
2295         }
2296
2297         if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2298                 rte_flow_error_set(error, ENOTSUP,
2299                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2300                         "The flow rule pattern is unsupported");
2301                 return -rte_errno;
2302         }
2303
2304         return 0;
2305 }
2306
2307 static int
2308 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2309                              const struct rte_flow_item pattern[],
2310                              const struct rte_flow_action actions[],
2311                              struct rte_flow *flow,
2312                              struct rte_flow_error *error)
2313 {
2314         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2315         int rc;
2316
2317         rc = sfc_flow_parse_pattern(pattern, flow, error);
2318         if (rc != 0)
2319                 goto fail_bad_value;
2320
2321         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2322         if (rc != 0)
2323                 goto fail_bad_value;
2324
2325         rc = sfc_flow_validate_match_flags(sa, flow, error);
2326         if (rc != 0)
2327                 goto fail_bad_value;
2328
2329         return 0;
2330
2331 fail_bad_value:
2332         return rc;
2333 }
2334
2335 static int
2336 sfc_flow_parse(struct rte_eth_dev *dev,
2337                const struct rte_flow_attr *attr,
2338                const struct rte_flow_item pattern[],
2339                const struct rte_flow_action actions[],
2340                struct rte_flow *flow,
2341                struct rte_flow_error *error)
2342 {
2343         const struct sfc_flow_ops_by_spec *ops;
2344         int rc;
2345
2346         rc = sfc_flow_parse_attr(attr, flow, error);
2347         if (rc != 0)
2348                 return rc;
2349
2350         ops = sfc_flow_get_ops_by_spec(flow);
2351         if (ops == NULL || ops->parse == NULL) {
2352                 rte_flow_error_set(error, ENOTSUP,
2353                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2354                                    "No backend to handle this flow");
2355                 return -rte_errno;
2356         }
2357
2358         return ops->parse(dev, pattern, actions, flow, error);
2359 }
2360
2361 static struct rte_flow *
2362 sfc_flow_zmalloc(struct rte_flow_error *error)
2363 {
2364         struct rte_flow *flow;
2365
2366         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2367         if (flow == NULL) {
2368                 rte_flow_error_set(error, ENOMEM,
2369                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2370                                    "Failed to allocate memory");
2371         }
2372
2373         return flow;
2374 }
2375
/*
 * Release a flow rule object allocated by sfc_flow_zmalloc().
 * The adapter argument is currently unused; it is kept so the
 * free interface mirrors spec-aware call sites.
 */
static void
sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow)
{
	rte_free(flow);
}
2381
2382 static int
2383 sfc_flow_validate(struct rte_eth_dev *dev,
2384                   const struct rte_flow_attr *attr,
2385                   const struct rte_flow_item pattern[],
2386                   const struct rte_flow_action actions[],
2387                   struct rte_flow_error *error)
2388 {
2389         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2390         struct rte_flow *flow;
2391         int rc;
2392
2393         flow = sfc_flow_zmalloc(error);
2394         if (flow == NULL)
2395                 return -rte_errno;
2396
2397         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2398
2399         sfc_flow_free(sa, flow);
2400
2401         return rc;
2402 }
2403
2404 static struct rte_flow *
2405 sfc_flow_create(struct rte_eth_dev *dev,
2406                 const struct rte_flow_attr *attr,
2407                 const struct rte_flow_item pattern[],
2408                 const struct rte_flow_action actions[],
2409                 struct rte_flow_error *error)
2410 {
2411         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2412         struct rte_flow *flow = NULL;
2413         int rc;
2414
2415         flow = sfc_flow_zmalloc(error);
2416         if (flow == NULL)
2417                 goto fail_no_mem;
2418
2419         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2420         if (rc != 0)
2421                 goto fail_bad_value;
2422
2423         sfc_adapter_lock(sa);
2424
2425         TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2426
2427         if (sa->state == SFC_ADAPTER_STARTED) {
2428                 rc = sfc_flow_filter_insert(sa, flow);
2429                 if (rc != 0) {
2430                         rte_flow_error_set(error, rc,
2431                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2432                                 "Failed to insert filter");
2433                         goto fail_filter_insert;
2434                 }
2435         }
2436
2437         sfc_adapter_unlock(sa);
2438
2439         return flow;
2440
2441 fail_filter_insert:
2442         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2443
2444 fail_bad_value:
2445         sfc_flow_free(sa, flow);
2446         sfc_adapter_unlock(sa);
2447
2448 fail_no_mem:
2449         return NULL;
2450 }
2451
2452 static int
2453 sfc_flow_remove(struct sfc_adapter *sa,
2454                 struct rte_flow *flow,
2455                 struct rte_flow_error *error)
2456 {
2457         int rc = 0;
2458
2459         SFC_ASSERT(sfc_adapter_is_locked(sa));
2460
2461         if (sa->state == SFC_ADAPTER_STARTED) {
2462                 rc = sfc_flow_filter_remove(sa, flow);
2463                 if (rc != 0)
2464                         rte_flow_error_set(error, rc,
2465                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2466                                 "Failed to destroy flow rule");
2467         }
2468
2469         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2470         sfc_flow_free(sa, flow);
2471
2472         return rc;
2473 }
2474
2475 static int
2476 sfc_flow_destroy(struct rte_eth_dev *dev,
2477                  struct rte_flow *flow,
2478                  struct rte_flow_error *error)
2479 {
2480         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2481         struct rte_flow *flow_ptr;
2482         int rc = EINVAL;
2483
2484         sfc_adapter_lock(sa);
2485
2486         TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2487                 if (flow_ptr == flow)
2488                         rc = 0;
2489         }
2490         if (rc != 0) {
2491                 rte_flow_error_set(error, rc,
2492                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2493                                    "Failed to find flow rule to destroy");
2494                 goto fail_bad_value;
2495         }
2496
2497         rc = sfc_flow_remove(sa, flow, error);
2498
2499 fail_bad_value:
2500         sfc_adapter_unlock(sa);
2501
2502         return -rc;
2503 }
2504
2505 static int
2506 sfc_flow_flush(struct rte_eth_dev *dev,
2507                struct rte_flow_error *error)
2508 {
2509         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2510         struct rte_flow *flow;
2511         int rc = 0;
2512         int ret = 0;
2513
2514         sfc_adapter_lock(sa);
2515
2516         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2517                 rc = sfc_flow_remove(sa, flow, error);
2518                 if (rc != 0)
2519                         ret = rc;
2520         }
2521
2522         sfc_adapter_unlock(sa);
2523
2524         return -ret;
2525 }
2526
2527 static int
2528 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2529                  struct rte_flow_error *error)
2530 {
2531         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2532         int ret = 0;
2533
2534         sfc_adapter_lock(sa);
2535         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2536                 rte_flow_error_set(error, EBUSY,
2537                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2538                                    NULL, "please close the port first");
2539                 ret = -rte_errno;
2540         } else {
2541                 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2542         }
2543         sfc_adapter_unlock(sa);
2544
2545         return ret;
2546 }
2547
/* Generic flow API operations exported by the driver */
const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,		/* flow query is not supported */
	.isolate = sfc_flow_isolate,
};
2556
/*
 * Initialize flow support for the adapter: set up the empty list of
 * flow rules. Must be called with the adapter lock held.
 */
void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->flow_list);
}
2564
2565 void
2566 sfc_flow_fini(struct sfc_adapter *sa)
2567 {
2568         struct rte_flow *flow;
2569
2570         SFC_ASSERT(sfc_adapter_is_locked(sa));
2571
2572         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2573                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2574                 sfc_flow_free(sa, flow);
2575         }
2576 }
2577
2578 void
2579 sfc_flow_stop(struct sfc_adapter *sa)
2580 {
2581         struct rte_flow *flow;
2582
2583         SFC_ASSERT(sfc_adapter_is_locked(sa));
2584
2585         TAILQ_FOREACH(flow, &sa->flow_list, entries)
2586                 sfc_flow_filter_remove(sa, flow);
2587 }
2588
2589 int
2590 sfc_flow_start(struct sfc_adapter *sa)
2591 {
2592         struct rte_flow *flow;
2593         int rc = 0;
2594
2595         sfc_log_init(sa, "entry");
2596
2597         SFC_ASSERT(sfc_adapter_is_locked(sa));
2598
2599         TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2600                 rc = sfc_flow_filter_insert(sa, flow);
2601                 if (rc != 0)
2602                         goto fail_bad_flow;
2603         }
2604
2605         sfc_log_init(sa, "done");
2606
2607 fail_bad_flow:
2608         return rc;
2609 }