net/sfc: generalise flow specification structure
drivers/net/sfc/sfc_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

/*
 * Currently, the filter-based (VNIC) flow API is implemented in such a
 * manner that each flow rule is converted to one or more hardware filters.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is unset in the flow rule, then multiple
 * filter copies are created to cover all possible values
 * of such a field.
 */
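
/*
 * For example, if a rule does not match on the destination MAC address,
 * the specification may be expanded into copies matching unknown unicast
 * and unknown multicast destinations (see sfc_flow_set_unknown_dst_flags()
 * declared below).
 */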

enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
        SFC_FLOW_ITEM_L3,
        SFC_FLOW_ITEM_L4,
};
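
/*
 * The layer values above are used by sfc_flow_parse_pattern() below to
 * validate pattern item ordering: each item's prev_layer must match the
 * layer reached so far, unless either side is SFC_FLOW_ITEM_ANY_LAYER.
 */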

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of item */
        enum sfc_flow_item_layers layer;        /* Layer of item */
        enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask pointers for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used;
         * if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec" then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask does not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                supp = ((const uint8_t *)supp_mask)[i];

                if (~supp & mask[i]) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}
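
/*
 * Note that on success sfc_flow_parse_init() may leave *spec_ptr NULL
 * (an item without a spec matches any value); the protocol parsers
 * below check for this case and skip field conversion.
 */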

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   Individual/group masks are supported, as well as empty and full masks.
 *   If the mask is NULL, the default mask will be used. Ranging is not
 *   supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (rte_is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!rte_is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * ethertype masks are equal to zero in inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!rte_is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
                .inner_type = RTE_BE16(0xffff),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                /* Apply mask to keep VID only */
                vid = rte_bswap16(spec->tci & mask->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN TPID matching is not supported");
                return -rte_errno;
        }
        if (mask->inner_type == supp_mask.inner_type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
        } else if (mask->inner_type) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Bad mask for VLAN inner_type");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}
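
/*
 * sfc_flow_set_efx_spec_vni_or_vsid() is shared by the VXLAN, GENEVE
 * and NVGRE parsers below: all three tunnel protocols carry a 24-bit
 * network identifier (VNI or VSID) that maps to the same EFX match field.
 */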

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, the default mask will be
 *   used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      efx_filter_spec_t *efx_spec,
                      struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_nvgre,
        },
};
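
/*
 * Note that the tunnel items (VXLAN, GENEVE, NVGRE) reset the layer to
 * SFC_FLOW_ITEM_START_LAYER, so parsing of the inner frame starts over
 * from the Ethernet layer.
 */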

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;

        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->transfer != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
                                   "Transfer is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        spec->type = SFC_FLOW_SPEC_FILTER;
        spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
        spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

        return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                rc = item->parse(pattern, &spec_filter->template, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_rxq *rxq;

        if (queue->index >= sfc_sa2shared(sa)->rxq_count)
                return -EINVAL;

        rxq = &sa->rxq_ctrl[queue->index];
        spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
                   const struct rte_flow_action_rss *action_rss,
                   struct rte_flow *flow)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_rss *rss = &sas->rss;
        unsigned int rxq_sw_index;
        struct sfc_rxq *rxq;
        unsigned int rxq_hw_index_min;
        unsigned int rxq_hw_index_max;
        efx_rx_hash_type_t efx_hash_types;
        const uint8_t *rss_key;
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
        unsigned int i;

        if (action_rss->queue_num == 0)
                return -EINVAL;

        rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
        rxq = &sa->rxq_ctrl[rxq_sw_index];
        rxq_hw_index_min = rxq->hw_index;
        rxq_hw_index_max = 0;

        for (i = 0; i < action_rss->queue_num; ++i) {
                rxq_sw_index = action_rss->queue[i];

                if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
                        return -EINVAL;

                rxq = &sa->rxq_ctrl[rxq_sw_index];

                if (rxq->hw_index < rxq_hw_index_min)
                        rxq_hw_index_min = rxq->hw_index;

                if (rxq->hw_index > rxq_hw_index_max)
                        rxq_hw_index_max = rxq->hw_index;
        }

        switch (action_rss->func) {
        case RTE_ETH_HASH_FUNCTION_DEFAULT:
        case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
                break;
        default:
                return -EINVAL;
        }

        if (action_rss->level)
                return -EINVAL;

        /*
         * Dummy RSS action with only one queue and no specific settings
         * for hash types and key does not require dedicated RSS context
         * and may be simplified to single queue action.
         */
        if (action_rss->queue_num == 1 && action_rss->types == 0 &&
            action_rss->key_len == 0) {
                spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
                return 0;
        }

        if (action_rss->types) {
                int rc;

                rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
                                          &efx_hash_types);
                if (rc != 0)
                        return -rc;
        } else {
                unsigned int i;

                efx_hash_types = 0;
                for (i = 0; i < rss->hf_map_nb_entries; ++i)
                        efx_hash_types |= rss->hf_map[i].efx;
        }

        if (action_rss->key_len) {
                if (action_rss->key_len != sizeof(rss->key))
                        return -EINVAL;

                rss_key = action_rss->key;
        } else {
                rss_key = rss->key;
        }

        spec_filter->rss = B_TRUE;

        sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
        sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
        sfc_rss_conf->rss_hash_types = efx_hash_types;
        rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

        for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
                unsigned int nb_queues = action_rss->queue_num;
                unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
                struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];

                sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
        }

        return 0;
}
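
/*
 * Note that sfc_flow_parse_rss() stores rss_tbl entries as offsets from
 * rxq_hw_index_min, i.e. the indirection table is relative to the base
 * hardware Rx queue of the RSS context.
 */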

static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
                    unsigned int filters_count)
{
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < filters_count; i++) {
                int rc;

                rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
                if (ret == 0 && rc != 0) {
                        sfc_err(sa, "failed to remove filter specification "
                                "(rc = %d)", rc);
                        ret = rc;
                }
        }

        return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        unsigned int i;
        int rc = 0;

        for (i = 0; i < spec_filter->count; i++) {
                rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
                if (rc != 0) {
                        sfc_flow_spec_flush(sa, spec, i);
                        break;
                }
        }

        return rc;
}
1404
1405 static int
1406 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1407 {
1408         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1409
1410         return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1411 }
1412
1413 static int
1414 sfc_flow_filter_insert(struct sfc_adapter *sa,
1415                        struct rte_flow *flow)
1416 {
1417         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1418         struct sfc_rss *rss = &sas->rss;
1419         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1420         struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
1421         uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1422         unsigned int i;
1423         int rc = 0;
1424
1425         if (spec_filter->rss) {
1426                 unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
1427                                               flow_rss->rxq_hw_index_min + 1,
1428                                               EFX_MAXRSS);
1429
1430                 rc = efx_rx_scale_context_alloc(sa->nic,
1431                                                 EFX_RX_SCALE_EXCLUSIVE,
1432                                                 rss_spread,
1433                                                 &efs_rss_context);
1434                 if (rc != 0)
1435                         goto fail_scale_context_alloc;
1436
1437                 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1438                                            rss->hash_alg,
1439                                            flow_rss->rss_hash_types, B_TRUE);
1440                 if (rc != 0)
1441                         goto fail_scale_mode_set;
1442
1443                 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1444                                           flow_rss->rss_key,
1445                                           sizeof(rss->key));
1446                 if (rc != 0)
1447                         goto fail_scale_key_set;
1448
1449                 /*
1450                  * At this point, fully elaborated filter specifications
1451                  * have been produced from the template. To make sure that
1452                  * RSS behaviour is consistent between them, set the same
1453                  * RSS context value everywhere.
1454                  */
1455                 for (i = 0; i < spec_filter->count; i++) {
1456                         efx_filter_spec_t *spec = &spec_filter->filters[i];
1457
1458                         spec->efs_rss_context = efs_rss_context;
1459                         spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1460                         spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1461                 }
1462         }
1463
1464         rc = sfc_flow_spec_insert(sa, &flow->spec);
1465         if (rc != 0)
1466                 goto fail_filter_insert;
1467
1468         if (spec_filter->rss) {
1469                 /*
1470                  * Scale table is set after filter insertion because
1471                  * the table entries are relative to the base RxQ ID
1472                  * and the latter is submitted to the HW by means of
1473                  * inserting a filter, so by the time of the request
1474                  * the HW knows all the information needed to verify
1475                  * the table entries, and the operation will succeed.
1476                  */
1477                 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1478                                           flow_rss->rss_tbl,
1479                                           RTE_DIM(flow_rss->rss_tbl));
1480                 if (rc != 0)
1481                         goto fail_scale_tbl_set;
1482         }
1483
1484         return 0;
1485
1486 fail_scale_tbl_set:
1487         sfc_flow_spec_remove(sa, &flow->spec);
1488
1489 fail_filter_insert:
1490 fail_scale_key_set:
1491 fail_scale_mode_set:
1492         if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1493                 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1494
1495 fail_scale_context_alloc:
1496         return rc;
1497 }
1498
1499 static int
1500 sfc_flow_filter_remove(struct sfc_adapter *sa,
1501                        struct rte_flow *flow)
1502 {
1503         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1504         int rc = 0;
1505
1506         rc = sfc_flow_spec_remove(sa, &flow->spec);
1507         if (rc != 0)
1508                 return rc;
1509
1510         if (spec_filter->rss) {
1511                 /*
1512                  * All specifications for a given flow rule have the same RSS
1513                  * context, so the RSS context value is taken from the first
1514                  * filter specification.
1515                  */
1516                 efx_filter_spec_t *spec = &spec_filter->filters[0];
1517
1518                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1519         }
1520
1521         return rc;
1522 }
1523
1524 static int
1525 sfc_flow_parse_mark(struct sfc_adapter *sa,
1526                     const struct rte_flow_action_mark *mark,
1527                     struct rte_flow *flow)
1528 {
1529         struct sfc_flow_spec *spec = &flow->spec;
1530         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1531         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1532
1533         if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1534                 return EINVAL;
1535
1536         spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1537         spec_filter->template.efs_mark = mark->id;
1538
1539         return 0;
1540 }
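
/*
 * E.g. a MARK action with .id = 42 is accepted as long as 42 does not
 * exceed the enc_filter_action_mark_max limit reported by the NIC;
 * larger IDs make the parsing above fail with EINVAL.
 */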
1541
1542 static int
1543 sfc_flow_parse_actions(struct sfc_adapter *sa,
1544                        const struct rte_flow_action actions[],
1545                        struct rte_flow *flow,
1546                        struct rte_flow_error *error)
1547 {
1548         int rc;
1549         struct sfc_flow_spec *spec = &flow->spec;
1550         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1551         const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1552         uint32_t actions_set = 0;
1553         const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1554                                            (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1555                                            (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1556         const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1557                                            (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1558
1559         if (actions == NULL) {
1560                 rte_flow_error_set(error, EINVAL,
1561                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1562                                    "NULL actions");
1563                 return -rte_errno;
1564         }
1565
1566 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
1567         RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
1568
1569         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1570                 switch (actions->type) {
1571                 case RTE_FLOW_ACTION_TYPE_VOID:
1572                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1573                                                actions_set);
1574                         break;
1575
1576                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1577                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1578                                                actions_set);
1579                         if ((actions_set & fate_actions_mask) != 0)
1580                                 goto fail_fate_actions;
1581
1582                         rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1583                         if (rc != 0) {
1584                                 rte_flow_error_set(error, EINVAL,
1585                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1586                                         "Bad QUEUE action");
1587                                 return -rte_errno;
1588                         }
1589                         break;
1590
1591                 case RTE_FLOW_ACTION_TYPE_RSS:
1592                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1593                                                actions_set);
1594                         if ((actions_set & fate_actions_mask) != 0)
1595                                 goto fail_fate_actions;
1596
1597                         rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1598                         if (rc != 0) {
1599                                 rte_flow_error_set(error, -rc,
1600                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1601                                         "Bad RSS action");
1602                                 return -rte_errno;
1603                         }
1604                         break;
1605
1606                 case RTE_FLOW_ACTION_TYPE_DROP:
1607                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1608                                                actions_set);
1609                         if ((actions_set & fate_actions_mask) != 0)
1610                                 goto fail_fate_actions;
1611
1612                         spec_filter->template.efs_dmaq_id =
1613                                 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1614                         break;
1615
1616                 case RTE_FLOW_ACTION_TYPE_FLAG:
1617                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1618                                                actions_set);
1619                         if ((actions_set & mark_actions_mask) != 0)
1620                                 goto fail_actions_overlap;
1621
1622                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1623                                 rte_flow_error_set(error, ENOTSUP,
1624                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1625                                         "FLAG action is not supported on the current Rx datapath");
1626                                 return -rte_errno;
1627                         }
1628
1629                         spec_filter->template.efs_flags |=
1630                                 EFX_FILTER_FLAG_ACTION_FLAG;
1631                         break;
1632
1633                 case RTE_FLOW_ACTION_TYPE_MARK:
1634                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1635                                                actions_set);
1636                         if ((actions_set & mark_actions_mask) != 0)
1637                                 goto fail_actions_overlap;
1638
1639                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1640                                 rte_flow_error_set(error, ENOTSUP,
1641                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1642                                         "MARK action is not supported on the current Rx datapath");
1643                                 return -rte_errno;
1644                         }
1645
1646                         rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1647                         if (rc != 0) {
1648                                 rte_flow_error_set(error, rc,
1649                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1650                                         "Bad MARK action");
1651                                 return -rte_errno;
1652                         }
1653                         break;
1654
1655                 default:
1656                         rte_flow_error_set(error, ENOTSUP,
1657                                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1658                                            "Action is not supported");
1659                         return -rte_errno;
1660                 }
1661
1662                 actions_set |= (1UL << actions->type);
1663         }
1664 #undef SFC_BUILD_SET_OVERFLOW
1665
1666         /* When fate is unknown, drop traffic. */
1667         if ((actions_set & fate_actions_mask) == 0) {
1668                 spec_filter->template.efs_dmaq_id =
1669                         EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1670         }
1671
1672         return 0;
1673
1674 fail_fate_actions:
1675         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1676                            "Cannot combine several fate-deciding actions, "
1677                            "choose between QUEUE, RSS or DROP");
1678         return -rte_errno;
1679
1680 fail_actions_overlap:
1681         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1682                            "Overlapping actions are not supported");
1683         return -rte_errno;
1684 }
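
/*
 * E.g. the action list { MARK, QUEUE, END } is accepted by the parser
 * above (one mark action plus one fate-deciding action), whereas
 * { QUEUE, DROP, END } is rejected because QUEUE and DROP are both
 * fate-deciding actions.
 */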
1685
1686 /**
1687  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1688  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1689  * specifications after copying.
1690  *
1691  * @param spec[in, out]
1692  *   SFC flow specification to update.
1693  * @param filters_count_for_one_val[in]
1694  *   How many specifications should have the same match flag, i.e. the
1695  *   number of specifications before copying.
1696  * @param error[out]
1697  *   Perform verbose error reporting if not NULL.
1698  */
1699 static int
1700 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1701                                unsigned int filters_count_for_one_val,
1702                                struct rte_flow_error *error)
1703 {
1704         unsigned int i;
1705         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1706         static const efx_filter_match_flags_t vals[] = {
1707                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1708                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1709         };
1710
1711         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1712                 rte_flow_error_set(error, EINVAL,
1713                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1714                         "Number of specifications is incorrect while copying "
1715                         "by unknown destination flags");
1716                 return -rte_errno;
1717         }
1718
1719         for (i = 0; i < spec_filter->count; i++) {
1720                 /* The check above ensures that divisor can't be zero here */
1721                 spec_filter->filters[i].efs_match_flags |=
1722                         vals[i / filters_count_for_one_val];
1723         }
1724
1725         return 0;
1726 }
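
/*
 * Worked example: with filters_count_for_one_val == 2 and
 * spec_filter->count == 4 (two original specifications copied once),
 * filters[0] and filters[1] get EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
 * while filters[2] and filters[3] get
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST.
 */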
1727
1728 /**
1729  * Check that the following condition is met:
1730  * - the list of supported filters has a filter
1731  *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1732  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1733  *   be inserted.
1734  *
1735  * @param match[in]
1736  *   The match flags of filter.
1737  * @param spec[in]
1738  *   Specification to be supplemented.
1739  * @param filter[in]
1740  *   SFC filter with list of supported filters.
1741  */
1742 static boolean_t
1743 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1744                                  __rte_unused efx_filter_spec_t *spec,
1745                                  struct sfc_filter *filter)
1746 {
1747         unsigned int i;
1748         efx_filter_match_flags_t match_mcast_dst;
1749
1750         match_mcast_dst =
1751                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1752                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1753         for (i = 0; i < filter->supported_match_num; i++) {
1754                 if (match_mcast_dst == filter->supported_match[i])
1755                         return B_TRUE;
1756         }
1757
1758         return B_FALSE;
1759 }
1760
1761 /**
1762  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1763  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1764  * specifications after copying.
1765  *
1766  * @param spec[in, out]
1767  *   SFC flow specification to update.
1768  * @param filters_count_for_one_val[in]
1769  *   How many specifications should have the same EtherType value, i.e. the
1770  *   number of specifications before copying.
1771  * @param error[out]
1772  *   Perform verbose error reporting if not NULL.
1773  */
1774 static int
1775 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1776                         unsigned int filters_count_for_one_val,
1777                         struct rte_flow_error *error)
1778 {
1779         unsigned int i;
1780         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1781         static const uint16_t vals[] = {
1782                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1783         };
1784
1785         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1786                 rte_flow_error_set(error, EINVAL,
1787                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1788                         "Number of specifications is incorrect "
1789                         "while copying by Ethertype");
1790                 return -rte_errno;
1791         }
1792
1793         for (i = 0; i < spec_filter->count; i++) {
1794                 spec_filter->filters[i].efs_match_flags |=
1795                         EFX_FILTER_MATCH_ETHER_TYPE;
1796
1797                 /*
1798                  * The check above ensures that
1799                  * filters_count_for_one_val is not 0
1800                  */
1801                 spec_filter->filters[i].efs_ether_type =
1802                         vals[i / filters_count_for_one_val];
1803         }
1804
1805         return 0;
1806 }
1807
1808 /**
1809  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1810  * in the same specifications after copying.
1811  *
1812  * @param spec[in, out]
1813  *   SFC flow specification to update.
1814  * @param filters_count_for_one_val[in]
1815  *   How many specifications should have the same match flag, i.e. the
1816  *   number of specifications before copying.
1817  * @param error[out]
1818  *   Perform verbose error reporting if not NULL.
1819  */
1820 static int
1821 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1822                             unsigned int filters_count_for_one_val,
1823                             struct rte_flow_error *error)
1824 {
1825         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1826         unsigned int i;
1827
1828         if (filters_count_for_one_val != spec_filter->count) {
1829                 rte_flow_error_set(error, EINVAL,
1830                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1831                         "Number of specifications is incorrect "
1832                         "while copying by outer VLAN ID");
1833                 return -rte_errno;
1834         }
1835
1836         for (i = 0; i < spec_filter->count; i++) {
1837                 spec_filter->filters[i].efs_match_flags |=
1838                         EFX_FILTER_MATCH_OUTER_VID;
1839
1840                 spec_filter->filters[i].efs_outer_vid = 0;
1841         }
1842
1843         return 0;
1844 }
1845
1846 /**
1847  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1848  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1849  * specifications after copying.
1850  *
1851  * @param spec[in, out]
1852  *   SFC flow specification to update.
1853  * @param filters_count_for_one_val[in]
1854  *   How many specifications should have the same match flag, i.e. the
1855  *   number of specifications before copying.
1856  * @param error[out]
1857  *   Perform verbose error reporting if not NULL.
1858  */
1859 static int
1860 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1861                                     unsigned int filters_count_for_one_val,
1862                                     struct rte_flow_error *error)
1863 {
1864         unsigned int i;
1865         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1866         static const efx_filter_match_flags_t vals[] = {
1867                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1868                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1869         };
1870
1871         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1872                 rte_flow_error_set(error, EINVAL,
1873                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1874                         "Number of specifications is incorrect while copying "
1875                         "by inner frame unknown destination flags");
1876                 return -rte_errno;
1877         }
1878
1879         for (i = 0; i < spec_filter->count; i++) {
1880                 /* The check above ensures that divisor can't be zero here */
1881                 spec_filter->filters[i].efs_match_flags |=
1882                         vals[i / filters_count_for_one_val];
1883         }
1884
1885         return 0;
1886 }
1887
1888 /**
1889  * Check that the following conditions are met:
1890  * - the specification corresponds to a filter for encapsulated traffic
1891  * - the list of supported filters has a filter
1892  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1893  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1894  *   be inserted.
1895  *
1896  * @param match[in]
1897  *   The match flags of filter.
1898  * @param spec[in]
1899  *   Specification to be supplemented.
1900  * @param filter[in]
1901  *   SFC filter with list of supported filters.
1902  */
1903 static boolean_t
1904 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1905                                       efx_filter_spec_t *spec,
1906                                       struct sfc_filter *filter)
1907 {
1908         unsigned int i;
1909         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1910         efx_filter_match_flags_t match_mcast_dst;
1911
1912         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1913                 return B_FALSE;
1914
1915         match_mcast_dst =
1916                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1917                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1918         for (i = 0; i < filter->supported_match_num; i++) {
1919                 if (match_mcast_dst == filter->supported_match[i])
1920                         return B_TRUE;
1921         }
1922
1923         return B_FALSE;
1924 }
1925
1926 /**
1927  * Check that the list of supported filters has a filter that differs
1928  * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID;
1929  * in this case, that filter will be used and the flag
1930  * EFX_FILTER_MATCH_OUTER_VID is not needed.
1931  *
1932  * @param match[in]
1933  *   The match flags of filter.
1934  * @param spec[in]
1935  *   Specification to be supplemented.
1936  * @param filter[in]
1937  *   SFC filter with list of supported filters.
1938  */
1939 static boolean_t
1940 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
1941                               __rte_unused efx_filter_spec_t *spec,
1942                               struct sfc_filter *filter)
1943 {
1944         unsigned int i;
1945         efx_filter_match_flags_t match_without_vid =
1946                 match & ~EFX_FILTER_MATCH_OUTER_VID;
1947
1948         for (i = 0; i < filter->supported_match_num; i++) {
1949                 if (match_without_vid == filter->supported_match[i])
1950                         return B_FALSE;
1951         }
1952
1953         return B_TRUE;
1954 }
1955
1956 /*
1957  * Match flags that can be automatically added to filters.
1958  * Selecting the last minimum multiplier when searching for the copy flag
1959  * ensures that the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher
1960  * priority than EFX_FILTER_MATCH_ETHER_TYPE, because the filter with
1961  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of
1962  * supported filters.
1963  */
1964 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
1965         {
1966                 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1967                 .vals_count = 2,
1968                 .set_vals = sfc_flow_set_unknown_dst_flags,
1969                 .spec_check = sfc_flow_check_unknown_dst_flags,
1970         },
1971         {
1972                 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
1973                 .vals_count = 2,
1974                 .set_vals = sfc_flow_set_ethertypes,
1975                 .spec_check = NULL,
1976         },
1977         {
1978                 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1979                 .vals_count = 2,
1980                 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
1981                 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
1982         },
1983         {
1984                 .flag = EFX_FILTER_MATCH_OUTER_VID,
1985                 .vals_count = 1,
1986                 .set_vals = sfc_flow_set_outer_vid_flag,
1987                 .spec_check = sfc_flow_check_outer_vid_flag,
1988         },
1989 };
1990
1991 /* Get item from array sfc_flow_copy_flags */
1992 static const struct sfc_flow_copy_flag *
1993 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
1994 {
1995         unsigned int i;
1996
1997         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1998                 if (sfc_flow_copy_flags[i].flag == flag)
1999                         return &sfc_flow_copy_flags[i];
2000         }
2001
2002         return NULL;
2003 }
2004
2005 /**
2006  * Make copies of the specifications, set match flag and values
2007  * of the field that corresponds to it.
2008  *
2009  * @param spec[in, out]
2010  *   SFC flow specification to update.
2011  * @param flag[in]
2012  *   The match flag to add.
2013  * @param error[out]
2014  *   Perform verbose error reporting if not NULL.
2015  */
2016 static int
2017 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2018                              efx_filter_match_flags_t flag,
2019                              struct rte_flow_error *error)
2020 {
2021         unsigned int i;
2022         unsigned int new_filters_count;
2023         unsigned int filters_count_for_one_val;
2024         const struct sfc_flow_copy_flag *copy_flag;
2025         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2026         int rc;
2027
2028         copy_flag = sfc_flow_get_copy_flag(flag);
2029         if (copy_flag == NULL) {
2030                 rte_flow_error_set(error, ENOTSUP,
2031                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2032                                    "Unsupported spec field for copying");
2033                 return -rte_errno;
2034         }
2035
2036         new_filters_count = spec_filter->count * copy_flag->vals_count;
2037         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2038                 rte_flow_error_set(error, EINVAL,
2039                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2040                         "Too much EFX specifications in the flow rule");
2041                 return -rte_errno;
2042         }
2043
2044         /* Copy filters specifications */
2045         for (i = spec_filter->count; i < new_filters_count; i++) {
2046                 spec_filter->filters[i] =
2047                         spec_filter->filters[i - spec_filter->count];
2048         }
2049
2050         filters_count_for_one_val = spec_filter->count;
2051         spec_filter->count = new_filters_count;
2052
2053         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2054         if (rc != 0)
2055                 return rc;
2056
2057         return 0;
2058 }
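
/*
 * E.g. adding EFX_FILTER_MATCH_ETHER_TYPE (vals_count == 2) to a spec
 * that holds two filters [A, B] first duplicates them into [A, B, A, B];
 * sfc_flow_set_ethertypes() then marks the first pair as IPv4 and the
 * second pair as IPv6.
 */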
2059
2060 /**
2061  * Check that the given set of match flags missing in the original filter spec
2062  * could be covered by adding spec copies which specify the corresponding
2063  * flags and packet field values to match.
2064  *
2065  * @param miss_flags[in]
2066  *   Flags that the specification lacks relative to the supported filter.
2067  * @param spec[in]
2068  *   Specification to be supplemented.
2069  * @param filter[in]
2070  *   SFC filter.
2071  *
2072  * @return
2073  *   Number of specifications after copying, or 0 if the flags cannot be added.
2074  */
2075 static unsigned int
2076 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2077                              efx_filter_spec_t *spec,
2078                              struct sfc_filter *filter)
2079 {
2080         unsigned int i;
2081         efx_filter_match_flags_t copy_flags = 0;
2082         efx_filter_match_flags_t flag;
2083         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2084         sfc_flow_spec_check *check;
2085         unsigned int multiplier = 1;
2086
2087         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2088                 flag = sfc_flow_copy_flags[i].flag;
2089                 check = sfc_flow_copy_flags[i].spec_check;
2090                 if ((flag & miss_flags) == flag) {
2091                         if (check != NULL && (!check(match, spec, filter)))
2092                                 continue;
2093
2094                         copy_flags |= flag;
2095                         multiplier *= sfc_flow_copy_flags[i].vals_count;
2096                 }
2097         }
2098
2099         if (copy_flags == miss_flags)
2100                 return multiplier;
2101
2102         return 0;
2103 }
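
/*
 * E.g. if miss_flags is EFX_FILTER_MATCH_UNKNOWN_UCAST_DST |
 * EFX_FILTER_MATCH_ETHER_TYPE and both flags pass their checks, the
 * resulting multiplier is 2 * 2 == 4: covering the missing flags takes
 * four times as many specifications as in the original template.
 */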
2104
2105 /**
2106  * Attempt to supplement the specification template with the minimal
2107  * supported set of match flags. To do this, it is necessary to copy
2108  * the specifications, filling them with the values of fields that
2109  * correspond to the missing flags.
2110  * The necessary and sufficient filter is built from the fewest number
2111  * of copies which could be made to cover the minimally required set
2112  * of flags.
2113  *
2114  * @param sa[in]
2115  *   SFC adapter.
2116  * @param spec[in, out]
2117  *   SFC flow specification to update.
2118  * @param error[out]
2119  *   Perform verbose error reporting if not NULL.
2120  */
2121 static int
2122 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2123                                struct sfc_flow_spec *spec,
2124                                struct rte_flow_error *error)
2125 {
2126         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2127         struct sfc_filter *filter = &sa->filter;
2128         efx_filter_match_flags_t miss_flags;
2129         efx_filter_match_flags_t min_miss_flags = 0;
2130         efx_filter_match_flags_t match;
2131         unsigned int min_multiplier = UINT_MAX;
2132         unsigned int multiplier;
2133         unsigned int i;
2134         int rc;
2135
2136         match = spec_filter->template.efs_match_flags;
2137         for (i = 0; i < filter->supported_match_num; i++) {
2138                 if ((match & filter->supported_match[i]) == match) {
2139                         miss_flags = filter->supported_match[i] & (~match);
2140                         multiplier = sfc_flow_check_missing_flags(miss_flags,
2141                                 &spec_filter->template, filter);
2142                         if (multiplier > 0) {
2143                                 if (multiplier <= min_multiplier) {
2144                                         min_multiplier = multiplier;
2145                                         min_miss_flags = miss_flags;
2146                                 }
2147                         }
2148                 }
2149         }
2150
2151         if (min_multiplier == UINT_MAX) {
2152                 rte_flow_error_set(error, ENOTSUP,
2153                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2154                                    "The flow rule pattern is unsupported");
2155                 return -rte_errno;
2156         }
2157
2158         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2159                 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2160
2161                 if ((flag & min_miss_flags) == flag) {
2162                         rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2163                         if (rc != 0)
2164                                 return rc;
2165                 }
2166         }
2167
2168         return 0;
2169 }
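
/*
 * Hypothetical example: if the template matches on LOC_MAC only and the
 * supported list contains both LOC_MAC | ETHER_TYPE (multiplier 2) and
 * LOC_MAC | ETHER_TYPE | UNKNOWN_UCAST_DST (multiplier 4), the first
 * variant wins and the single template grows into two specifications.
 */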
2170
2171 /**
2172  * Check that a set of match flags corresponds to a filter pattern. The
2173  * filter is described by match flags, optionally extended with the
2174  * OUTER_VID and INNER_VID flags.
2175  *
2176  * @param match_flags[in]
2177  *   Set of match flags.
2178  * @param flags_pattern[in]
2179  *   Pattern of filter match flags.
2180  */
2181 static boolean_t
2182 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2183                             efx_filter_match_flags_t flags_pattern)
2184 {
2185         if ((match_flags & flags_pattern) != flags_pattern)
2186                 return B_FALSE;
2187
2188         switch (match_flags & ~flags_pattern) {
2189         case 0:
2190         case EFX_FILTER_MATCH_OUTER_VID:
2191         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2192                 return B_TRUE;
2193         default:
2194                 return B_FALSE;
2195         }
2196 }
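
/*
 * E.g. with flags_pattern == EFX_FILTER_MATCH_ETHER_TYPE, a match on
 * ETHER_TYPE | OUTER_VID is accepted, while a match on
 * ETHER_TYPE | LOC_MAC is not, since LOC_MAC is not one of the
 * permitted VLAN ID flags.
 */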
2197
2198 /**
2199  * Check whether the spec maps to a hardware filter which is known to be
2200  * ineffective despite being valid.
2201  *
2202  * @param filter[in]
2203  *   SFC filter with list of supported filters.
2204  * @param spec[in]
2205  *   SFC flow specification.
2206  */
2207 static boolean_t
2208 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2209                                   struct sfc_flow_spec *spec)
2210 {
2211         unsigned int i;
2212         uint16_t ether_type;
2213         uint8_t ip_proto;
2214         efx_filter_match_flags_t match_flags;
2215         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2216
2217         for (i = 0; i < spec_filter->count; i++) {
2218                 match_flags = spec_filter->filters[i].efs_match_flags;
2219
2220                 if (sfc_flow_is_match_with_vids(match_flags,
2221                                                 EFX_FILTER_MATCH_ETHER_TYPE) ||
2222                     sfc_flow_is_match_with_vids(match_flags,
2223                                                 EFX_FILTER_MATCH_ETHER_TYPE |
2224                                                 EFX_FILTER_MATCH_LOC_MAC)) {
2225                         ether_type = spec_filter->filters[i].efs_ether_type;
2226                         if (filter->supports_ip_proto_or_addr_filter &&
2227                             (ether_type == EFX_ETHER_TYPE_IPV4 ||
2228                              ether_type == EFX_ETHER_TYPE_IPV6))
2229                                 return B_TRUE;
2230                 } else if (sfc_flow_is_match_with_vids(match_flags,
2231                                 EFX_FILTER_MATCH_ETHER_TYPE |
2232                                 EFX_FILTER_MATCH_IP_PROTO) ||
2233                            sfc_flow_is_match_with_vids(match_flags,
2234                                 EFX_FILTER_MATCH_ETHER_TYPE |
2235                                 EFX_FILTER_MATCH_IP_PROTO |
2236                                 EFX_FILTER_MATCH_LOC_MAC)) {
2237                         ip_proto = spec_filter->filters[i].efs_ip_proto;
2238                         if (filter->supports_rem_or_local_port_filter &&
2239                             (ip_proto == EFX_IPPROTO_TCP ||
2240                              ip_proto == EFX_IPPROTO_UDP))
2241                                 return B_TRUE;
2242                 }
2243         }
2244
2245         return B_FALSE;
2246 }
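
/*
 * E.g. a specification matching on ETHER_TYPE == IPv4 alone is treated
 * as an exception on hardware that also supports IP protocol or address
 * filters: the resulting filter would be valid but ineffective there.
 */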
2247
2248 static int
2249 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2250                               struct rte_flow *flow,
2251                               struct rte_flow_error *error)
2252 {
2253         struct sfc_flow_spec *spec = &flow->spec;
2254         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2255         efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2256         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2257         int rc;
2258
2259         /* Initialize the first filter spec with template */
2260         spec_filter->filters[0] = *spec_tmpl;
2261         spec_filter->count = 1;
2262
2263         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2264                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2265                 if (rc != 0)
2266                         return rc;
2267         }
2268
2269         if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2270                 rte_flow_error_set(error, ENOTSUP,
2271                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2272                         "The flow rule pattern is unsupported");
2273                 return -rte_errno;
2274         }
2275
2276         return 0;
2277 }
2278
2279 static int
2280 sfc_flow_parse(struct rte_eth_dev *dev,
2281                const struct rte_flow_attr *attr,
2282                const struct rte_flow_item pattern[],
2283                const struct rte_flow_action actions[],
2284                struct rte_flow *flow,
2285                struct rte_flow_error *error)
2286 {
2287         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2288         int rc;
2289
2290         rc = sfc_flow_parse_attr(attr, flow, error);
2291         if (rc != 0)
2292                 goto fail_bad_value;
2293
2294         rc = sfc_flow_parse_pattern(pattern, flow, error);
2295         if (rc != 0)
2296                 goto fail_bad_value;
2297
2298         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2299         if (rc != 0)
2300                 goto fail_bad_value;
2301
2302         rc = sfc_flow_validate_match_flags(sa, flow, error);
2303         if (rc != 0)
2304                 goto fail_bad_value;
2305
2306         return 0;
2307
2308 fail_bad_value:
2309         return rc;
2310 }
2311
2312 static int
2313 sfc_flow_validate(struct rte_eth_dev *dev,
2314                   const struct rte_flow_attr *attr,
2315                   const struct rte_flow_item pattern[],
2316                   const struct rte_flow_action actions[],
2317                   struct rte_flow_error *error)
2318 {
2319         struct rte_flow flow;
2320
2321         memset(&flow, 0, sizeof(flow));
2322
2323         return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
2324 }
2325
2326 static struct rte_flow *
2327 sfc_flow_create(struct rte_eth_dev *dev,
2328                 const struct rte_flow_attr *attr,
2329                 const struct rte_flow_item pattern[],
2330                 const struct rte_flow_action actions[],
2331                 struct rte_flow_error *error)
2332 {
2333         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2334         struct rte_flow *flow = NULL;
2335         int rc;
2336
2337         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2338         if (flow == NULL) {
2339                 rte_flow_error_set(error, ENOMEM,
2340                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2341                                    "Failed to allocate memory");
2342                 goto fail_no_mem;
2343         }
2344
2345         sfc_adapter_lock(sa);
2346
2347         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2348         if (rc != 0)
2349                 goto fail_bad_value;
2350
2351         TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2352
2353         if (sa->state == SFC_ADAPTER_STARTED) {
2354                 rc = sfc_flow_filter_insert(sa, flow);
2355                 if (rc != 0) {
2356                         rte_flow_error_set(error, rc,
2357                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2358                                 "Failed to insert filter");
2359                         goto fail_filter_insert;
2360                 }
2361         }
2362
2363         sfc_adapter_unlock(sa);
2364
2365         return flow;
2366
2367 fail_filter_insert:
2368         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2369
2370 fail_bad_value:
2371         rte_free(flow);
2372         sfc_adapter_unlock(sa);
2373
2374 fail_no_mem:
2375         return NULL;
2376 }
2377
2378 static int
2379 sfc_flow_remove(struct sfc_adapter *sa,
2380                 struct rte_flow *flow,
2381                 struct rte_flow_error *error)
2382 {
2383         int rc = 0;
2384
2385         SFC_ASSERT(sfc_adapter_is_locked(sa));
2386
2387         if (sa->state == SFC_ADAPTER_STARTED) {
2388                 rc = sfc_flow_filter_remove(sa, flow);
2389                 if (rc != 0)
2390                         rte_flow_error_set(error, rc,
2391                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2392                                 "Failed to destroy flow rule");
2393         }
2394
2395         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2396         rte_free(flow);
2397
2398         return rc;
2399 }
2400
2401 static int
2402 sfc_flow_destroy(struct rte_eth_dev *dev,
2403                  struct rte_flow *flow,
2404                  struct rte_flow_error *error)
2405 {
2406         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2407         struct rte_flow *flow_ptr;
2408         int rc = EINVAL;
2409
2410         sfc_adapter_lock(sa);
2411
2412         TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2413                 if (flow_ptr == flow)
2414                         rc = 0;
2415         }
2416         if (rc != 0) {
2417                 rte_flow_error_set(error, rc,
2418                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2419                                    "Failed to find flow rule to destroy");
2420                 goto fail_bad_value;
2421         }
2422
2423         rc = sfc_flow_remove(sa, flow, error);
2424
2425 fail_bad_value:
2426         sfc_adapter_unlock(sa);
2427
2428         return -rc;
2429 }
2430
2431 static int
2432 sfc_flow_flush(struct rte_eth_dev *dev,
2433                struct rte_flow_error *error)
2434 {
2435         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2436         struct rte_flow *flow;
2437         int rc = 0;
2438         int ret = 0;
2439
2440         sfc_adapter_lock(sa);
2441
2442         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2443                 rc = sfc_flow_remove(sa, flow, error);
2444                 if (rc != 0)
2445                         ret = rc;
2446         }
2447
2448         sfc_adapter_unlock(sa);
2449
2450         return -ret;
2451 }
2452
2453 static int
2454 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2455                  struct rte_flow_error *error)
2456 {
2457         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2458         int ret = 0;
2459
2460         sfc_adapter_lock(sa);
2461         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2462                 rte_flow_error_set(error, EBUSY,
2463                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2464                                    NULL, "please close the port first");
2465                 ret = -rte_errno;
2466         } else {
2467                 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2468         }
2469         sfc_adapter_unlock(sa);
2470
2471         return ret;
2472 }
2473
2474 const struct rte_flow_ops sfc_flow_ops = {
2475         .validate = sfc_flow_validate,
2476         .create = sfc_flow_create,
2477         .destroy = sfc_flow_destroy,
2478         .flush = sfc_flow_flush,
2479         .query = NULL,
2480         .isolate = sfc_flow_isolate,
2481 };
2482
2483 void
2484 sfc_flow_init(struct sfc_adapter *sa)
2485 {
2486         SFC_ASSERT(sfc_adapter_is_locked(sa));
2487
2488         TAILQ_INIT(&sa->flow_list);
2489 }
2490
2491 void
2492 sfc_flow_fini(struct sfc_adapter *sa)
2493 {
2494         struct rte_flow *flow;
2495
2496         SFC_ASSERT(sfc_adapter_is_locked(sa));
2497
2498         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2499                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2500                 rte_free(flow);
2501         }
2502 }
2503
2504 void
2505 sfc_flow_stop(struct sfc_adapter *sa)
2506 {
2507         struct rte_flow *flow;
2508
2509         SFC_ASSERT(sfc_adapter_is_locked(sa));
2510
2511         TAILQ_FOREACH(flow, &sa->flow_list, entries)
2512                 sfc_flow_filter_remove(sa, flow);
2513 }
2514
2515 int
2516 sfc_flow_start(struct sfc_adapter *sa)
2517 {
2518         struct rte_flow *flow;
2519         int rc = 0;
2520
2521         sfc_log_init(sa, "entry");
2522
2523         SFC_ASSERT(sfc_adapter_is_locked(sa));
2524
2525         TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2526                 rc = sfc_flow_filter_insert(sa, flow);
2527                 if (rc != 0)
2528                         goto fail_bad_flow;
2529         }
2530
2531         sfc_log_init(sa, "done");
2532
2533 fail_bad_flow:
2534         return rc;
2535 }