/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

/*
 * At the moment, the flow API is implemented in such a manner that each
 * flow rule is converted to one or more hardware filters.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is unset in the flow rule, a handful of
 * filter copies is created to cover all possible values of such
 * a field.
 */
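
/*
 * For example (a sketch of the expansion, not an exhaustive list): a rule
 * that does not specify the destination MAC address cannot be expressed
 * as a single hardware filter, so it is expanded into two copies which
 * differ only in the destination match flag:
 *
 *   copy #1: EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 *   copy #2: EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
 *
 * See struct sfc_flow_copy_flag and the sfc_flow_spec_set_vals callbacks
 * declared below.
 */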

enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
        SFC_FLOW_ITEM_L3,
        SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of item */
        enum sfc_flow_item_layers layer;        /* Layer of item */
        enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

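/*
 * Callback type used to fill in copies of a filter specification with
 * all possible values of a match field that the flow rule left unset
 * (see struct sfc_flow_copy_flag below).
 */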
typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

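/*
 * Callback type used to check that a filter specification is suitable
 * for adding the corresponding match flag (see struct sfc_flow_copy_flag
 * below).
 */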
typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

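/* Check that all bytes in the buffer are zero */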
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" structures
 * for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec" then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask does not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                supp = ((const uint8_t *)supp_mask)[i];

                if (~supp & mask[i]) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. An outer frame specification may only comprise
 *   the source/destination addresses and the EtherType field.
 *   An inner frame specification may contain the destination address only.
 *   Individual/group masks are supported, as well as empty and full masks.
 *   If the mask is NULL, the default mask is used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * ethertype masks are equal to zero in inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
                .inner_type = RTE_BE16(0xffff),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                /* Apply mask to keep VID only */
                vid = rte_bswap16(spec->tci & mask->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN TPID matching is not supported");
                return -rte_errno;
        }
        if (mask->inner_type == supp_mask.inner_type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
        } else if (mask->inner_type) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Bad mask for VLAN inner_type");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}

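/*
 * Set the VNI/VSID match in the EFX filter specification.
 * Only a full (all-ones) or an empty (all-zeroes) 24-bit mask is
 * supported; any other mask is rejected.
 */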
static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      efx_filter_spec_t *efx_spec,
                      struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

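/*
 * Table of supported pattern items. The layer/prev_layer pairs encode
 * the allowed ordering of items within a pattern; the ordering is
 * checked in sfc_flow_parse_pattern().
 */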
static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_nvgre,
        },
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->transfer != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
                                   "Transfer is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
        flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

        return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

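/*
 * Walk the pattern items in order, checking the layer ordering and
 * tunnelling constraints, and let each item's parser update the
 * filter specification template.
 */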
static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                rc = item->parse(pattern, &flow->spec.template, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}

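/* Convert a QUEUE action to a DMA queue ID in the specification template */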
static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_rxq *rxq;

        if (queue->index >= sa->rxq_count)
                return -EINVAL;

        rxq = sa->rxq_info[queue->index].rxq;
        flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

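/*
 * Convert an RSS action to the flow RSS configuration: validate the
 * queue span, hash function, hash types and key, and build the
 * indirection table relative to the minimum hardware RxQ index.
 */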
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
                   const struct rte_flow_action_rss *action_rss,
                   struct rte_flow *flow)
{
        struct sfc_rss *rss = &sa->rss;
        unsigned int rxq_sw_index;
        struct sfc_rxq *rxq;
        unsigned int rxq_hw_index_min;
        unsigned int rxq_hw_index_max;
        efx_rx_hash_type_t efx_hash_types;
        const uint8_t *rss_key;
        struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
        unsigned int i;

        if (action_rss->queue_num == 0)
                return -EINVAL;

        rxq_sw_index = sa->rxq_count - 1;
        rxq = sa->rxq_info[rxq_sw_index].rxq;
        rxq_hw_index_min = rxq->hw_index;
        rxq_hw_index_max = 0;

        for (i = 0; i < action_rss->queue_num; ++i) {
                rxq_sw_index = action_rss->queue[i];

                if (rxq_sw_index >= sa->rxq_count)
                        return -EINVAL;

                rxq = sa->rxq_info[rxq_sw_index].rxq;

                if (rxq->hw_index < rxq_hw_index_min)
                        rxq_hw_index_min = rxq->hw_index;

                if (rxq->hw_index > rxq_hw_index_max)
                        rxq_hw_index_max = rxq->hw_index;
        }

        switch (action_rss->func) {
        case RTE_ETH_HASH_FUNCTION_DEFAULT:
        case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
                break;
        default:
                return -EINVAL;
        }

        if (action_rss->level)
                return -EINVAL;

        /*
         * A dummy RSS action with only one queue and no specific settings
         * for hash types and key does not require a dedicated RSS context
         * and may be simplified to a single queue action.
         */
        if (action_rss->queue_num == 1 && action_rss->types == 0 &&
            action_rss->key_len == 0) {
                flow->spec.template.efs_dmaq_id = rxq_hw_index_min;
                return 0;
        }

        if (action_rss->types) {
                int rc;

                rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
                                          &efx_hash_types);
                if (rc != 0)
                        return -rc;
        } else {
                unsigned int i;

                efx_hash_types = 0;
                for (i = 0; i < rss->hf_map_nb_entries; ++i)
                        efx_hash_types |= rss->hf_map[i].efx;
        }

        if (action_rss->key_len) {
                if (action_rss->key_len != sizeof(rss->key))
                        return -EINVAL;

                rss_key = action_rss->key;
        } else {
                rss_key = rss->key;
        }

        flow->rss = B_TRUE;

        sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
        sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
        sfc_rss_conf->rss_hash_types = efx_hash_types;
        rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

        for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
                unsigned int nb_queues = action_rss->queue_num;
                unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
                struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;

                sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
        }

        return 0;
}

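/* Remove the first filters_count hardware filters of the specification */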
static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
                    unsigned int filters_count)
{
        unsigned int i;
        int ret = 0;

        for (i = 0; i < filters_count; i++) {
                int rc;

                rc = efx_filter_remove(sa->nic, &spec->filters[i]);
                if (ret == 0 && rc != 0) {
                        sfc_err(sa, "failed to remove filter specification "
                                "(rc = %d)", rc);
                        ret = rc;
                }
        }

        return ret;
}

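/*
 * Insert all hardware filters of the specification; on failure, remove
 * the filters that have been inserted so far.
 */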
static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < spec->count; i++) {
                rc = efx_filter_insert(sa->nic, &spec->filters[i]);
                if (rc != 0) {
                        sfc_flow_spec_flush(sa, spec, i);
                        break;
                }
        }

        return rc;
}

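/* Remove all hardware filters of the specification */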
static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        return sfc_flow_spec_flush(sa, spec, spec->count);
}

1399 static int
1400 sfc_flow_filter_insert(struct sfc_adapter *sa,
1401                        struct rte_flow *flow)
1402 {
1403         struct sfc_rss *rss = &sa->rss;
1404         struct sfc_flow_rss *flow_rss = &flow->rss_conf;
1405         uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1406         unsigned int i;
1407         int rc = 0;
1408
1409         if (flow->rss) {
1410                 unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
1411                                               flow_rss->rxq_hw_index_min + 1,
1412                                               EFX_MAXRSS);
1413
1414                 rc = efx_rx_scale_context_alloc(sa->nic,
1415                                                 EFX_RX_SCALE_EXCLUSIVE,
1416                                                 rss_spread,
1417                                                 &efs_rss_context);
1418                 if (rc != 0)
1419                         goto fail_scale_context_alloc;
1420
1421                 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1422                                            rss->hash_alg,
1423                                            flow_rss->rss_hash_types, B_TRUE);
1424                 if (rc != 0)
1425                         goto fail_scale_mode_set;
1426
1427                 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1428                                           flow_rss->rss_key,
1429                                           sizeof(rss->key));
1430                 if (rc != 0)
1431                         goto fail_scale_key_set;
1432
1433                 /*
1434                  * At this point, fully elaborated filter specifications
1435                  * have been produced from the template. To make sure that
1436                  * RSS behaviour is consistent between them, set the same
1437                  * RSS context value everywhere.
1438                  */
1439                 for (i = 0; i < flow->spec.count; i++) {
1440                         efx_filter_spec_t *spec = &flow->spec.filters[i];
1441
1442                         spec->efs_rss_context = efs_rss_context;
1443                         spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1444                         spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1445                 }
1446         }
1447
1448         rc = sfc_flow_spec_insert(sa, &flow->spec);
1449         if (rc != 0)
1450                 goto fail_filter_insert;
1451
1452         if (flow->rss) {
1453                 /*
1454                  * The scale table is set after filter insertion because
1455                  * the table entries are relative to the base RxQ ID,
1456                  * which is submitted to the HW by inserting a filter.
1457                  * By the time of this request the HW therefore knows
1458                  * everything needed to verify the table entries, and
1459                  * the operation will succeed.
1460                  */
1461                 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1462                                           flow_rss->rss_tbl,
1463                                           RTE_DIM(flow_rss->rss_tbl));
1464                 if (rc != 0)
1465                         goto fail_scale_tbl_set;
1466         }
1467
1468         return 0;
1469
1470 fail_scale_tbl_set:
1471         sfc_flow_spec_remove(sa, &flow->spec);
1472
1473 fail_filter_insert:
1474 fail_scale_key_set:
1475 fail_scale_mode_set:
1476         if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1477                 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1478
1479 fail_scale_context_alloc:
1480         return rc;
1481 }
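
/*
 * To summarize the RSS path above: context allocation
 * (efx_rx_scale_context_alloc), then hash mode and key programming
 * (efx_rx_scale_mode_set, efx_rx_scale_key_set), then filter insertion,
 * and only then the indirection table (efx_rx_scale_tbl_set), with each
 * failure label unwinding exactly the steps completed before it.
 */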
1482
1483 static int
1484 sfc_flow_filter_remove(struct sfc_adapter *sa,
1485                        struct rte_flow *flow)
1486 {
1487         int rc = 0;
1488
1489         rc = sfc_flow_spec_remove(sa, &flow->spec);
1490         if (rc != 0)
1491                 return rc;
1492
1493         if (flow->rss) {
1494                 /*
1495                  * All specifications for a given flow rule have the same
1496                  * RSS context, so the RSS context value is taken from
1497                  * the first filter specification.
1498                  */
1499                 efx_filter_spec_t *spec = &flow->spec.filters[0];
1500
1501                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1502         }
1503
1504         return rc;
1505 }
1506
1507 static int
1508 sfc_flow_parse_mark(struct sfc_adapter *sa,
1509                     const struct rte_flow_action_mark *mark,
1510                     struct rte_flow *flow)
1511 {
1512         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1513
1514         if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1515                 return EINVAL;
1516
1517         flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1518         flow->spec.template.efs_mark = mark->id;
1519
1520         return 0;
1521 }
1522
1523 static int
1524 sfc_flow_parse_actions(struct sfc_adapter *sa,
1525                        const struct rte_flow_action actions[],
1526                        struct rte_flow *flow,
1527                        struct rte_flow_error *error)
1528 {
1529         int rc;
1530         const unsigned int dp_rx_features = sa->dp_rx->features;
1531         uint32_t actions_set = 0;
1532         const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1533                                            (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1534                                            (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1535         const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1536                                            (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1537
1538         if (actions == NULL) {
1539                 rte_flow_error_set(error, EINVAL,
1540                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1541                                    "NULL actions");
1542                 return -rte_errno;
1543         }
1544
1545 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
1546         RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
1547
1548         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1549                 switch (actions->type) {
1550                 case RTE_FLOW_ACTION_TYPE_VOID:
1551                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1552                                                actions_set);
1553                         break;
1554
1555                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1556                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1557                                                actions_set);
1558                         if ((actions_set & fate_actions_mask) != 0)
1559                                 goto fail_fate_actions;
1560
1561                         rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1562                         if (rc != 0) {
1563                                 rte_flow_error_set(error, EINVAL,
1564                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1565                                         "Bad QUEUE action");
1566                                 return -rte_errno;
1567                         }
1568                         break;
1569
1570                 case RTE_FLOW_ACTION_TYPE_RSS:
1571                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1572                                                actions_set);
1573                         if ((actions_set & fate_actions_mask) != 0)
1574                                 goto fail_fate_actions;
1575
1576                         rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1577                         if (rc != 0) {
1578                                 rte_flow_error_set(error, -rc,
1579                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1580                                         "Bad RSS action");
1581                                 return -rte_errno;
1582                         }
1583                         break;
1584
1585                 case RTE_FLOW_ACTION_TYPE_DROP:
1586                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1587                                                actions_set);
1588                         if ((actions_set & fate_actions_mask) != 0)
1589                                 goto fail_fate_actions;
1590
1591                         flow->spec.template.efs_dmaq_id =
1592                                 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1593                         break;
1594
1595                 case RTE_FLOW_ACTION_TYPE_FLAG:
1596                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1597                                                actions_set);
1598                         if ((actions_set & mark_actions_mask) != 0)
1599                                 goto fail_actions_overlap;
1600
1601                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1602                                 rte_flow_error_set(error, ENOTSUP,
1603                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1604                                         "FLAG action is not supported on the current Rx datapath");
1605                                 return -rte_errno;
1606                         }
1607
1608                         flow->spec.template.efs_flags |=
1609                                 EFX_FILTER_FLAG_ACTION_FLAG;
1610                         break;
1611
1612                 case RTE_FLOW_ACTION_TYPE_MARK:
1613                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1614                                                actions_set);
1615                         if ((actions_set & mark_actions_mask) != 0)
1616                                 goto fail_actions_overlap;
1617
1618                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1619                                 rte_flow_error_set(error, ENOTSUP,
1620                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1621                                         "MARK action is not supported on the current Rx datapath");
1622                                 return -rte_errno;
1623                         }
1624
1625                         rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1626                         if (rc != 0) {
1627                                 rte_flow_error_set(error, rc,
1628                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1629                                         "Bad MARK action");
1630                                 return -rte_errno;
1631                         }
1632                         break;
1633
1634                 default:
1635                         rte_flow_error_set(error, ENOTSUP,
1636                                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1637                                            "Action is not supported");
1638                         return -rte_errno;
1639                 }
1640
1641                 actions_set |= (1UL << actions->type);
1642         }
1643 #undef SFC_BUILD_SET_OVERFLOW
1644
1645         /* When fate is unknown, drop traffic. */
1646         if ((actions_set & fate_actions_mask) == 0) {
1647                 flow->spec.template.efs_dmaq_id =
1648                         EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1649         }
1650
1651         return 0;
1652
1653 fail_fate_actions:
1654         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1655                            "Cannot combine several fate-deciding actions, "
1656                            "choose between QUEUE, RSS or DROP");
1657         return -rte_errno;
1658
1659 fail_actions_overlap:
1660         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1661                            "Overlapping actions are not supported");
1662         return -rte_errno;
1663 }
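
/*
 * For example, a hypothetical action list
 *     { MARK(id = 1), QUEUE(index = 0), END }
 * is accepted by the loop above: MARK sets EFX_FILTER_FLAG_ACTION_MARK
 * and efs_mark in the template, QUEUE sets efs_dmaq_id, and actions_set
 * records both types so that adding RSS or DROP (a second fate action)
 * or FLAG (a second mark-like action) would be rejected.
 */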
1664
1665 /**
1666  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1667  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the copied
1668  * specifications, one flag per group of copies.
1669  *
1670  * @param spec[in, out]
1671  *   SFC flow specification to update.
1672  * @param filters_count_for_one_val[in]
1673  *   How many specifications should have the same match flag, i.e. the
1674  *   number of specifications before copying.
1675  * @param error[out]
1676  *   Perform verbose error reporting if not NULL.
1677  */
1678 static int
1679 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1680                                unsigned int filters_count_for_one_val,
1681                                struct rte_flow_error *error)
1682 {
1683         unsigned int i;
1684         static const efx_filter_match_flags_t vals[] = {
1685                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1686                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1687         };
1688
1689         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1690                 rte_flow_error_set(error, EINVAL,
1691                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1692                         "Number of specifications is incorrect while copying "
1693                         "by unknown destination flags");
1694                 return -rte_errno;
1695         }
1696
1697         for (i = 0; i < spec->count; i++) {
1698                 /* The check above ensures that divisor can't be zero here */
1699                 spec->filters[i].efs_match_flags |=
1700                         vals[i / filters_count_for_one_val];
1701         }
1702
1703         return 0;
1704 }
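
/*
 * For example, if the specification held two filters before copying,
 * it holds four afterwards: filters[0..1] (the originals) receive
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and filters[2..3] (the copies)
 * receive EFX_FILTER_MATCH_UNKNOWN_MCAST_DST, since the index
 * i / filters_count_for_one_val selects vals[0] and then vals[1].
 */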
1705
1706 /**
1707  * Check that the following condition is met:
1708  * - the list of supported filters has a filter
1709  *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1710  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1711  *   be inserted.
1712  *
1713  * @param match[in]
1714  *   The match flags of filter.
1715  * @param spec[in]
1716  *   Specification to be supplemented.
1717  * @param filter[in]
1718  *   SFC filter with list of supported filters.
1719  */
1720 static boolean_t
1721 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1722                                  __rte_unused efx_filter_spec_t *spec,
1723                                  struct sfc_filter *filter)
1724 {
1725         unsigned int i;
1726         efx_filter_match_flags_t match_mcast_dst;
1727
1728         match_mcast_dst =
1729                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1730                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1731         for (i = 0; i < filter->supported_match_num; i++) {
1732                 if (match_mcast_dst == filter->supported_match[i])
1733                         return B_TRUE;
1734         }
1735
1736         return B_FALSE;
1737 }
1738
1739 /**
1740  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and the EFX_ETHER_TYPE_IPV4
1741  * and EFX_ETHER_TYPE_IPV6 values of the corresponding field in the copied
1742  * specifications, one value per group of copies.
1743  *
1744  * @param spec[in, out]
1745  *   SFC flow specification to update.
1746  * @param filters_count_for_one_val[in]
1747  *   How many specifications should have the same EtherType value, i.e. the
1748  *   number of specifications before copying.
1749  * @param error[out]
1750  *   Perform verbose error reporting if not NULL.
1751  */
1752 static int
1753 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1754                         unsigned int filters_count_for_one_val,
1755                         struct rte_flow_error *error)
1756 {
1757         unsigned int i;
1758         static const uint16_t vals[] = {
1759                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1760         };
1761
1762         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1763                 rte_flow_error_set(error, EINVAL,
1764                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1765                         "Number of specifications is incorrect "
1766                         "while copying by Ethertype");
1767                 return -rte_errno;
1768         }
1769
1770         for (i = 0; i < spec->count; i++) {
1771                 spec->filters[i].efs_match_flags |=
1772                         EFX_FILTER_MATCH_ETHER_TYPE;
1773
1774                 /*
1775                  * The check above ensures that
1776                  * filters_count_for_one_val is not 0
1777                  */
1778                 spec->filters[i].efs_ether_type =
1779                         vals[i / filters_count_for_one_val];
1780         }
1781
1782         return 0;
1783 }
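
/*
 * E.g. starting from a single filter, filters[0] is given
 * EFX_ETHER_TYPE_IPV4 and its copy filters[1] EFX_ETHER_TYPE_IPV6,
 * so a pattern that does not constrain EtherType is expanded to
 * cover both IPv4 and IPv6.
 */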
1784
1785 /**
1786  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1787  * in all specifications after copying.
1788  *
1789  * @param spec[in, out]
1790  *   SFC flow specification to update.
1791  * @param filters_count_for_one_val[in]
1792  *   How many specifications should have the same match flag, i.e. the
1793  *   number of specifications before copying.
1794  * @param error[out]
1795  *   Perform verbose error reporting if not NULL.
1796  */
1797 static int
1798 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1799                             unsigned int filters_count_for_one_val,
1800                             struct rte_flow_error *error)
1801 {
1802         unsigned int i;
1803
1804         if (filters_count_for_one_val != spec->count) {
1805                 rte_flow_error_set(error, EINVAL,
1806                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1807                         "Number of specifications is incorrect "
1808                         "while copying by outer VLAN ID");
1809                 return -rte_errno;
1810         }
1811
1812         for (i = 0; i < spec->count; i++) {
1813                 spec->filters[i].efs_match_flags |=
1814                         EFX_FILTER_MATCH_OUTER_VID;
1815
1816                 spec->filters[i].efs_outer_vid = 0;
1817         }
1818
1819         return 0;
1820 }
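
/*
 * Unlike the copy functions above, vals_count for this flag is 1, so
 * no copies are produced: every existing specification simply gains
 * an outer VLAN ID match with value 0.
 */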
1821
1822 /**
1823  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1824  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1825  * specifications after copying.
1826  *
1827  * @param spec[in, out]
1828  *   SFC flow specification to update.
1829  * @param filters_count_for_one_val[in]
1830  *   How many specifications should have the same match flag, i.e. the
1831  *   number of specifications before copying.
1832  * @param error[out]
1833  *   Perform verbose error reporting if not NULL.
1834  */
1835 static int
1836 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1837                                     unsigned int filters_count_for_one_val,
1838                                     struct rte_flow_error *error)
1839 {
1840         unsigned int i;
1841         static const efx_filter_match_flags_t vals[] = {
1842                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1843                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1844         };
1845
1846         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1847                 rte_flow_error_set(error, EINVAL,
1848                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1849                         "Number of specifications is incorrect while copying "
1850                         "by inner frame unknown destination flags");
1851                 return -rte_errno;
1852         }
1853
1854         for (i = 0; i < spec->count; i++) {
1855                 /* The check above ensures that divisor can't be zero here */
1856                 spec->filters[i].efs_match_flags |=
1857                         vals[i / filters_count_for_one_val];
1858         }
1859
1860         return 0;
1861 }
1862
1863 /**
1864  * Check that the following conditions are met:
1865  * - the specification corresponds to a filter for encapsulated traffic
1866  * - the list of supported filters has a filter
1867  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1868  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1869  *   be inserted.
1870  *
1871  * @param match[in]
1872  *   The match flags of filter.
1873  * @param spec[in]
1874  *   Specification to be supplemented.
1875  * @param filter[in]
1876  *   SFC filter with list of supported filters.
1877  */
1878 static boolean_t
1879 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1880                                       efx_filter_spec_t *spec,
1881                                       struct sfc_filter *filter)
1882 {
1883         unsigned int i;
1884         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1885         efx_filter_match_flags_t match_mcast_dst;
1886
1887         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1888                 return B_FALSE;
1889
1890         match_mcast_dst =
1891                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1892                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1893         for (i = 0; i < filter->supported_match_num; i++) {
1894                 if (match_mcast_dst == filter->supported_match[i])
1895                         return B_TRUE;
1896         }
1897
1898         return B_FALSE;
1899 }
1900
1901 /**
1902  * Check whether the list of supported filters has a filter that differs
1903  * from @p match only in that it lacks the EFX_FILTER_MATCH_OUTER_VID flag;
1904  * in that case the filter without the flag will be used and the
1905  * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
1906  *
1907  * @param match[in]
1908  *   The match flags of filter.
1909  * @param spec[in]
1910  *   Specification to be supplemented.
1911  * @param filter[in]
1912  *   SFC filter with list of supported filters.
1913  */
1914 static boolean_t
1915 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
1916                               __rte_unused efx_filter_spec_t *spec,
1917                               struct sfc_filter *filter)
1918 {
1919         unsigned int i;
1920         efx_filter_match_flags_t match_without_vid =
1921                 match & ~EFX_FILTER_MATCH_OUTER_VID;
1922
1923         for (i = 0; i < filter->supported_match_num; i++) {
1924                 if (match_without_vid == filter->supported_match[i])
1925                         return B_FALSE;
1926         }
1927
1928         return B_TRUE;
1929 }
1930
1931 /*
1932  * Match flags that can be automatically added to filters.
1933  * Selecting the last of the equally small multipliers when searching for the
1934  * copy flags gives EFX_FILTER_MATCH_UNKNOWN_UCAST_DST a higher priority
1935  * than EFX_FILTER_MATCH_ETHER_TYPE, because filters with
1936  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are at the end of the list of supported
1937  * filters.
1938  */
1939 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
1940         {
1941                 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1942                 .vals_count = 2,
1943                 .set_vals = sfc_flow_set_unknown_dst_flags,
1944                 .spec_check = sfc_flow_check_unknown_dst_flags,
1945         },
1946         {
1947                 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
1948                 .vals_count = 2,
1949                 .set_vals = sfc_flow_set_ethertypes,
1950                 .spec_check = NULL,
1951         },
1952         {
1953                 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1954                 .vals_count = 2,
1955                 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
1956                 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
1957         },
1958         {
1959                 .flag = EFX_FILTER_MATCH_OUTER_VID,
1960                 .vals_count = 1,
1961                 .set_vals = sfc_flow_set_outer_vid_flag,
1962                 .spec_check = sfc_flow_check_outer_vid_flag,
1963         },
1964 };
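
/*
 * A sketch of how the table composes: if a supported filter requires
 * both EFX_FILTER_MATCH_ETHER_TYPE and EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * beyond the template's flags, the specification is copied by both
 * entries in turn, producing 2 * 2 = 4 filter specifications.
 */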
1965
1966 /* Find the sfc_flow_copy_flags entry for the given match flag */
1967 static const struct sfc_flow_copy_flag *
1968 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
1969 {
1970         unsigned int i;
1971
1972         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1973                 if (sfc_flow_copy_flags[i].flag == flag)
1974                         return &sfc_flow_copy_flags[i];
1975         }
1976
1977         return NULL;
1978 }
1979
1980 /**
1981  * Make copies of the specifications, set match flag and values
1982  * of the field that corresponds to it.
1983  *
1984  * @param spec[in, out]
1985  *   SFC flow specification to update.
1986  * @param flag[in]
1987  *   The match flag to add.
1988  * @param error[out]
1989  *   Perform verbose error reporting if not NULL.
1990  */
1991 static int
1992 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
1993                              efx_filter_match_flags_t flag,
1994                              struct rte_flow_error *error)
1995 {
1996         unsigned int i;
1997         unsigned int new_filters_count;
1998         unsigned int filters_count_for_one_val;
1999         const struct sfc_flow_copy_flag *copy_flag;
2000         int rc;
2001
2002         copy_flag = sfc_flow_get_copy_flag(flag);
2003         if (copy_flag == NULL) {
2004                 rte_flow_error_set(error, ENOTSUP,
2005                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2006                                    "Unsupported spec field for copying");
2007                 return -rte_errno;
2008         }
2009
2010         new_filters_count = spec->count * copy_flag->vals_count;
2011         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2012                 rte_flow_error_set(error, EINVAL,
2013                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2014                         "Too much EFX specifications in the flow rule");
2015                 return -rte_errno;
2016         }
2017
2018         /* Copy the filter specifications */
2019         for (i = spec->count; i < new_filters_count; i++)
2020                 spec->filters[i] = spec->filters[i - spec->count];
2021
2022         filters_count_for_one_val = spec->count;
2023         spec->count = new_filters_count;
2024
2025         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2026         if (rc != 0)
2027                 return rc;
2028
2029         return 0;
2030 }
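
/*
 * Copy layout example: with spec->count == 2 and vals_count == 2, the
 * loop above appends filters[2] = filters[0] and filters[3] =
 * filters[1]; set_vals() then assigns the first value to indices 0-1
 * and the second value to indices 2-3.
 */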
2031
2032 /**
2033  * Check that the given set of match flags missing in the original filter spec
2034  * could be covered by adding spec copies which specify the corresponding
2035  * flags and packet field values to match.
2036  *
2037  * @param miss_flags[in]
2038  *   Flags present in the supported filter but missing from the spec.
2039  * @param spec[in]
2040  *   Specification to be supplemented.
2041  * @param filter[in]
2042  *   SFC filter.
2043  *
2044  * @return
2045  *   Multiplier for the number of specifications, or 0 if the flags cannot
2046  *   be added.
2046  */
2047 static unsigned int
2048 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2049                              efx_filter_spec_t *spec,
2050                              struct sfc_filter *filter)
2051 {
2052         unsigned int i;
2053         efx_filter_match_flags_t copy_flags = 0;
2054         efx_filter_match_flags_t flag;
2055         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2056         sfc_flow_spec_check *check;
2057         unsigned int multiplier = 1;
2058
2059         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2060                 flag = sfc_flow_copy_flags[i].flag;
2061                 check = sfc_flow_copy_flags[i].spec_check;
2062                 if ((flag & miss_flags) == flag) {
2063                         if (check != NULL && (!check(match, spec, filter)))
2064                                 continue;
2065
2066                         copy_flags |= flag;
2067                         multiplier *= sfc_flow_copy_flags[i].vals_count;
2068                 }
2069         }
2070
2071         if (copy_flags == miss_flags)
2072                 return multiplier;
2073
2074         return 0;
2075 }
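
/*
 * For instance, with miss_flags consisting of EFX_FILTER_MATCH_ETHER_TYPE
 * and EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, and the UCAST_DST spec_check
 * passing, copy_flags equals miss_flags and the returned multiplier is
 * 2 * 2 = 4; if any missing flag has no table entry or fails its check,
 * the function returns 0.
 */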
2076
2077 /**
2078  * Attempt to supplement the specification template with the minimal
2079  * set of match flags required by a supported filter. To do this, the
2080  * specifications are copied and filled in with the values of the
2081  * fields that correspond to the missing flags.
2082  * The necessary and sufficient filter set is built from the fewest
2083  * number of copies which cover the minimally required set of
2084  * flags.
2085  *
2086  * @param sa[in]
2087  *   SFC adapter.
2088  * @param spec[in, out]
2089  *   SFC flow specification to update.
2090  * @param error[out]
2091  *   Perform verbose error reporting if not NULL.
2092  */
2093 static int
2094 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2095                                struct sfc_flow_spec *spec,
2096                                struct rte_flow_error *error)
2097 {
2098         struct sfc_filter *filter = &sa->filter;
2099         efx_filter_match_flags_t miss_flags;
2100         efx_filter_match_flags_t min_miss_flags = 0;
2101         efx_filter_match_flags_t match;
2102         unsigned int min_multiplier = UINT_MAX;
2103         unsigned int multiplier;
2104         unsigned int i;
2105         int rc;
2106
2107         match = spec->template.efs_match_flags;
2108         for (i = 0; i < filter->supported_match_num; i++) {
2109                 if ((match & filter->supported_match[i]) == match) {
2110                         miss_flags = filter->supported_match[i] & (~match);
2111                         multiplier = sfc_flow_check_missing_flags(miss_flags,
2112                                 &spec->template, filter);
2113                         if (multiplier > 0) {
2114                                 if (multiplier <= min_multiplier) {
2115                                         min_multiplier = multiplier;
2116                                         min_miss_flags = miss_flags;
2117                                 }
2118                         }
2119                 }
2120         }
2121
2122         if (min_multiplier == UINT_MAX) {
2123                 rte_flow_error_set(error, ENOTSUP,
2124                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2125                                    "The flow rule pattern is unsupported");
2126                 return -rte_errno;
2127         }
2128
2129         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2130                 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2131
2132                 if ((flag & min_miss_flags) == flag) {
2133                         rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2134                         if (rc != 0)
2135                                 return rc;
2136                 }
2137         }
2138
2139         return 0;
2140 }
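
/*
 * E.g. if the template's match flags are a subset of two supported
 * filters, one reachable with multiplier 4 and another with multiplier
 * 2, the flags leading to the multiplier-2 filter are added, so the
 * fewest specification copies are produced.
 */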
2141
2142 /**
2143  * Check that a set of match flags corresponds to the given flags pattern,
2144  * optionally extended with the OUTER_VID flag or with both the OUTER_VID
2145  * and INNER_VID flags.
2146  *
2147  * @param match_flags[in]
2148  *   Set of match flags.
2149  * @param flags_pattern[in]
2150  *   Pattern of filter match flags.
2151  */
2152 static boolean_t
2153 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2154                             efx_filter_match_flags_t flags_pattern)
2155 {
2156         if ((match_flags & flags_pattern) != flags_pattern)
2157                 return B_FALSE;
2158
2159         switch (match_flags & ~flags_pattern) {
2160         case 0:
2161         case EFX_FILTER_MATCH_OUTER_VID:
2162         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2163                 return B_TRUE;
2164         default:
2165                 return B_FALSE;
2166         }
2167 }
2168
2169 /**
2170  * Check whether the spec maps to a hardware filter which is known to be
2171  * ineffective despite being valid.
2172  *
2173  * @param filter[in]
2174  *   SFC filter with list of supported filters.
2175  * @param spec[in]
2176  *   SFC flow specification.
2177  */
2178 static boolean_t
2179 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2180                                   struct sfc_flow_spec *spec)
2181 {
2182         unsigned int i;
2183         uint16_t ether_type;
2184         uint8_t ip_proto;
2185         efx_filter_match_flags_t match_flags;
2186
2187         for (i = 0; i < spec->count; i++) {
2188                 match_flags = spec->filters[i].efs_match_flags;
2189
2190                 if (sfc_flow_is_match_with_vids(match_flags,
2191                                                 EFX_FILTER_MATCH_ETHER_TYPE) ||
2192                     sfc_flow_is_match_with_vids(match_flags,
2193                                                 EFX_FILTER_MATCH_ETHER_TYPE |
2194                                                 EFX_FILTER_MATCH_LOC_MAC)) {
2195                         ether_type = spec->filters[i].efs_ether_type;
2196                         if (filter->supports_ip_proto_or_addr_filter &&
2197                             (ether_type == EFX_ETHER_TYPE_IPV4 ||
2198                              ether_type == EFX_ETHER_TYPE_IPV6))
2199                                 return B_TRUE;
2200                 } else if (sfc_flow_is_match_with_vids(match_flags,
2201                                 EFX_FILTER_MATCH_ETHER_TYPE |
2202                                 EFX_FILTER_MATCH_IP_PROTO) ||
2203                            sfc_flow_is_match_with_vids(match_flags,
2204                                 EFX_FILTER_MATCH_ETHER_TYPE |
2205                                 EFX_FILTER_MATCH_IP_PROTO |
2206                                 EFX_FILTER_MATCH_LOC_MAC)) {
2207                         ip_proto = spec->filters[i].efs_ip_proto;
2208                         if (filter->supports_rem_or_local_port_filter &&
2209                             (ip_proto == EFX_IPPROTO_TCP ||
2210                              ip_proto == EFX_IPPROTO_UDP))
2211                                 return B_TRUE;
2212                 }
2213         }
2214
2215         return B_FALSE;
2216 }
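
/*
 * In other words, an EtherType-only match for IPv4/IPv6 on hardware
 * that also supports IP-proto or address filters, and likewise an
 * IP-proto-only match for TCP/UDP where port filters are supported,
 * would be accepted by the firmware but would not behave as the user
 * expects, so such rules are rejected up front.
 */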
2217
2218 static int
2219 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2220                               struct rte_flow *flow,
2221                               struct rte_flow_error *error)
2222 {
2223         efx_filter_spec_t *spec_tmpl = &flow->spec.template;
2224         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2225         int rc;
2226
2227         /* Initialize the first filter spec with the template */
2228         flow->spec.filters[0] = *spec_tmpl;
2229         flow->spec.count = 1;
2230
2231         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2232                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2233                 if (rc != 0)
2234                         return rc;
2235         }
2236
2237         if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2238                 rte_flow_error_set(error, ENOTSUP,
2239                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2240                         "The flow rule pattern is unsupported");
2241                 return -rte_errno;
2242         }
2243
2244         return 0;
2245 }
2246
2247 static int
2248 sfc_flow_parse(struct rte_eth_dev *dev,
2249                const struct rte_flow_attr *attr,
2250                const struct rte_flow_item pattern[],
2251                const struct rte_flow_action actions[],
2252                struct rte_flow *flow,
2253                struct rte_flow_error *error)
2254 {
2255         struct sfc_adapter *sa = dev->data->dev_private;
2256         int rc;
2257
2258         rc = sfc_flow_parse_attr(attr, flow, error);
2259         if (rc != 0)
2260                 goto fail_bad_value;
2261
2262         rc = sfc_flow_parse_pattern(pattern, flow, error);
2263         if (rc != 0)
2264                 goto fail_bad_value;
2265
2266         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2267         if (rc != 0)
2268                 goto fail_bad_value;
2269
2270         rc = sfc_flow_validate_match_flags(sa, flow, error);
2271         if (rc != 0)
2272                 goto fail_bad_value;
2273
2274         return 0;
2275
2276 fail_bad_value:
2277         return rc;
2278 }
2279
2280 static int
2281 sfc_flow_validate(struct rte_eth_dev *dev,
2282                   const struct rte_flow_attr *attr,
2283                   const struct rte_flow_item pattern[],
2284                   const struct rte_flow_action actions[],
2285                   struct rte_flow_error *error)
2286 {
2287         struct rte_flow flow;
2288
2289         memset(&flow, 0, sizeof(flow));
2290
2291         return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
2292 }
2293
2294 static struct rte_flow *
2295 sfc_flow_create(struct rte_eth_dev *dev,
2296                 const struct rte_flow_attr *attr,
2297                 const struct rte_flow_item pattern[],
2298                 const struct rte_flow_action actions[],
2299                 struct rte_flow_error *error)
2300 {
2301         struct sfc_adapter *sa = dev->data->dev_private;
2302         struct rte_flow *flow = NULL;
2303         int rc;
2304
2305         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2306         if (flow == NULL) {
2307                 rte_flow_error_set(error, ENOMEM,
2308                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2309                                    "Failed to allocate memory");
2310                 goto fail_no_mem;
2311         }
2312
2313         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2314         if (rc != 0)
2315                 goto fail_bad_value;
2316
2317         sfc_adapter_lock(sa);
2318
2319         TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
2320
2321         if (sa->state == SFC_ADAPTER_STARTED) {
2322                 rc = sfc_flow_filter_insert(sa, flow);
2323                 if (rc != 0) {
2324                         rte_flow_error_set(error, rc,
2325                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2326                                 "Failed to insert filter");
2327                         goto fail_filter_insert;
2328                 }
2329         }
2330
2331         sfc_adapter_unlock(sa);
2332
2333         return flow;
2334
2335 fail_filter_insert:
2336         TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2337         sfc_adapter_unlock(sa);
2338
2339 fail_bad_value:
2340         rte_free(flow);
2341
2342 fail_no_mem:
2343         return NULL;
2344 }
2345
2346 static int
2347 sfc_flow_remove(struct sfc_adapter *sa,
2348                 struct rte_flow *flow,
2349                 struct rte_flow_error *error)
2350 {
2351         int rc = 0;
2352
2353         SFC_ASSERT(sfc_adapter_is_locked(sa));
2354
2355         if (sa->state == SFC_ADAPTER_STARTED) {
2356                 rc = sfc_flow_filter_remove(sa, flow);
2357                 if (rc != 0)
2358                         rte_flow_error_set(error, rc,
2359                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2360                                 "Failed to destroy flow rule");
2361         }
2362
2363         TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2364         rte_free(flow);
2365
2366         return rc;
2367 }
2368
2369 static int
2370 sfc_flow_destroy(struct rte_eth_dev *dev,
2371                  struct rte_flow *flow,
2372                  struct rte_flow_error *error)
2373 {
2374         struct sfc_adapter *sa = dev->data->dev_private;
2375         struct rte_flow *flow_ptr;
2376         int rc = EINVAL;
2377
2378         sfc_adapter_lock(sa);
2379
2380         TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
2381                 if (flow_ptr == flow)
2382                         rc = 0;
2383         }
2384         if (rc != 0) {
2385                 rte_flow_error_set(error, rc,
2386                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2387                                    "Failed to find flow rule to destroy");
2388                 goto fail_bad_value;
2389         }
2390
2391         rc = sfc_flow_remove(sa, flow, error);
2392
2393 fail_bad_value:
2394         sfc_adapter_unlock(sa);
2395
2396         return -rc;
2397 }
2398
2399 static int
2400 sfc_flow_flush(struct rte_eth_dev *dev,
2401                struct rte_flow_error *error)
2402 {
2403         struct sfc_adapter *sa = dev->data->dev_private;
2404         struct rte_flow *flow;
2405         int rc = 0;
2406         int ret = 0;
2407
2408         sfc_adapter_lock(sa);
2409
2410         while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2411                 rc = sfc_flow_remove(sa, flow, error);
2412                 if (rc != 0)
2413                         ret = rc;
2414         }
2415
2416         sfc_adapter_unlock(sa);
2417
2418         return -ret;
2419 }
2420
2421 static int
2422 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2423                  struct rte_flow_error *error)
2424 {
2425         struct sfc_adapter *sa = dev->data->dev_private;
2426         struct sfc_port *port = &sa->port;
2427         int ret = 0;
2428
2429         sfc_adapter_lock(sa);
2430         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2431                 rte_flow_error_set(error, EBUSY,
2432                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2433                                    NULL, "please close the port first");
2434                 ret = -rte_errno;
2435         } else {
2436                 port->isolated = (enable) ? B_TRUE : B_FALSE;
2437         }
2438         sfc_adapter_unlock(sa);
2439
2440         return ret;
2441 }
2442
2443 const struct rte_flow_ops sfc_flow_ops = {
2444         .validate = sfc_flow_validate,
2445         .create = sfc_flow_create,
2446         .destroy = sfc_flow_destroy,
2447         .flush = sfc_flow_flush,
2448         .query = NULL,
2449         .isolate = sfc_flow_isolate,
2450 };
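
/*
 * These callbacks are invoked through the generic rte_flow API. A
 * minimal, hypothetical application-side sequence:
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *f;
 *
 *     f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     if (f == NULL)
 *             printf("flow rule rejected: %s\n", err.message);
 *     else
 *             rte_flow_destroy(port_id, f, &err);
 */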
2451
2452 void
2453 sfc_flow_init(struct sfc_adapter *sa)
2454 {
2455         SFC_ASSERT(sfc_adapter_is_locked(sa));
2456
2457         TAILQ_INIT(&sa->filter.flow_list);
2458 }
2459
2460 void
2461 sfc_flow_fini(struct sfc_adapter *sa)
2462 {
2463         struct rte_flow *flow;
2464
2465         SFC_ASSERT(sfc_adapter_is_locked(sa));
2466
2467         while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2468                 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2469                 rte_free(flow);
2470         }
2471 }
2472
2473 void
2474 sfc_flow_stop(struct sfc_adapter *sa)
2475 {
2476         struct rte_flow *flow;
2477
2478         SFC_ASSERT(sfc_adapter_is_locked(sa));
2479
2480         TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
2481                 sfc_flow_filter_remove(sa, flow);
2482 }
2483
2484 int
2485 sfc_flow_start(struct sfc_adapter *sa)
2486 {
2487         struct rte_flow *flow;
2488         int rc = 0;
2489
2490         sfc_log_init(sa, "entry");
2491
2492         SFC_ASSERT(sfc_adapter_is_locked(sa));
2493
2494         TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
2495                 rc = sfc_flow_filter_insert(sa, flow);
2496                 if (rc != 0)
2497                         goto fail_bad_flow;
2498         }
2499
2500         sfc_log_init(sa, "done");
2501
2502 fail_bad_flow:
2503         return rc;
2504 }
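
/*
 * Together, sfc_flow_stop() and sfc_flow_start() let flow rules survive
 * a port restart: filters are removed from the hardware on stop but the
 * rules are kept in flow_list, and each one is re-inserted on start;
 * any insertion failure is reported back to the caller through rc.
 */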