net/sfc: separate adapter primary process and shared data
[dpdk.git] drivers/net/sfc/sfc_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

/*
 * The flow API is currently implemented in such a manner that each
 * flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is unset in the flow rule, multiple filter
 * copies are created to cover all possible values of such a field.
 */

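/*
 * For instance, if a rule does not specify the destination MAC address,
 * copies of the filter specification are made for the unknown unicast
 * and unknown multicast destination match flags (see the set_vals and
 * spec_check helpers such as sfc_flow_set_unknown_dst_flags() declared
 * below).
 */
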
enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
        SFC_FLOW_ITEM_L3,
        SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of item */
        enum sfc_flow_item_layers layer;        /* Layer of item */
        enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

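/* Return B_TRUE if all @size bytes of @buf are zero */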
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask structures for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec", then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask does not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                supp = ((const uint8_t *)supp_mask)[i];

                if (~supp & mask[i]) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

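/*
 * For example, a TCP item that matches only the destination port must
 * supply a mask with hdr.dst_port set to 0xffff and all other fields
 * zero; a partial mask such as 0xff00 on a port field is rejected.
 */
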
static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   The individual/group (I/G) destination bit mask is supported, as
 *   well as empty and full masks. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * ethertype masks are equal to zero in inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
                .inner_type = RTE_BE16(0xffff),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                /* Apply mask to keep VID only */
                vid = rte_bswap16(spec->tci & mask->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN TPID matching is not supported");
                return -rte_errno;
        }
        if (mask->inner_type == supp_mask.inner_type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
        } else if (mask->inner_type) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Bad mask for VLAN inner_type");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}

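/*
 * Set the 24-bit VNI/VSID match in the filter specification, provided
 * that the mask covers the whole field; partial masks are rejected.
 */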
static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, the default mask will be
 *   used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      efx_filter_spec_t *efx_spec,
                      struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

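/*
 * The table below drives pattern validation: an item is accepted only if
 * its prev_layer matches the layer reached so far. Note that the tunnel
 * items (VXLAN/GENEVE/NVGRE) reset the layer to START_LAYER, which is
 * what allows an inner-frame ETH item to follow them.
 */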
static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_nvgre,
        },
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->transfer != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
                                   "Transfer is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
        flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

        return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the
                 * beginning of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                rc = item->parse(pattern, &flow->spec.template, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}

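/*
 * Convert a QUEUE action to the DMA queue ID of the selected Rx queue
 * in the filter specification template.
 */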
static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_rxq *rxq;

        if (queue->index >= sfc_sa2shared(sa)->rxq_count)
                return -EINVAL;

        rxq = &sa->rxq_ctrl[queue->index];
        flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

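/*
 * Convert an RSS action to the flow RSS configuration: validate the
 * queue set, hash function, hash types and key, and fill in
 * flow->rss_conf.
 */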
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
                   const struct rte_flow_action_rss *action_rss,
                   struct rte_flow *flow)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_rss *rss = &sas->rss;
        unsigned int rxq_sw_index;
        struct sfc_rxq *rxq;
        unsigned int rxq_hw_index_min;
        unsigned int rxq_hw_index_max;
        efx_rx_hash_type_t efx_hash_types;
        const uint8_t *rss_key;
        struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
        unsigned int i;

        if (action_rss->queue_num == 0)
                return -EINVAL;

        rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
        rxq = &sa->rxq_ctrl[rxq_sw_index];
        rxq_hw_index_min = rxq->hw_index;
        rxq_hw_index_max = 0;

        for (i = 0; i < action_rss->queue_num; ++i) {
                rxq_sw_index = action_rss->queue[i];

                if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
                        return -EINVAL;

                rxq = &sa->rxq_ctrl[rxq_sw_index];

                if (rxq->hw_index < rxq_hw_index_min)
                        rxq_hw_index_min = rxq->hw_index;

                if (rxq->hw_index > rxq_hw_index_max)
                        rxq_hw_index_max = rxq->hw_index;
        }

        switch (action_rss->func) {
        case RTE_ETH_HASH_FUNCTION_DEFAULT:
        case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
                break;
        default:
                return -EINVAL;
        }

        if (action_rss->level)
                return -EINVAL;

        /*
         * A dummy RSS action with only one queue and no specific settings
         * for hash types and key does not require a dedicated RSS context
         * and may be simplified to a single queue action.
         */
        if (action_rss->queue_num == 1 && action_rss->types == 0 &&
            action_rss->key_len == 0) {
                flow->spec.template.efs_dmaq_id = rxq_hw_index_min;
                return 0;
        }

        if (action_rss->types) {
                int rc;

                rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
                                          &efx_hash_types);
                if (rc != 0)
                        return -rc;
        } else {
                unsigned int i;

                efx_hash_types = 0;
                for (i = 0; i < rss->hf_map_nb_entries; ++i)
                        efx_hash_types |= rss->hf_map[i].efx;
        }

        if (action_rss->key_len) {
                if (action_rss->key_len != sizeof(rss->key))
                        return -EINVAL;

                rss_key = action_rss->key;
        } else {
                rss_key = rss->key;
        }

        flow->rss = B_TRUE;

        sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
        sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
        sfc_rss_conf->rss_hash_types = efx_hash_types;
        rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

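        /*
         * Fill in the indirection table. Each entry stores the queue
         * offset relative to the minimum hardware Rx queue index, since
         * the HW scale table entries are relative to the base RxQ (see
         * the comment in sfc_flow_filter_insert() below).
         */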
        for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
                unsigned int nb_queues = action_rss->queue_num;
                unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
                struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];

                sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
        }

        return 0;
}

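/*
 * Remove the first filters_count filters of the specification from the
 * HW. Used both for complete removal and for rollback after a partially
 * failed insertion; all removals are attempted and the first error
 * encountered is returned.
 */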
static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
                    unsigned int filters_count)
{
        unsigned int i;
        int ret = 0;

        for (i = 0; i < filters_count; i++) {
                int rc;

                rc = efx_filter_remove(sa->nic, &spec->filters[i]);
                if (ret == 0 && rc != 0) {
                        sfc_err(sa, "failed to remove filter specification "
                                "(rc = %d)", rc);
                        ret = rc;
                }
        }

        return ret;
}

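/*
 * Insert all filters of the specification into the HW; on failure,
 * remove the filters inserted so far and return the error.
 */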
static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < spec->count; i++) {
                rc = efx_filter_insert(sa->nic, &spec->filters[i]);
                if (rc != 0) {
                        sfc_flow_spec_flush(sa, spec, i);
                        break;
                }
        }

        return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        return sfc_flow_spec_flush(sa, spec, spec->count);
}

static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
                       struct rte_flow *flow)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_rss *rss = &sas->rss;
        struct sfc_flow_rss *flow_rss = &flow->rss_conf;
        uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
        unsigned int i;
        int rc = 0;

        if (flow->rss) {
                unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
                                              flow_rss->rxq_hw_index_min + 1,
                                              EFX_MAXRSS);

                rc = efx_rx_scale_context_alloc(sa->nic,
                                                EFX_RX_SCALE_EXCLUSIVE,
                                                rss_spread,
                                                &efs_rss_context);
                if (rc != 0)
                        goto fail_scale_context_alloc;

                rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
                                           rss->hash_alg,
                                           flow_rss->rss_hash_types, B_TRUE);
                if (rc != 0)
                        goto fail_scale_mode_set;

                rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
                                          flow_rss->rss_key,
                                          sizeof(rss->key));
                if (rc != 0)
                        goto fail_scale_key_set;

                /*
                 * At this point, fully elaborated filter specifications
                 * have been produced from the template. To make sure that
                 * RSS behaviour is consistent between them, set the same
                 * RSS context value everywhere.
                 */
                for (i = 0; i < flow->spec.count; i++) {
                        efx_filter_spec_t *spec = &flow->spec.filters[i];

                        spec->efs_rss_context = efs_rss_context;
                        spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
                        spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
                }
        }

        rc = sfc_flow_spec_insert(sa, &flow->spec);
        if (rc != 0)
                goto fail_filter_insert;

        if (flow->rss) {
1455                 /*
1456                  * Scale table is set after filter insertion because
1457                  * the table entries are relative to the base RxQ ID
1458                  * and the latter is submitted to the HW by means of
1459                  * inserting a filter, so by the time of the request
1460                  * the HW knows all the information needed to verify
1461                  * the table entries, and the operation will succeed.
1462                  */
1463                 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1464                                           flow_rss->rss_tbl,
1465                                           RTE_DIM(flow_rss->rss_tbl));
1466                 if (rc != 0)
1467                         goto fail_scale_tbl_set;
1468         }
1469
1470         return 0;
1471
1472 fail_scale_tbl_set:
1473         sfc_flow_spec_remove(sa, &flow->spec);
1474
1475 fail_filter_insert:
1476 fail_scale_key_set:
1477 fail_scale_mode_set:
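        /* Free the RSS context only if it was actually allocated */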
1478         if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1479                 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1480
1481 fail_scale_context_alloc:
1482         return rc;
1483 }
1484
1485 static int
1486 sfc_flow_filter_remove(struct sfc_adapter *sa,
1487                        struct rte_flow *flow)
1488 {
1489         int rc = 0;
1490
1491         rc = sfc_flow_spec_remove(sa, &flow->spec);
1492         if (rc != 0)
1493                 return rc;
1494
1495         if (flow->rss) {
1496                 /*
1497                  * All specifications for a given flow rule have the same RSS
1498                  * context, so the RSS context value is taken from the first
1499                  * filter specification.
1500                  */
1501                 efx_filter_spec_t *spec = &flow->spec.filters[0];
1502
1503                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1504         }
1505
1506         return rc;
1507 }
1508
1509 static int
1510 sfc_flow_parse_mark(struct sfc_adapter *sa,
1511                     const struct rte_flow_action_mark *mark,
1512                     struct rte_flow *flow)
1513 {
1514         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1515
1516         if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1517                 return EINVAL;
1518
1519         flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1520         flow->spec.template.efs_mark = mark->id;
1521
1522         return 0;
1523 }
1524
1525 static int
1526 sfc_flow_parse_actions(struct sfc_adapter *sa,
1527                        const struct rte_flow_action actions[],
1528                        struct rte_flow *flow,
1529                        struct rte_flow_error *error)
1530 {
1531         int rc;
1532         const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1533         uint32_t actions_set = 0;
1534         const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1535                                            (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1536                                            (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1537         const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1538                                            (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1539
1540         if (actions == NULL) {
1541                 rte_flow_error_set(error, EINVAL,
1542                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1543                                    "NULL actions");
1544                 return -rte_errno;
1545         }
1546
1547 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
1548         RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
1549
1550         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1551                 switch (actions->type) {
1552                 case RTE_FLOW_ACTION_TYPE_VOID:
1553                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1554                                                actions_set);
1555                         break;
1556
1557                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1558                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1559                                                actions_set);
1560                         if ((actions_set & fate_actions_mask) != 0)
1561                                 goto fail_fate_actions;
1562
1563                         rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1564                         if (rc != 0) {
1565                                 rte_flow_error_set(error, EINVAL,
1566                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1567                                         "Bad QUEUE action");
1568                                 return -rte_errno;
1569                         }
1570                         break;
1571
1572                 case RTE_FLOW_ACTION_TYPE_RSS:
1573                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1574                                                actions_set);
1575                         if ((actions_set & fate_actions_mask) != 0)
1576                                 goto fail_fate_actions;
1577
1578                         rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1579                         if (rc != 0) {
1580                                 rte_flow_error_set(error, -rc,
1581                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1582                                         "Bad RSS action");
1583                                 return -rte_errno;
1584                         }
1585                         break;
1586
1587                 case RTE_FLOW_ACTION_TYPE_DROP:
1588                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1589                                                actions_set);
1590                         if ((actions_set & fate_actions_mask) != 0)
1591                                 goto fail_fate_actions;
1592
1593                         flow->spec.template.efs_dmaq_id =
1594                                 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1595                         break;
1596
1597                 case RTE_FLOW_ACTION_TYPE_FLAG:
1598                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1599                                                actions_set);
1600                         if ((actions_set & mark_actions_mask) != 0)
1601                                 goto fail_actions_overlap;
1602
1603                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1604                                 rte_flow_error_set(error, ENOTSUP,
1605                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1606                                         "FLAG action is not supported on the current Rx datapath");
1607                                 return -rte_errno;
1608                         }
1609
1610                         flow->spec.template.efs_flags |=
1611                                 EFX_FILTER_FLAG_ACTION_FLAG;
1612                         break;
1613
1614                 case RTE_FLOW_ACTION_TYPE_MARK:
1615                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1616                                                actions_set);
1617                         if ((actions_set & mark_actions_mask) != 0)
1618                                 goto fail_actions_overlap;
1619
1620                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1621                                 rte_flow_error_set(error, ENOTSUP,
1622                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1623                                         "MARK action is not supported on the current Rx datapath");
1624                                 return -rte_errno;
1625                         }
1626
1627                         rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1628                         if (rc != 0) {
1629                                 rte_flow_error_set(error, rc,
1630                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1631                                         "Bad MARK action");
1632                                 return -rte_errno;
1633                         }
1634                         break;
1635
1636                 default:
1637                         rte_flow_error_set(error, ENOTSUP,
1638                                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1639                                            "Action is not supported");
1640                         return -rte_errno;
1641                 }
1642
1643                 actions_set |= (1UL << actions->type);
1644         }
1645 #undef SFC_BUILD_SET_OVERFLOW
1646
1647         /* When fate is unknown, drop traffic. */
1648         if ((actions_set & fate_actions_mask) == 0) {
1649                 flow->spec.template.efs_dmaq_id =
1650                         EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1651         }
1652
1653         return 0;
1654
1655 fail_fate_actions:
1656         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1657                            "Cannot combine several fate-deciding actions, "
1658                            "choose between QUEUE, RSS or DROP");
1659         return -rte_errno;
1660
1661 fail_actions_overlap:
1662         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1663                            "Overlapping actions are not supported");
1664         return -rte_errno;
1665 }
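/*
 * For example, the action list { MARK, QUEUE, END } is accepted: it
 * combines one mark-class action with one fate-class action. In contrast,
 * { QUEUE, DROP, END } is rejected since it contains two fate-deciding
 * actions.
 */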
1666
1667 /**
1668  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1669  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1670  * specifications after copying.
1671  *
1672  * @param spec[in, out]
1673  *   SFC flow specification to update.
1674  * @param filters_count_for_one_val[in]
1675  *   How many specifications should share one match flag value; this is
1676  *   the number of specifications before copying.
1677  * @param error[out]
1678  *   Perform verbose error reporting if not NULL.
1679  */
1680 static int
1681 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1682                                unsigned int filters_count_for_one_val,
1683                                struct rte_flow_error *error)
1684 {
1685         unsigned int i;
1686         static const efx_filter_match_flags_t vals[] = {
1687                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1688                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1689         };
1690
1691         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1692                 rte_flow_error_set(error, EINVAL,
1693                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1694                         "Number of specifications is incorrect while copying "
1695                         "by unknown destination flags");
1696                 return -rte_errno;
1697         }
1698
1699         for (i = 0; i < spec->count; i++) {
1700                 /* The check above ensures that divisor can't be zero here */
1701                 spec->filters[i].efs_match_flags |=
1702                         vals[i / filters_count_for_one_val];
1703         }
1704
1705         return 0;
1706 }
1707
1708 /**
1709  * Check that the list of supported filters has a filter that matches
1710  * with the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag in place of
1711  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST. This is required because a
1712  * filter with the multicast flag will also be inserted alongside
1713  * the unicast one.
1714  *
1715  * @param match[in]
1716  *   The match flags of filter.
1717  * @param spec[in]
1718  *   Specification to be supplemented.
1719  * @param filter[in]
1720  *   SFC filter with list of supported filters.
1721  */
1722 static boolean_t
1723 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1724                                  __rte_unused efx_filter_spec_t *spec,
1725                                  struct sfc_filter *filter)
1726 {
1727         unsigned int i;
1728         efx_filter_match_flags_t match_mcast_dst;
1729
1730         match_mcast_dst =
1731                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1732                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1733         for (i = 0; i < filter->supported_match_num; i++) {
1734                 if (match_mcast_dst == filter->supported_match[i])
1735                         return B_TRUE;
1736         }
1737
1738         return B_FALSE;
1739 }
1740
1741 /**
1742  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1743  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1744  * specifications after copying.
1745  *
1746  * @param spec[in, out]
1747  *   SFC flow specification to update.
1748  * @param filters_count_for_one_val[in]
1749  *   How many specifications should share one EtherType value; this is
1750  *   the number of specifications before copying.
1751  * @param error[out]
1752  *   Perform verbose error reporting if not NULL.
1753  */
1754 static int
1755 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1756                         unsigned int filters_count_for_one_val,
1757                         struct rte_flow_error *error)
1758 {
1759         unsigned int i;
1760         static const uint16_t vals[] = {
1761                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1762         };
1763
1764         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1765                 rte_flow_error_set(error, EINVAL,
1766                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1767                         "Number of specifications is incorrect "
1768                         "while copying by Ethertype");
1769                 return -rte_errno;
1770         }
1771
1772         for (i = 0; i < spec->count; i++) {
1773                 spec->filters[i].efs_match_flags |=
1774                         EFX_FILTER_MATCH_ETHER_TYPE;
1775
1776                 /*
1777                  * The check above ensures that
1778                  * filters_count_for_one_val is not 0
1779                  */
1780                 spec->filters[i].efs_ether_type =
1781                         vals[i / filters_count_for_one_val];
1782         }
1783
1784         return 0;
1785 }
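/*
 * For example, a single specification copied by EtherType becomes two
 * specifications: filters[0] matches EFX_ETHER_TYPE_IPV4 and filters[1]
 * matches EFX_ETHER_TYPE_IPV6, both with EFX_FILTER_MATCH_ETHER_TYPE set.
 */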
1786
1787 /**
1788  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1789  * in the same specifications after copying.
1790  *
1791  * @param spec[in, out]
1792  *   SFC flow specification to update.
1793  * @param filters_count_for_one_val[in]
1794  *   How many specifications should share one match flag value; this is
1795  *   the number of specifications before copying.
1796  * @param error[out]
1797  *   Perform verbose error reporting if not NULL.
1798  */
1799 static int
1800 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1801                             unsigned int filters_count_for_one_val,
1802                             struct rte_flow_error *error)
1803 {
1804         unsigned int i;
1805
1806         if (filters_count_for_one_val != spec->count) {
1807                 rte_flow_error_set(error, EINVAL,
1808                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1809                         "Number of specifications is incorrect "
1810                         "while copying by outer VLAN ID");
1811                 return -rte_errno;
1812         }
1813
1814         for (i = 0; i < spec->count; i++) {
1815                 spec->filters[i].efs_match_flags |=
1816                         EFX_FILTER_MATCH_OUTER_VID;
1817
1818                 spec->filters[i].efs_outer_vid = 0;
1819         }
1820
1821         return 0;
1822 }
1823
1824 /**
1825  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1826  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1827  * specifications after copying.
1828  *
1829  * @param spec[in, out]
1830  *   SFC flow specification to update.
1831  * @param filters_count_for_one_val[in]
1832  *   How many specifications should share one match flag value; this is
1833  *   the number of specifications before copying.
1834  * @param error[out]
1835  *   Perform verbose error reporting if not NULL.
1836  */
1837 static int
1838 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1839                                     unsigned int filters_count_for_one_val,
1840                                     struct rte_flow_error *error)
1841 {
1842         unsigned int i;
1843         static const efx_filter_match_flags_t vals[] = {
1844                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1845                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1846         };
1847
1848         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1849                 rte_flow_error_set(error, EINVAL,
1850                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1851                         "Number of specifications is incorrect while copying "
1852                         "by inner frame unknown destination flags");
1853                 return -rte_errno;
1854         }
1855
1856         for (i = 0; i < spec->count; i++) {
1857                 /* The check above ensures that divisor can't be zero here */
1858                 spec->filters[i].efs_match_flags |=
1859                         vals[i / filters_count_for_one_val];
1860         }
1861
1862         return 0;
1863 }
1864
1865 /**
1866  * Check that the following conditions are met:
1867  * - the specification corresponds to a filter for encapsulated traffic
1868  * - the list of supported filters has a filter
1869  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1870  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1871  *   be inserted.
1872  *
1873  * @param match[in]
1874  *   The match flags of filter.
1875  * @param spec[in]
1876  *   Specification to be supplemented.
1877  * @param filter[in]
1878  *   SFC filter with list of supported filters.
1879  */
1880 static boolean_t
1881 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1882                                       efx_filter_spec_t *spec,
1883                                       struct sfc_filter *filter)
1884 {
1885         unsigned int i;
1886         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1887         efx_filter_match_flags_t match_mcast_dst;
1888
1889         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1890                 return B_FALSE;
1891
1892         match_mcast_dst =
1893                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1894                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1895         for (i = 0; i < filter->supported_match_num; i++) {
1896                 if (match_mcast_dst == filter->supported_match[i])
1897                         return B_TRUE;
1898         }
1899
1900         return B_FALSE;
1901 }
1902
1903 /**
1904  * Check whether the list of supported filters lacks a filter that
1905  * differs from @p match only in having no EFX_FILTER_MATCH_OUTER_VID
1906  * flag. If such a filter exists, it will be used instead, so adding
1907  * the EFX_FILTER_MATCH_OUTER_VID flag is not needed.
1908  *
1909  * @param match[in]
1910  *   The match flags of filter.
1911  * @param spec[in]
1912  *   Specification to be supplemented.
1913  * @param filter[in]
1914  *   SFC filter with list of supported filters.
1915  */
1916 static boolean_t
1917 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
1918                               __rte_unused efx_filter_spec_t *spec,
1919                               struct sfc_filter *filter)
1920 {
1921         unsigned int i;
1922         efx_filter_match_flags_t match_without_vid =
1923                 match & ~EFX_FILTER_MATCH_OUTER_VID;
1924
1925         for (i = 0; i < filter->supported_match_num; i++) {
1926                 if (match_without_vid == filter->supported_match[i])
1927                         return B_FALSE;
1928         }
1929
1930         return B_TRUE;
1931 }
1932
1933 /*
1934  * Match flags that can be automatically added to filters.
1935  * Selecting the last minimal multiplier (the "<=" comparison in
1936  * sfc_flow_spec_filters_complete()) ensures that
1937  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST has a higher priority than
1938  * EFX_FILTER_MATCH_ETHER_TYPE, since the filter with it is at the end
1939  * of the list of supported filters.
1940  */
1941 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
1942         {
1943                 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1944                 .vals_count = 2,
1945                 .set_vals = sfc_flow_set_unknown_dst_flags,
1946                 .spec_check = sfc_flow_check_unknown_dst_flags,
1947         },
1948         {
1949                 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
1950                 .vals_count = 2,
1951                 .set_vals = sfc_flow_set_ethertypes,
1952                 .spec_check = NULL,
1953         },
1954         {
1955                 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1956                 .vals_count = 2,
1957                 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
1958                 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
1959         },
1960         {
1961                 .flag = EFX_FILTER_MATCH_OUTER_VID,
1962                 .vals_count = 1,
1963                 .set_vals = sfc_flow_set_outer_vid_flag,
1964                 .spec_check = sfc_flow_check_outer_vid_flag,
1965         },
1966 };
1967
1968 /* Get item from array sfc_flow_copy_flags */
1969 static const struct sfc_flow_copy_flag *
1970 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
1971 {
1972         unsigned int i;
1973
1974         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1975                 if (sfc_flow_copy_flags[i].flag == flag)
1976                         return &sfc_flow_copy_flags[i];
1977         }
1978
1979         return NULL;
1980 }
1981
1982 /**
1983  * Make copies of the specifications, set match flag and values
1984  * of the field that corresponds to it.
1985  *
1986  * @param spec[in, out]
1987  *   SFC flow specification to update.
1988  * @param flag[in]
1989  *   The match flag to add.
1990  * @param error[out]
1991  *   Perform verbose error reporting if not NULL.
1992  */
1993 static int
1994 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
1995                              efx_filter_match_flags_t flag,
1996                              struct rte_flow_error *error)
1997 {
1998         unsigned int i;
1999         unsigned int new_filters_count;
2000         unsigned int filters_count_for_one_val;
2001         const struct sfc_flow_copy_flag *copy_flag;
2002         int rc;
2003
2004         copy_flag = sfc_flow_get_copy_flag(flag);
2005         if (copy_flag == NULL) {
2006                 rte_flow_error_set(error, ENOTSUP,
2007                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2008                                    "Unsupported spec field for copying");
2009                 return -rte_errno;
2010         }
2011
2012         new_filters_count = spec->count * copy_flag->vals_count;
2013         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2014                 rte_flow_error_set(error, EINVAL,
2015                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2016                         "Too many EFX specifications in the flow rule");
2017                 return -rte_errno;
2018         }
2019
2020         /* Copy filters specifications */
2021         for (i = spec->count; i < new_filters_count; i++)
2022                 spec->filters[i] = spec->filters[i - spec->count];
2023
2024         filters_count_for_one_val = spec->count;
2025         spec->count = new_filters_count;
2026
2027         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2028         if (rc != 0)
2029                 return rc;
2030
2031         return 0;
2032 }
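/*
 * After copying, the layout is: the first filters_count_for_one_val
 * entries take the first value of the copied field, the next group
 * takes the second value, and so on, since set_vals() indexes the
 * value array by i / filters_count_for_one_val.
 */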
2033
2034 /**
2035  * Check that the given set of match flags missing in the original filter spec
2036  * could be covered by adding spec copies which specify the corresponding
2037  * flags and packet field values to match.
2038  *
2039  * @param miss_flags[in]
2040  *   Match flags required by the supported filter but missing from the spec.
2041  * @param spec[in]
2042  *   Specification to be supplemented.
2043  * @param filter[in]
2044  *   SFC filter.
2045  *
2046  * @return
2047  *   Number of specifications after copying, or 0 if the flags cannot be added.
2048  */
2049 static unsigned int
2050 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2051                              efx_filter_spec_t *spec,
2052                              struct sfc_filter *filter)
2053 {
2054         unsigned int i;
2055         efx_filter_match_flags_t copy_flags = 0;
2056         efx_filter_match_flags_t flag;
2057         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2058         sfc_flow_spec_check *check;
2059         unsigned int multiplier = 1;
2060
2061         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2062                 flag = sfc_flow_copy_flags[i].flag;
2063                 check = sfc_flow_copy_flags[i].spec_check;
2064                 if ((flag & miss_flags) == flag) {
2065                         if (check != NULL && (!check(match, spec, filter)))
2066                                 continue;
2067
2068                         copy_flags |= flag;
2069                         multiplier *= sfc_flow_copy_flags[i].vals_count;
2070                 }
2071         }
2072
2073         if (copy_flags == miss_flags)
2074                 return multiplier;
2075
2076         return 0;
2077 }
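/*
 * For example, if both EFX_FILTER_MATCH_ETHER_TYPE and
 * EFX_FILTER_MATCH_OUTER_VID are missing and their checks pass, the
 * returned multiplier is 2 * 1 = 2, i.e. two specifications per
 * original one are needed to cover the supported filter.
 */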
2078
2079 /**
2080  * Attempt to supplement the specification template with the nearest
2081  * supported set of match flags. To do this, the specifications are
2082  * copied and filled in with the values of the fields that correspond
2083  * to the missing flags.
2084  * The necessary and sufficient set of filters is built from the
2085  * fewest number of copies that covers the minimally required set
2086  * of flags.
2087  *
2088  * @param sa[in]
2089  *   SFC adapter.
2090  * @param spec[in, out]
2091  *   SFC flow specification to update.
2092  * @param error[out]
2093  *   Perform verbose error reporting if not NULL.
2094  */
2095 static int
2096 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2097                                struct sfc_flow_spec *spec,
2098                                struct rte_flow_error *error)
2099 {
2100         struct sfc_filter *filter = &sa->filter;
2101         efx_filter_match_flags_t miss_flags;
2102         efx_filter_match_flags_t min_miss_flags = 0;
2103         efx_filter_match_flags_t match;
2104         unsigned int min_multiplier = UINT_MAX;
2105         unsigned int multiplier;
2106         unsigned int i;
2107         int rc;
2108
2109         match = spec->template.efs_match_flags;
2110         for (i = 0; i < filter->supported_match_num; i++) {
2111                 if ((match & filter->supported_match[i]) == match) {
2112                         miss_flags = filter->supported_match[i] & (~match);
2113                         multiplier = sfc_flow_check_missing_flags(miss_flags,
2114                                 &spec->template, filter);
2115                         if (multiplier > 0) {
2116                                 if (multiplier <= min_multiplier) {
2117                                         min_multiplier = multiplier;
2118                                         min_miss_flags = miss_flags;
2119                                 }
2120                         }
2121                 }
2122         }
2123
2124         if (min_multiplier == UINT_MAX) {
2125                 rte_flow_error_set(error, ENOTSUP,
2126                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2127                                    "The flow rule pattern is unsupported");
2128                 return -rte_errno;
2129         }
2130
2131         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2132                 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2133
2134                 if ((flag & min_miss_flags) == flag) {
2135                         rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2136                         if (rc != 0)
2137                                 return rc;
2138                 }
2139         }
2140
2141         return 0;
2142 }
2143
2144 /**
2145  * Check whether a set of match flags fits a filter pattern, where the
2146  * pattern may optionally be extended with the OUTER_VID and INNER_VID
2147  * flags.
2148  *
2149  * @param match_flags[in]
2150  *   Set of match flags.
2151  * @param flags_pattern[in]
2152  *   Pattern of filter match flags.
2153  */
2154 static boolean_t
2155 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2156                             efx_filter_match_flags_t flags_pattern)
2157 {
2158         if ((match_flags & flags_pattern) != flags_pattern)
2159                 return B_FALSE;
2160
2161         switch (match_flags & ~flags_pattern) {
2162         case 0:
2163         case EFX_FILTER_MATCH_OUTER_VID:
2164         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2165                 return B_TRUE;
2166         default:
2167                 return B_FALSE;
2168         }
2169 }
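/*
 * For example, a match of ETHER_TYPE | OUTER_VID fits the pattern
 * ETHER_TYPE, while ETHER_TYPE | INNER_VID (without OUTER_VID) does not.
 */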
2170
2171 /**
2172  * Check whether the spec maps to a hardware filter which is known to be
2173  * ineffective despite being valid.
2174  *
2175  * @param filter[in]
2176  *   SFC filter with list of supported filters.
2177  * @param spec[in]
2178  *   SFC flow specification.
2179  */
2180 static boolean_t
2181 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2182                                   struct sfc_flow_spec *spec)
2183 {
2184         unsigned int i;
2185         uint16_t ether_type;
2186         uint8_t ip_proto;
2187         efx_filter_match_flags_t match_flags;
2188
2189         for (i = 0; i < spec->count; i++) {
2190                 match_flags = spec->filters[i].efs_match_flags;
2191
2192                 if (sfc_flow_is_match_with_vids(match_flags,
2193                                                 EFX_FILTER_MATCH_ETHER_TYPE) ||
2194                     sfc_flow_is_match_with_vids(match_flags,
2195                                                 EFX_FILTER_MATCH_ETHER_TYPE |
2196                                                 EFX_FILTER_MATCH_LOC_MAC)) {
2197                         ether_type = spec->filters[i].efs_ether_type;
2198                         if (filter->supports_ip_proto_or_addr_filter &&
2199                             (ether_type == EFX_ETHER_TYPE_IPV4 ||
2200                              ether_type == EFX_ETHER_TYPE_IPV6))
2201                                 return B_TRUE;
2202                 } else if (sfc_flow_is_match_with_vids(match_flags,
2203                                 EFX_FILTER_MATCH_ETHER_TYPE |
2204                                 EFX_FILTER_MATCH_IP_PROTO) ||
2205                            sfc_flow_is_match_with_vids(match_flags,
2206                                 EFX_FILTER_MATCH_ETHER_TYPE |
2207                                 EFX_FILTER_MATCH_IP_PROTO |
2208                                 EFX_FILTER_MATCH_LOC_MAC)) {
2209                         ip_proto = spec->filters[i].efs_ip_proto;
2210                         if (filter->supports_rem_or_local_port_filter &&
2211                             (ip_proto == EFX_IPPROTO_TCP ||
2212                              ip_proto == EFX_IPPROTO_UDP))
2213                                 return B_TRUE;
2214                 }
2215         }
2216
2217         return B_FALSE;
2218 }
2219
2220 static int
2221 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2222                               struct rte_flow *flow,
2223                               struct rte_flow_error *error)
2224 {
2225         efx_filter_spec_t *spec_tmpl = &flow->spec.template;
2226         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2227         int rc;
2228
2229         /* Initialize the first filter spec with template */
2230         flow->spec.filters[0] = *spec_tmpl;
2231         flow->spec.count = 1;
2232
2233         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2234                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2235                 if (rc != 0)
2236                         return rc;
2237         }
2238
2239         if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2240                 rte_flow_error_set(error, ENOTSUP,
2241                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2242                         "The flow rule pattern is unsupported");
2243                 return -rte_errno;
2244         }
2245
2246         return 0;
2247 }
2248
2249 static int
2250 sfc_flow_parse(struct rte_eth_dev *dev,
2251                const struct rte_flow_attr *attr,
2252                const struct rte_flow_item pattern[],
2253                const struct rte_flow_action actions[],
2254                struct rte_flow *flow,
2255                struct rte_flow_error *error)
2256 {
2257         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2258         int rc;
2259
2260         rc = sfc_flow_parse_attr(attr, flow, error);
2261         if (rc != 0)
2262                 goto fail_bad_value;
2263
2264         rc = sfc_flow_parse_pattern(pattern, flow, error);
2265         if (rc != 0)
2266                 goto fail_bad_value;
2267
2268         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2269         if (rc != 0)
2270                 goto fail_bad_value;
2271
2272         rc = sfc_flow_validate_match_flags(sa, flow, error);
2273         if (rc != 0)
2274                 goto fail_bad_value;
2275
2276         return 0;
2277
2278 fail_bad_value:
2279         return rc;
2280 }
2281
2282 static int
2283 sfc_flow_validate(struct rte_eth_dev *dev,
2284                   const struct rte_flow_attr *attr,
2285                   const struct rte_flow_item pattern[],
2286                   const struct rte_flow_action actions[],
2287                   struct rte_flow_error *error)
2288 {
2289         struct rte_flow flow;
2290
2291         memset(&flow, 0, sizeof(flow));
2292
2293         return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
2294 }
2295
2296 static struct rte_flow *
2297 sfc_flow_create(struct rte_eth_dev *dev,
2298                 const struct rte_flow_attr *attr,
2299                 const struct rte_flow_item pattern[],
2300                 const struct rte_flow_action actions[],
2301                 struct rte_flow_error *error)
2302 {
2303         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2304         struct rte_flow *flow = NULL;
2305         int rc;
2306
2307         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2308         if (flow == NULL) {
2309                 rte_flow_error_set(error, ENOMEM,
2310                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2311                                    "Failed to allocate memory");
2312                 goto fail_no_mem;
2313         }
2314
2315         sfc_adapter_lock(sa);
2316
2317         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2318         if (rc != 0)
2319                 goto fail_bad_value;
2320
2321         TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
2322
2323         if (sa->state == SFC_ADAPTER_STARTED) {
2324                 rc = sfc_flow_filter_insert(sa, flow);
2325                 if (rc != 0) {
2326                         rte_flow_error_set(error, rc,
2327                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2328                                 "Failed to insert filter");
2329                         goto fail_filter_insert;
2330                 }
2331         }
2332
2333         sfc_adapter_unlock(sa);
2334
2335         return flow;
2336
2337 fail_filter_insert:
2338         TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2339
2340 fail_bad_value:
2341         rte_free(flow);
2342         sfc_adapter_unlock(sa);
2343
2344 fail_no_mem:
2345         return NULL;
2346 }
2347
2348 static int
2349 sfc_flow_remove(struct sfc_adapter *sa,
2350                 struct rte_flow *flow,
2351                 struct rte_flow_error *error)
2352 {
2353         int rc = 0;
2354
2355         SFC_ASSERT(sfc_adapter_is_locked(sa));
2356
2357         if (sa->state == SFC_ADAPTER_STARTED) {
2358                 rc = sfc_flow_filter_remove(sa, flow);
2359                 if (rc != 0)
2360                         rte_flow_error_set(error, rc,
2361                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2362                                 "Failed to destroy flow rule");
2363         }
2364
2365         TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2366         rte_free(flow);
2367
2368         return rc;
2369 }
2370
2371 static int
2372 sfc_flow_destroy(struct rte_eth_dev *dev,
2373                  struct rte_flow *flow,
2374                  struct rte_flow_error *error)
2375 {
2376         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2377         struct rte_flow *flow_ptr;
2378         int rc = EINVAL;
2379
2380         sfc_adapter_lock(sa);
2381
2382         TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
2383                 if (flow_ptr == flow)
2384                         rc = 0;
2385         }
2386         if (rc != 0) {
2387                 rte_flow_error_set(error, rc,
2388                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2389                                    "Failed to find flow rule to destroy");
2390                 goto fail_bad_value;
2391         }
2392
2393         rc = sfc_flow_remove(sa, flow, error);
2394
2395 fail_bad_value:
2396         sfc_adapter_unlock(sa);
2397
2398         return -rc;
2399 }
2400
2401 static int
2402 sfc_flow_flush(struct rte_eth_dev *dev,
2403                struct rte_flow_error *error)
2404 {
2405         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2406         struct rte_flow *flow;
2407         int rc = 0;
2408         int ret = 0;
2409
2410         sfc_adapter_lock(sa);
2411
2412         while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2413                 rc = sfc_flow_remove(sa, flow, error);
2414                 if (rc != 0)
2415                         ret = rc;
2416         }
2417
2418         sfc_adapter_unlock(sa);
2419
2420         return -ret;
2421 }
2422
2423 static int
2424 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2425                  struct rte_flow_error *error)
2426 {
2427         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2428         int ret = 0;
2429
2430         sfc_adapter_lock(sa);
2431         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2432                 rte_flow_error_set(error, EBUSY,
2433                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2434                                    NULL, "please close the port first");
2435                 ret = -rte_errno;
2436         } else {
2437                 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2438         }
2439         sfc_adapter_unlock(sa);
2440
2441         return ret;
2442 }
2443
2444 const struct rte_flow_ops sfc_flow_ops = {
2445         .validate = sfc_flow_validate,
2446         .create = sfc_flow_create,
2447         .destroy = sfc_flow_destroy,
2448         .flush = sfc_flow_flush,
2449         .query = NULL,
2450         .isolate = sfc_flow_isolate,
2451 };
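/*
 * Illustrative sketch (not driver code): applications reach the
 * callbacks above through the generic rte_flow API, which dispatches
 * to sfc_flow_ops for ports bound to this driver:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow rule rejected: %s\n",
 *		       err.message != NULL ? err.message : "unknown");
 *	else
 *		rte_flow_destroy(port_id, f, &err);
 */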
2452
2453 void
2454 sfc_flow_init(struct sfc_adapter *sa)
2455 {
2456         SFC_ASSERT(sfc_adapter_is_locked(sa));
2457
2458         TAILQ_INIT(&sa->filter.flow_list);
2459 }
2460
2461 void
2462 sfc_flow_fini(struct sfc_adapter *sa)
2463 {
2464         struct rte_flow *flow;
2465
2466         SFC_ASSERT(sfc_adapter_is_locked(sa));
2467
2468         while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2469                 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2470                 rte_free(flow);
2471         }
2472 }
2473
2474 void
2475 sfc_flow_stop(struct sfc_adapter *sa)
2476 {
2477         struct rte_flow *flow;
2478
2479         SFC_ASSERT(sfc_adapter_is_locked(sa));
2480
2481         TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
2482                 sfc_flow_filter_remove(sa, flow);
2483 }
2484
2485 int
2486 sfc_flow_start(struct sfc_adapter *sa)
2487 {
2488         struct rte_flow *flow;
2489         int rc = 0;
2490
2491         sfc_log_init(sa, "entry");
2492
2493         SFC_ASSERT(sfc_adapter_is_locked(sa));
2494
2495         TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
2496                 rc = sfc_flow_filter_insert(sa, flow);
2497                 if (rc != 0)
2498                         goto fail_bad_flow;
2499         }
2500
2501         sfc_log_init(sa, "done");
2502
2503 fail_bad_flow:
2504         return rc;
2505 }