/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

struct sfc_flow_ops_by_spec {
        sfc_flow_parse_cb_t     *parse;
        sfc_flow_insert_cb_t    *insert;
        sfc_flow_remove_cb_t    *remove;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
        .parse = sfc_flow_parse_rte_to_filter,
        .insert = sfc_flow_filter_insert,
        .remove = sfc_flow_filter_remove,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        const struct sfc_flow_ops_by_spec *ops = NULL;

        switch (spec->type) {
        case SFC_FLOW_SPEC_FILTER:
                ops = &sfc_flow_ops_filter;
                break;
        default:
                SFC_ASSERT(false);
                break;
        }

        return ops;
}
/*
 * Currently, the filter-based (VNIC) flow API is implemented in such a
 * manner that each flow rule is converted to one or more hardware filters.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that is responsible for the hardware filter.
 * If some required field is unset in the flow rule, then multiple
 * filter copies are created to cover all possible values
 * of such a field.
 */

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" pointers for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the
         * corresponding values in "spec", then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that the mask does not ask for a wider match than supp_mask */
        for (i = 0; i < size; i++) {
                supp = ((const uint8_t *)supp_mask)[i];

                if (~supp & mask[i]) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}
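
/*
 * Typical usage from an item parser (sketch only; the argument values
 * here are illustrative, see the real parsers below):
 *
 *      const struct rte_flow_item_eth *spec = NULL;
 *      const struct rte_flow_item_eth *mask = NULL;
 *
 *      rc = sfc_flow_parse_init(item,
 *                               (const void **)&spec,
 *                               (const void **)&mask,
 *                               &supp_mask, &rte_flow_item_eth_mask,
 *                               sizeof(struct rte_flow_item_eth), error);
 *
 * On success, "spec" may still be NULL (the item matches anything) and
 * "mask" points either to the item mask or to the default mask.
 */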

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification in it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, the item matches any Ethernet frame */
        if (spec == NULL)
                return 0;

        if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (rte_is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!rte_is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * Ethertype masks are equal to zero in the inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!rte_is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}
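
/*
 * Example ETH item accepted by the parser above (hypothetical address):
 *
 *      const struct rte_flow_item_eth eth_spec = {
 *              .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *      };
 *      const struct rte_flow_item_eth eth_mask = {
 *              .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *      };
 *      const struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_ETH,
 *              .spec = &eth_spec,
 *              .mask = &eth_mask,
 *      };
 *
 * The fully masked destination maps to EFX_FILTER_MATCH_LOC_MAC, while
 * the individual/group mask 01:00:00:00:00:00 selects the unknown
 * unicast/multicast destination match flags instead.
 */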

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification in it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
                .inner_type = RTE_BE16(0xffff),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                /* Apply mask to keep VID only */
                vid = rte_bswap16(spec->tci & mask->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN TPID matching is not supported");
                return -rte_errno;
        }
        if (mask->inner_type == supp_mask.inner_type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
        } else if (mask->inner_type) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Bad mask for VLAN inner_type");
                return -rte_errno;
        }

        return 0;
}
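
/*
 * Example of double-tagged matching (sketch, VIDs are hypothetical):
 * with two VLAN items in the pattern, the first TCI is taken as the
 * outer VID and the second as the inner VID:
 *
 *      ETH / VLAN(vid=100) / VLAN(vid=200) / END
 *
 * Each mask must have the full VID bits set (ETH_VLAN_ID_MAX), since
 * partial VID match is not supported, and a third VLAN item is
 * rejected with "More than two VLAN items".
 */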

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification in it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in the item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification in it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in the item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification in it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so a byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification in it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so a byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}
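
/*
 * Putting the parsers above together, a full 5-tuple UDP rule could be
 * expressed as the following pattern (sketch with made-up values):
 *
 *      ETH / IPV4(dst=192.0.2.1) / UDP(dst_port=4789) / END
 *
 * The IPv4 item implicitly pins EFX_FILTER_MATCH_ETHER_TYPE and the
 * UDP item pins EFX_FILTER_MATCH_IP_PROTO, so conflicting items in
 * one pattern are rejected.
 */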

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification in it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}
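
/*
 * Example VXLAN item (hypothetical VNI 12345 = 0x003039):
 *
 *      const struct rte_flow_item_vxlan vxlan_spec = {
 *              .vni = { 0x00, 0x30, 0x39 },
 *      };
 *
 * used in a pattern such as ETH / IPV4 / UDP / VXLAN / ETH / END, where
 * the trailing ETH item is parsed as the inner frame because
 * efs_encap_type has been set by this parser.
 */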

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported. However, the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, the default mask will be
 *   used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification in it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      struct sfc_flow_parse_ctx *parse_ctx,
                      struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification in it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_nvgre,
        },
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;

        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Ingress is compulsory");
                return -rte_errno;
        }
        if (attr->transfer == 0) {
                if (attr->priority != 0) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Priorities are unsupported");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_FILTER;
                spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
                spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
                spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
        } else {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
                                   "Transfer is not supported");
                return -rte_errno;
        }

        return 0;
}

/* Get an item from the sfc_flow_items array */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
                  unsigned int nb_items,
                  enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < nb_items; i++)
                if (items[i].type == type)
                        return &items[i];

        return NULL;
}

int
sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
                       unsigned int nb_flow_items,
                       const struct rte_flow_item pattern[],
                       struct sfc_flow_parse_ctx *parse_ctx,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(flow_items, nb_flow_items,
                                         pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the
                 * beginning of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                if (parse_ctx->type != item->ctx_type) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                        "Parse context type mismatch");
                        return -rte_errno;
                }

                rc = item->parse(pattern, parse_ctx, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}
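
/*
 * Note that the layer tracking above allows leading layers to be
 * omitted: e.g. a bare IPV4 / UDP / END pattern is accepted because
 * the first item is checked against SFC_FLOW_ITEM_ANY_LAYER, while an
 * out-of-order ETH / UDP / END pattern is rejected with
 * "Unexpected sequence of pattern items".
 */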

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_rxq *rxq;
        struct sfc_rxq_info *rxq_info;

        if (queue->index >= sfc_sa2shared(sa)->rxq_count)
                return -EINVAL;

        rxq = &sa->rxq_ctrl[queue->index];
        spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
        spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
                                            SFC_RXQ_FLAG_RSS_HASH);

        return 0;
}
1310
1311 static int
1312 sfc_flow_parse_rss(struct sfc_adapter *sa,
1313                    const struct rte_flow_action_rss *action_rss,
1314                    struct rte_flow *flow)
1315 {
1316         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1317         struct sfc_rss *rss = &sas->rss;
1318         unsigned int rxq_sw_index;
1319         struct sfc_rxq *rxq;
1320         unsigned int rxq_hw_index_min;
1321         unsigned int rxq_hw_index_max;
1322         efx_rx_hash_type_t efx_hash_types;
1323         const uint8_t *rss_key;
1324         struct sfc_flow_spec *spec = &flow->spec;
1325         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1326         struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
1327         unsigned int i;
1328
1329         if (action_rss->queue_num == 0)
1330                 return -EINVAL;
1331
1332         rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
1333         rxq = &sa->rxq_ctrl[rxq_sw_index];
1334         rxq_hw_index_min = rxq->hw_index;
1335         rxq_hw_index_max = 0;
1336
1337         for (i = 0; i < action_rss->queue_num; ++i) {
1338                 rxq_sw_index = action_rss->queue[i];
1339
1340                 if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
1341                         return -EINVAL;
1342
1343                 rxq = &sa->rxq_ctrl[rxq_sw_index];
1344
1345                 if (rxq->hw_index < rxq_hw_index_min)
1346                         rxq_hw_index_min = rxq->hw_index;
1347
1348                 if (rxq->hw_index > rxq_hw_index_max)
1349                         rxq_hw_index_max = rxq->hw_index;
1350         }
1351
1352         switch (action_rss->func) {
1353         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1354         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1355                 break;
1356         default:
1357                 return -EINVAL;
1358         }
1359
1360         if (action_rss->level)
1361                 return -EINVAL;
1362
1363         /*
1364          * A dummy RSS action with only one queue and no specific settings
1365          * for hash types and key does not require a dedicated RSS context
1366          * and may be simplified to a single queue action.
1367          */
1368         if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1369             action_rss->key_len == 0) {
1370                 spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
1371                 return 0;
1372         }
1373
1374         if (action_rss->types) {
1375                 int rc;
1376
1377                 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1378                                           &efx_hash_types);
1379                 if (rc != 0)
1380                         return -rc;
1381         } else {
1384                 efx_hash_types = 0;
1385                 for (i = 0; i < rss->hf_map_nb_entries; ++i)
1386                         efx_hash_types |= rss->hf_map[i].efx;
1387         }
1388
1389         if (action_rss->key_len) {
1390                 if (action_rss->key_len != sizeof(rss->key))
1391                         return -EINVAL;
1392
1393                 rss_key = action_rss->key;
1394         } else {
1395                 rss_key = rss->key;
1396         }
1397
1398         spec_filter->rss = B_TRUE;
1399
1400         sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1401         sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1402         sfc_rss_conf->rss_hash_types = efx_hash_types;
1403         rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1404
1405         for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1406                 unsigned int nb_queues = action_rss->queue_num;
1407                 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
1408                 struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
1409
1410                 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1411         }
1412
1413         return 0;
1414 }
1415
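/*
 * Remove the first @p filters_count hardware filters of the flow
 * specification. Removal is best-effort: every filter is attempted
 * and the first error encountered, if any, is returned.
 */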
1416 static int
1417 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1418                     unsigned int filters_count)
1419 {
1420         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1421         unsigned int i;
1422         int ret = 0;
1423
1424         for (i = 0; i < filters_count; i++) {
1425                 int rc;
1426
1427                 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1428                 if (ret == 0 && rc != 0) {
1429                         sfc_err(sa, "failed to remove filter specification "
1430                                 "(rc = %d)", rc);
1431                         ret = rc;
1432                 }
1433         }
1434
1435         return ret;
1436 }
1437
1438 static int
1439 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1440 {
1441         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1442         unsigned int i;
1443         int rc = 0;
1444
1445         for (i = 0; i < spec_filter->count; i++) {
1446                 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1447                 if (rc != 0) {
1448                         sfc_flow_spec_flush(sa, spec, i);
1449                         break;
1450                 }
1451         }
1452
1453         return rc;
1454 }
1455
1456 static int
1457 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1458 {
1459         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1460
1461         return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1462 }
1463
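/*
 * Insert all hardware filters produced for a flow rule. If the rule
 * uses RSS, or merely requires a valid RSS hash while no shared dummy
 * RSS context exists yet, an exclusive RSS context is allocated and
 * programmed before the filters are inserted.
 */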
1464 static int
1465 sfc_flow_filter_insert(struct sfc_adapter *sa,
1466                        struct rte_flow *flow)
1467 {
1468         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1469         struct sfc_rss *rss = &sas->rss;
1470         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1471         struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
1472         uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1473         boolean_t create_context;
1474         unsigned int i;
1475         int rc = 0;
1476
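        /*
         * A dedicated RSS context is needed either for a real RSS
         * action or, when only a valid RSS hash is required, for the
         * shared dummy context if it has not been allocated yet.
         */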
1477         create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
1478                         rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);
1479
1480         if (create_context) {
1481                 unsigned int rss_spread;
1482                 unsigned int rss_hash_types;
1483                 uint8_t *rss_key;
1484
1485                 if (spec_filter->rss) {
1486                         rss_spread = MIN(flow_rss->rxq_hw_index_max -
1487                                         flow_rss->rxq_hw_index_min + 1,
1488                                         EFX_MAXRSS);
1489                         rss_hash_types = flow_rss->rss_hash_types;
1490                         rss_key = flow_rss->rss_key;
1491                 } else {
1492                         /*
1493                          * Initialize dummy RSS context parameters to have
1494                          * valid RSS hash. Use default RSS hash function and
1495                          * key.
1496                          */
1497                         rss_spread = 1;
1498                         rss_hash_types = rss->hash_types;
1499                         rss_key = rss->key;
1500                 }
1501
1502                 rc = efx_rx_scale_context_alloc(sa->nic,
1503                                                 EFX_RX_SCALE_EXCLUSIVE,
1504                                                 rss_spread,
1505                                                 &efs_rss_context);
1506                 if (rc != 0)
1507                         goto fail_scale_context_alloc;
1508
1509                 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1510                                            rss->hash_alg,
1511                                            rss_hash_types, B_TRUE);
1512                 if (rc != 0)
1513                         goto fail_scale_mode_set;
1514
1515                 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1516                                           rss_key, sizeof(rss->key));
1517                 if (rc != 0)
1518                         goto fail_scale_key_set;
1519         } else {
1520                 efs_rss_context = rss->dummy_rss_context;
1521         }
1522
1523         if (spec_filter->rss || spec_filter->rss_hash_required) {
1524                 /*
1525                  * At this point, fully elaborated filter specifications
1526                  * have been produced from the template. To make sure that
1527                  * RSS behaviour is consistent between them, set the same
1528                  * RSS context value everywhere.
1529                  */
1530                 for (i = 0; i < spec_filter->count; i++) {
1531                         efx_filter_spec_t *spec = &spec_filter->filters[i];
1532
1533                         spec->efs_rss_context = efs_rss_context;
1534                         spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1535                         if (spec_filter->rss)
1536                                 spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1537                 }
1538         }
1539
1540         rc = sfc_flow_spec_insert(sa, &flow->spec);
1541         if (rc != 0)
1542                 goto fail_filter_insert;
1543
1544         if (create_context) {
1545                 unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
1546                 unsigned int *tbl;
1547
1548                 tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;
1549
1550                 /*
1551                  * The scale table is set after filter insertion because
1552                  * the table entries are relative to the base RxQ ID,
1553                  * and the latter is submitted to the HW by means of
1554                  * inserting a filter. By the time of this request the
1555                  * HW therefore knows all the information needed to
1556                  * verify the table entries, and the operation succeeds.
1557                  */
1558                 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1559                                           tbl, RTE_DIM(flow_rss->rss_tbl));
1560                 if (rc != 0)
1561                         goto fail_scale_tbl_set;
1562
1563                 /* Remember created dummy RSS context */
1564                 if (!spec_filter->rss)
1565                         rss->dummy_rss_context = efs_rss_context;
1566         }
1567
1568         return 0;
1569
1570 fail_scale_tbl_set:
1571         sfc_flow_spec_remove(sa, &flow->spec);
1572
1573 fail_filter_insert:
1574 fail_scale_key_set:
1575 fail_scale_mode_set:
1576         if (create_context)
1577                 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1578
1579 fail_scale_context_alloc:
1580         return rc;
1581 }
1582
1583 static int
1584 sfc_flow_filter_remove(struct sfc_adapter *sa,
1585                        struct rte_flow *flow)
1586 {
1587         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1588         int rc = 0;
1589
1590         rc = sfc_flow_spec_remove(sa, &flow->spec);
1591         if (rc != 0)
1592                 return rc;
1593
1594         if (spec_filter->rss) {
1595                 /*
1596                  * All specifications for a given flow rule have the same RSS
1597                  * context, so the RSS context value is taken from the first
1598                  * filter specification.
1599                  */
1600                 efx_filter_spec_t *spec = &spec_filter->filters[0];
1601
1602                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1603         }
1604
1605         return rc;
1606 }
1607
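/*
 * Translate the MARK action into the filter template; the mark value
 * must not exceed the adapter limit enc_filter_action_mark_max.
 */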
1608 static int
1609 sfc_flow_parse_mark(struct sfc_adapter *sa,
1610                     const struct rte_flow_action_mark *mark,
1611                     struct rte_flow *flow)
1612 {
1613         struct sfc_flow_spec *spec = &flow->spec;
1614         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1615         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1616
1617         if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1618                 return EINVAL;
1619
1620         spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1621         spec_filter->template.efs_mark = mark->id;
1622
1623         return 0;
1624 }
1625
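/*
 * Parse flow rule actions into the filter template. At most one
 * fate-deciding action (QUEUE, RSS or DROP) and at most one of the
 * mutually exclusive MARK and FLAG actions are accepted; a rule
 * without a fate action drops matching traffic.
 */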
1626 static int
1627 sfc_flow_parse_actions(struct sfc_adapter *sa,
1628                        const struct rte_flow_action actions[],
1629                        struct rte_flow *flow,
1630                        struct rte_flow_error *error)
1631 {
1632         int rc;
1633         struct sfc_flow_spec *spec = &flow->spec;
1634         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1635         const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1636         uint32_t actions_set = 0;
1637         const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1638                                            (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1639                                            (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1640         const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1641                                            (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1642
1643         if (actions == NULL) {
1644                 rte_flow_error_set(error, EINVAL,
1645                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1646                                    "NULL actions");
1647                 return -rte_errno;
1648         }
1649
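/*
 * Build-time guard: every action type checked below must fit into the
 * actions_set bitmask used to detect conflicting actions.
 */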
1650 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
1651         RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
1652
1653         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1654                 switch (actions->type) {
1655                 case RTE_FLOW_ACTION_TYPE_VOID:
1656                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1657                                                actions_set);
1658                         break;
1659
1660                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1661                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1662                                                actions_set);
1663                         if ((actions_set & fate_actions_mask) != 0)
1664                                 goto fail_fate_actions;
1665
1666                         rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1667                         if (rc != 0) {
1668                                 rte_flow_error_set(error, EINVAL,
1669                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1670                                         "Bad QUEUE action");
1671                                 return -rte_errno;
1672                         }
1673                         break;
1674
1675                 case RTE_FLOW_ACTION_TYPE_RSS:
1676                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1677                                                actions_set);
1678                         if ((actions_set & fate_actions_mask) != 0)
1679                                 goto fail_fate_actions;
1680
1681                         rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1682                         if (rc != 0) {
1683                                 rte_flow_error_set(error, -rc,
1684                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1685                                         "Bad RSS action");
1686                                 return -rte_errno;
1687                         }
1688                         break;
1689
1690                 case RTE_FLOW_ACTION_TYPE_DROP:
1691                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1692                                                actions_set);
1693                         if ((actions_set & fate_actions_mask) != 0)
1694                                 goto fail_fate_actions;
1695
1696                         spec_filter->template.efs_dmaq_id =
1697                                 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1698                         break;
1699
1700                 case RTE_FLOW_ACTION_TYPE_FLAG:
1701                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1702                                                actions_set);
1703                         if ((actions_set & mark_actions_mask) != 0)
1704                                 goto fail_actions_overlap;
1705
1706                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1707                                 rte_flow_error_set(error, ENOTSUP,
1708                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1709                                         "FLAG action is not supported on the current Rx datapath");
1710                                 return -rte_errno;
1711                         }
1712
1713                         spec_filter->template.efs_flags |=
1714                                 EFX_FILTER_FLAG_ACTION_FLAG;
1715                         break;
1716
1717                 case RTE_FLOW_ACTION_TYPE_MARK:
1718                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1719                                                actions_set);
1720                         if ((actions_set & mark_actions_mask) != 0)
1721                                 goto fail_actions_overlap;
1722
1723                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1724                                 rte_flow_error_set(error, ENOTSUP,
1725                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1726                                         "MARK action is not supported on the current Rx datapath");
1727                                 return -rte_errno;
1728                         }
1729
1730                         rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1731                         if (rc != 0) {
1732                                 rte_flow_error_set(error, rc,
1733                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1734                                         "Bad MARK action");
1735                                 return -rte_errno;
1736                         }
1737                         break;
1738
1739                 default:
1740                         rte_flow_error_set(error, ENOTSUP,
1741                                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1742                                            "Action is not supported");
1743                         return -rte_errno;
1744                 }
1745
1746                 actions_set |= (1UL << actions->type);
1747         }
1748 #undef SFC_BUILD_SET_OVERFLOW
1749
1750         /* When fate is unknown, drop traffic. */
1751         if ((actions_set & fate_actions_mask) == 0) {
1752                 spec_filter->template.efs_dmaq_id =
1753                         EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1754         }
1755
1756         return 0;
1757
1758 fail_fate_actions:
1759         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1760                            "Cannot combine several fate-deciding actions, "
1761                            "choose between QUEUE, RSS or DROP");
1762         return -rte_errno;
1763
1764 fail_actions_overlap:
1765         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1766                            "Overlapping actions are not supported");
1767         return -rte_errno;
1768 }
1769
1770 /**
1771  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1772  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1773  * specifications after copying.
1774  *
1775  * @param spec[in, out]
1776  *   SFC flow specification to update.
1777  * @param filters_count_for_one_val[in]
1778  *   How many specifications should have the same match flag; this is the
1779  *   number of specifications before copying.
1780  * @param error[out]
1781  *   Perform verbose error reporting if not NULL.
1782  */
1783 static int
1784 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1785                                unsigned int filters_count_for_one_val,
1786                                struct rte_flow_error *error)
1787 {
1788         unsigned int i;
1789         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1790         static const efx_filter_match_flags_t vals[] = {
1791                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1792                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1793         };
1794
1795         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1796                 rte_flow_error_set(error, EINVAL,
1797                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1798                         "Number of specifications is incorrect while copying "
1799                         "by unknown destination flags");
1800                 return -rte_errno;
1801         }
1802
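        /*
         * With two values to spread, the first filters_count_for_one_val
         * copies receive the unknown unicast flag and the remaining
         * copies receive the unknown multicast flag.
         */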
1803         for (i = 0; i < spec_filter->count; i++) {
1804                 /* The check above ensures that divisor can't be zero here */
1805                 spec_filter->filters[i].efs_match_flags |=
1806                         vals[i / filters_count_for_one_val];
1807         }
1808
1809         return 0;
1810 }
1811
1812 /**
1813  * Check that the following condition is met:
1814  * - the list of supported filters has a filter
1815  *   with the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1816  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since such a filter will also
1817  *   be inserted.
1818  *
1819  * @param match[in]
1820  *   The match flags of filter.
1821  * @param spec[in]
1822  *   Specification to be supplemented.
1823  * @param filter[in]
1824  *   SFC filter with list of supported filters.
1825  */
1826 static boolean_t
1827 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1828                                  __rte_unused efx_filter_spec_t *spec,
1829                                  struct sfc_filter *filter)
1830 {
1831         unsigned int i;
1832         efx_filter_match_flags_t match_mcast_dst;
1833
1834         match_mcast_dst =
1835                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1836                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1837         for (i = 0; i < filter->supported_match_num; i++) {
1838                 if (match_mcast_dst == filter->supported_match[i])
1839                         return B_TRUE;
1840         }
1841
1842         return B_FALSE;
1843 }
1844
1845 /**
1846  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1847  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1848  * specifications after copying.
1849  *
1850  * @param spec[in, out]
1851  *   SFC flow specification to update.
1852  * @param filters_count_for_one_val[in]
1853  *   How many specifications should have the same EtherType value; this is
1854  *   the number of specifications before copying.
1855  * @param error[out]
1856  *   Perform verbose error reporting if not NULL.
1857  */
1858 static int
1859 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1860                         unsigned int filters_count_for_one_val,
1861                         struct rte_flow_error *error)
1862 {
1863         unsigned int i;
1864         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1865         static const uint16_t vals[] = {
1866                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1867         };
1868
1869         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1870                 rte_flow_error_set(error, EINVAL,
1871                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1872                         "Number of specifications is incorrect "
1873                         "while copying by Ethertype");
1874                 return -rte_errno;
1875         }
1876
1877         for (i = 0; i < spec_filter->count; i++) {
1878                 spec_filter->filters[i].efs_match_flags |=
1879                         EFX_FILTER_MATCH_ETHER_TYPE;
1880
1881                 /*
1882                  * The check above ensures that
1883                  * filters_count_for_one_val is not 0
1884                  */
1885                 spec_filter->filters[i].efs_ether_type =
1886                         vals[i / filters_count_for_one_val];
1887         }
1888
1889         return 0;
1890 }
1891
1892 /**
1893  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1894  * in the same specifications after copying.
1895  *
1896  * @param spec[in, out]
1897  *   SFC flow specification to update.
1898  * @param filters_count_for_one_val[in]
1899  *   How many specifications should have the same match flag; this is the
1900  *   number of specifications before copying.
1901  * @param error[out]
1902  *   Perform verbose error reporting if not NULL.
1903  */
1904 static int
1905 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1906                             unsigned int filters_count_for_one_val,
1907                             struct rte_flow_error *error)
1908 {
1909         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1910         unsigned int i;
1911
1912         if (filters_count_for_one_val != spec_filter->count) {
1913                 rte_flow_error_set(error, EINVAL,
1914                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1915                         "Number of specifications is incorrect "
1916                         "while copying by outer VLAN ID");
1917                 return -rte_errno;
1918         }
1919
1920         for (i = 0; i < spec_filter->count; i++) {
1921                 spec_filter->filters[i].efs_match_flags |=
1922                         EFX_FILTER_MATCH_OUTER_VID;
1923
1924                 spec_filter->filters[i].efs_outer_vid = 0;
1925         }
1926
1927         return 0;
1928 }
1929
1930 /**
1931  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1932  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1933  * specifications after copying.
1934  *
1935  * @param spec[in, out]
1936  *   SFC flow specification to update.
1937  * @param filters_count_for_one_val[in]
1938  *   How many specifications should have the same match flag; this is the
1939  *   number of specifications before copying.
1940  * @param error[out]
1941  *   Perform verbose error reporting if not NULL.
1942  */
1943 static int
1944 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1945                                     unsigned int filters_count_for_one_val,
1946                                     struct rte_flow_error *error)
1947 {
1948         unsigned int i;
1949         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1950         static const efx_filter_match_flags_t vals[] = {
1951                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1952                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1953         };
1954
1955         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1956                 rte_flow_error_set(error, EINVAL,
1957                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1958                         "Number of specifications is incorrect while copying "
1959                         "by inner frame unknown destination flags");
1960                 return -rte_errno;
1961         }
1962
1963         for (i = 0; i < spec_filter->count; i++) {
1964                 /* The check above ensures that divisor can't be zero here */
1965                 spec_filter->filters[i].efs_match_flags |=
1966                         vals[i / filters_count_for_one_val];
1967         }
1968
1969         return 0;
1970 }
1971
1972 /**
1973  * Check that the following conditions are met:
1974  * - the specification corresponds to a filter for encapsulated traffic;
1975  * - the list of supported filters has a filter
1976  *   with the EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1977  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since such a filter will
1978  *   also be inserted.
1979  *
1980  * @param match[in]
1981  *   The match flags of filter.
1982  * @param spec[in]
1983  *   Specification to be supplemented.
1984  * @param filter[in]
1985  *   SFC filter with list of supported filters.
1986  */
1987 static boolean_t
1988 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1989                                       efx_filter_spec_t *spec,
1990                                       struct sfc_filter *filter)
1991 {
1992         unsigned int i;
1993         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1994         efx_filter_match_flags_t match_mcast_dst;
1995
1996         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1997                 return B_FALSE;
1998
1999         match_mcast_dst =
2000                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
2001                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
2002         for (i = 0; i < filter->supported_match_num; i++) {
2003                 if (match_mcast_dst == filter->supported_match[i])
2004                         return B_TRUE;
2005         }
2006
2007         return B_FALSE;
2008 }
2009
2010 /**
2011  * Check that the list of supported filters has a filter that differs
2012  * from @p match only in that it lacks the EFX_FILTER_MATCH_OUTER_VID
2013  * flag; in this case that filter will be used, so the
2014  * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
2015  *
2016  * @param match[in]
2017  *   The match flags of filter.
2018  * @param spec[in]
2019  *   Specification to be supplemented.
2020  * @param filter[in]
2021  *   SFC filter with list of supported filters.
2022  */
2023 static boolean_t
2024 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2025                               __rte_unused efx_filter_spec_t *spec,
2026                               struct sfc_filter *filter)
2027 {
2028         unsigned int i;
2029         efx_filter_match_flags_t match_without_vid =
2030                 match & ~EFX_FILTER_MATCH_OUTER_VID;
2031
2032         for (i = 0; i < filter->supported_match_num; i++) {
2033                 if (match_without_vid == filter->supported_match[i])
2034                         return B_FALSE;
2035         }
2036
2037         return B_TRUE;
2038 }
2039
2040 /*
2041  * Match flags that can be automatically added to filters.
2042  * Selecting the last of equal minimums when searching for the copy flag
2043  * ensures that EFX_FILTER_MATCH_UNKNOWN_UCAST_DST has a higher priority
2044  * than EFX_FILTER_MATCH_ETHER_TYPE, because the filter with
2045  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of
2046  * supported filters.
2047  */
2048 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2049         {
2050                 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2051                 .vals_count = 2,
2052                 .set_vals = sfc_flow_set_unknown_dst_flags,
2053                 .spec_check = sfc_flow_check_unknown_dst_flags,
2054         },
2055         {
2056                 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
2057                 .vals_count = 2,
2058                 .set_vals = sfc_flow_set_ethertypes,
2059                 .spec_check = NULL,
2060         },
2061         {
2062                 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2063                 .vals_count = 2,
2064                 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2065                 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2066         },
2067         {
2068                 .flag = EFX_FILTER_MATCH_OUTER_VID,
2069                 .vals_count = 1,
2070                 .set_vals = sfc_flow_set_outer_vid_flag,
2071                 .spec_check = sfc_flow_check_outer_vid_flag,
2072         },
2073 };
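/*
 * For example, if a supported filter requires both an unknown
 * destination flag and EFX_FILTER_MATCH_ETHER_TYPE on top of the
 * template, the original specification is expanded into
 * 2 * 2 = 4 copies (unicast/multicast destination x IPv4/IPv6).
 */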
2074
2075 /* Get item from array sfc_flow_copy_flags */
2076 static const struct sfc_flow_copy_flag *
2077 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2078 {
2079         unsigned int i;
2080
2081         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2082                 if (sfc_flow_copy_flags[i].flag == flag)
2083                         return &sfc_flow_copy_flags[i];
2084         }
2085
2086         return NULL;
2087 }
2088
2089 /**
2090  * Make copies of the specifications, set match flag and values
2091  * of the field that corresponds to it.
2092  *
2093  * @param spec[in, out]
2094  *   SFC flow specification to update.
2095  * @param flag[in]
2096  *   The match flag to add.
2097  * @param error[out]
2098  *   Perform verbose error reporting if not NULL.
2099  */
2100 static int
2101 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2102                              efx_filter_match_flags_t flag,
2103                              struct rte_flow_error *error)
2104 {
2105         unsigned int i;
2106         unsigned int new_filters_count;
2107         unsigned int filters_count_for_one_val;
2108         const struct sfc_flow_copy_flag *copy_flag;
2109         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2110         int rc;
2111
2112         copy_flag = sfc_flow_get_copy_flag(flag);
2113         if (copy_flag == NULL) {
2114                 rte_flow_error_set(error, ENOTSUP,
2115                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2116                                    "Unsupported spec field for copying");
2117                 return -rte_errno;
2118         }
2119
2120         new_filters_count = spec_filter->count * copy_flag->vals_count;
2121         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2122                 rte_flow_error_set(error, EINVAL,
2123                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2124                         "Too many EFX specifications in the flow rule");
2125                 return -rte_errno;
2126         }
2127
2128         /* Append copies of the existing filter specifications */
2129         for (i = spec_filter->count; i < new_filters_count; i++) {
2130                 spec_filter->filters[i] =
2131                         spec_filter->filters[i - spec_filter->count];
2132         }
2133
2134         filters_count_for_one_val = spec_filter->count;
2135         spec_filter->count = new_filters_count;
2136
2137         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2138         if (rc != 0)
2139                 return rc;
2140
2141         return 0;
2142 }
2143
2144 /**
2145  * Check that the given set of match flags missing in the original filter spec
2146  * could be covered by adding spec copies which specify the corresponding
2147  * flags and packet field values to match.
2148  *
2149  * @param miss_flags[in]
2150  *   Match flags that the spec lacks compared to the supported filter.
2151  * @param spec[in]
2152  *   Specification to be supplemented.
2153  * @param filter[in]
2154  *   SFC filter.
2155  *
2156  * @return
2157  *   Number of specifications after copying, or 0 if the flags cannot be added.
2158  */
2159 static unsigned int
2160 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2161                              efx_filter_spec_t *spec,
2162                              struct sfc_filter *filter)
2163 {
2164         unsigned int i;
2165         efx_filter_match_flags_t copy_flags = 0;
2166         efx_filter_match_flags_t flag;
2167         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2168         sfc_flow_spec_check *check;
2169         unsigned int multiplier = 1;
2170
2171         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2172                 flag = sfc_flow_copy_flags[i].flag;
2173                 check = sfc_flow_copy_flags[i].spec_check;
2174                 if ((flag & miss_flags) == flag) {
2175                         if (check != NULL && (!check(match, spec, filter)))
2176                                 continue;
2177
2178                         copy_flags |= flag;
2179                         multiplier *= sfc_flow_copy_flags[i].vals_count;
2180                 }
2181         }
2182
2183         if (copy_flags == miss_flags)
2184                 return multiplier;
2185
2186         return 0;
2187 }
2188
2189 /**
2190  * Attempt to supplement the specification template to the minimally
2191  * supported set of match flags. To do this, it is necessary to copy
2192  * the specifications, filling them with the values of fields that
2193  * correspond to the missing flags.
2194  * The necessary and sufficient filter is built from the fewest number
2195  * of copies which could be made to cover the minimally required set
2196  * of flags.
2197  *
2198  * @param sa[in]
2199  *   SFC adapter.
2200  * @param spec[in, out]
2201  *   SFC flow specification to update.
2202  * @param error[out]
2203  *   Perform verbose error reporting if not NULL.
2204  */
2205 static int
2206 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2207                                struct sfc_flow_spec *spec,
2208                                struct rte_flow_error *error)
2209 {
2210         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2211         struct sfc_filter *filter = &sa->filter;
2212         efx_filter_match_flags_t miss_flags;
2213         efx_filter_match_flags_t min_miss_flags = 0;
2214         efx_filter_match_flags_t match;
2215         unsigned int min_multiplier = UINT_MAX;
2216         unsigned int multiplier;
2217         unsigned int i;
2218         int rc;
2219
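        /*
         * Among the supported filters whose match flags are a superset
         * of the template's flags, pick the one whose missing flags can
         * be synthesised with the fewest specification copies; ties are
         * resolved in favour of the last candidate (see the comment
         * above sfc_flow_copy_flags).
         */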
2220         match = spec_filter->template.efs_match_flags;
2221         for (i = 0; i < filter->supported_match_num; i++) {
2222                 if ((match & filter->supported_match[i]) == match) {
2223                         miss_flags = filter->supported_match[i] & (~match);
2224                         multiplier = sfc_flow_check_missing_flags(miss_flags,
2225                                 &spec_filter->template, filter);
2226                         if (multiplier > 0) {
2227                                 if (multiplier <= min_multiplier) {
2228                                         min_multiplier = multiplier;
2229                                         min_miss_flags = miss_flags;
2230                                 }
2231                         }
2232                 }
2233         }
2234
2235         if (min_multiplier == UINT_MAX) {
2236                 rte_flow_error_set(error, ENOTSUP,
2237                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2238                                    "The flow rule pattern is unsupported");
2239                 return -rte_errno;
2240         }
2241
2242         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2243                 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2244
2245                 if ((flag & min_miss_flags) == flag) {
2246                         rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2247                         if (rc != 0)
2248                                 return rc;
2249                 }
2250         }
2251
2252         return 0;
2253 }
2254
2255 /**
2256  * Check that set of match flags is referred to by a filter. Filter is
2257  * described by match flags with the ability to add OUTER_VID and INNER_VID
2258  * flags.
2259  *
2260  * @param match_flags[in]
2261  *   Set of match flags.
2262  * @param flags_pattern[in]
2263  *   Pattern of filter match flags.
2264  */
2265 static boolean_t
2266 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2267                             efx_filter_match_flags_t flags_pattern)
2268 {
2269         if ((match_flags & flags_pattern) != flags_pattern)
2270                 return B_FALSE;
2271
2272         switch (match_flags & ~flags_pattern) {
2273         case 0:
2274         case EFX_FILTER_MATCH_OUTER_VID:
2275         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2276                 return B_TRUE;
2277         default:
2278                 return B_FALSE;
2279         }
2280 }
2281
2282 /**
2283  * Check whether the spec maps to a hardware filter which is known to be
2284  * ineffective despite being valid, e.g. an overly coarse EtherType or IP protocol match.
2285  *
2286  * @param filter[in]
2287  *   SFC filter with list of supported filters.
2288  * @param spec[in]
2289  *   SFC flow specification.
2290  */
2291 static boolean_t
2292 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2293                                   struct sfc_flow_spec *spec)
2294 {
2295         unsigned int i;
2296         uint16_t ether_type;
2297         uint8_t ip_proto;
2298         efx_filter_match_flags_t match_flags;
2299         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2300
2301         for (i = 0; i < spec_filter->count; i++) {
2302                 match_flags = spec_filter->filters[i].efs_match_flags;
2303
2304                 if (sfc_flow_is_match_with_vids(match_flags,
2305                                                 EFX_FILTER_MATCH_ETHER_TYPE) ||
2306                     sfc_flow_is_match_with_vids(match_flags,
2307                                                 EFX_FILTER_MATCH_ETHER_TYPE |
2308                                                 EFX_FILTER_MATCH_LOC_MAC)) {
2309                         ether_type = spec_filter->filters[i].efs_ether_type;
2310                         if (filter->supports_ip_proto_or_addr_filter &&
2311                             (ether_type == EFX_ETHER_TYPE_IPV4 ||
2312                              ether_type == EFX_ETHER_TYPE_IPV6))
2313                                 return B_TRUE;
2314                 } else if (sfc_flow_is_match_with_vids(match_flags,
2315                                 EFX_FILTER_MATCH_ETHER_TYPE |
2316                                 EFX_FILTER_MATCH_IP_PROTO) ||
2317                            sfc_flow_is_match_with_vids(match_flags,
2318                                 EFX_FILTER_MATCH_ETHER_TYPE |
2319                                 EFX_FILTER_MATCH_IP_PROTO |
2320                                 EFX_FILTER_MATCH_LOC_MAC)) {
2321                         ip_proto = spec_filter->filters[i].efs_ip_proto;
2322                         if (filter->supports_rem_or_local_port_filter &&
2323                             (ip_proto == EFX_IPPROTO_TCP ||
2324                              ip_proto == EFX_IPPROTO_UDP))
2325                                 return B_TRUE;
2326                 }
2327         }
2328
2329         return B_FALSE;
2330 }
2331
2332 static int
2333 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2334                               struct rte_flow *flow,
2335                               struct rte_flow_error *error)
2336 {
2337         struct sfc_flow_spec *spec = &flow->spec;
2338         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2339         efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2340         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2341         int rc;
2342
2343         /* Initialize the first filter spec with template */
2344         spec_filter->filters[0] = *spec_tmpl;
2345         spec_filter->count = 1;
2346
2347         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2348                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2349                 if (rc != 0)
2350                         return rc;
2351         }
2352
2353         if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2354                 rte_flow_error_set(error, ENOTSUP,
2355                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2356                         "The flow rule pattern is unsupported");
2357                 return -rte_errno;
2358         }
2359
2360         return 0;
2361 }
2362
2363 static int
2364 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2365                              const struct rte_flow_item pattern[],
2366                              const struct rte_flow_action actions[],
2367                              struct rte_flow *flow,
2368                              struct rte_flow_error *error)
2369 {
2370         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2371         struct sfc_flow_spec *spec = &flow->spec;
2372         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2373         struct sfc_flow_parse_ctx ctx;
2374         int rc;
2375
2376         ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2377         ctx.filter = &spec_filter->template;
2378
2379         rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
2380                                     pattern, &ctx, error);
2381         if (rc != 0)
2382                 goto fail_bad_value;
2383
2384         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2385         if (rc != 0)
2386                 goto fail_bad_value;
2387
2388         rc = sfc_flow_validate_match_flags(sa, flow, error);
2389         if (rc != 0)
2390                 goto fail_bad_value;
2391
2392         return 0;
2393
2394 fail_bad_value:
2395         return rc;
2396 }
2397
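/*
 * Top-level parse entry point: parse the attributes first (this is
 * expected to select the flow specification type) and then dispatch
 * to the parser registered for that type.
 */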
2398 static int
2399 sfc_flow_parse(struct rte_eth_dev *dev,
2400                const struct rte_flow_attr *attr,
2401                const struct rte_flow_item pattern[],
2402                const struct rte_flow_action actions[],
2403                struct rte_flow *flow,
2404                struct rte_flow_error *error)
2405 {
2406         const struct sfc_flow_ops_by_spec *ops;
2407         int rc;
2408
2409         rc = sfc_flow_parse_attr(attr, flow, error);
2410         if (rc != 0)
2411                 return rc;
2412
2413         ops = sfc_flow_get_ops_by_spec(flow);
2414         if (ops == NULL || ops->parse == NULL) {
2415                 rte_flow_error_set(error, ENOTSUP,
2416                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2417                                    "No backend to handle this flow");
2418                 return -rte_errno;
2419         }
2420
2421         return ops->parse(dev, pattern, actions, flow, error);
2422 }
2423
2424 static struct rte_flow *
2425 sfc_flow_zmalloc(struct rte_flow_error *error)
2426 {
2427         struct rte_flow *flow;
2428
2429         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2430         if (flow == NULL) {
2431                 rte_flow_error_set(error, ENOMEM,
2432                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2433                                    "Failed to allocate memory");
2434         }
2435
2436         return flow;
2437 }
2438
2439 static void
2440 sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow)
2441 {
2442         rte_free(flow);
2443 }
2444
2445 static int
2446 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2447                 struct rte_flow_error *error)
2448 {
2449         const struct sfc_flow_ops_by_spec *ops;
2450         int rc;
2451
2452         ops = sfc_flow_get_ops_by_spec(flow);
2453         if (ops == NULL || ops->insert == NULL) {
2454                 rte_flow_error_set(error, ENOTSUP,
2455                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2456                                    "No backend to handle this flow");
2457                 return rte_errno;
2458         }
2459
2460         rc = ops->insert(sa, flow);
2461         if (rc != 0) {
2462                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2463                                    NULL, "Failed to insert the flow rule");
2464         }
2465
2466         return rc;
2467 }
2468
2469 static int
2470 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2471                 struct rte_flow_error *error)
2472 {
2473         const struct sfc_flow_ops_by_spec *ops;
2474         int rc;
2475
2476         ops = sfc_flow_get_ops_by_spec(flow);
2477         if (ops == NULL || ops->remove == NULL) {
2478                 rte_flow_error_set(error, ENOTSUP,
2479                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2480                                    "No backend to handle this flow");
2481                 return rte_errno;
2482         }
2483
2484         rc = ops->remove(sa, flow);
2485         if (rc != 0) {
2486                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2487                                    NULL, "Failed to remove the flow rule");
2488         }
2489
2490         return rc;
2491 }
2492
2493 static int
2494 sfc_flow_validate(struct rte_eth_dev *dev,
2495                   const struct rte_flow_attr *attr,
2496                   const struct rte_flow_item pattern[],
2497                   const struct rte_flow_action actions[],
2498                   struct rte_flow_error *error)
2499 {
2500         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2501         struct rte_flow *flow;
2502         int rc;
2503
2504         flow = sfc_flow_zmalloc(error);
2505         if (flow == NULL)
2506                 return -rte_errno;
2507
2508         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2509
2510         sfc_flow_free(sa, flow);
2511
2512         return rc;
2513 }
2514
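/*
 * Create a flow rule: parse it and link it into the adapter's flow
 * list. If the adapter is already started, the hardware filters are
 * inserted immediately; otherwise insertion is deferred until
 * sfc_flow_start().
 */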
2515 static struct rte_flow *
2516 sfc_flow_create(struct rte_eth_dev *dev,
2517                 const struct rte_flow_attr *attr,
2518                 const struct rte_flow_item pattern[],
2519                 const struct rte_flow_action actions[],
2520                 struct rte_flow_error *error)
2521 {
2522         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2523         struct rte_flow *flow = NULL;
2524         int rc;
2525
2526         flow = sfc_flow_zmalloc(error);
2527         if (flow == NULL)
2528                 goto fail_no_mem;
2529
2530         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2531         if (rc != 0)
2532                 goto fail_bad_value;
2533
2534         sfc_adapter_lock(sa);
2535
2536         TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2537
2538         if (sa->state == SFC_ADAPTER_STARTED) {
2539                 rc = sfc_flow_insert(sa, flow, error);
2540                 if (rc != 0)
2541                         goto fail_flow_insert;
2542         }
2543
2544         sfc_adapter_unlock(sa);
2545
2546         return flow;
2547
2548 fail_flow_insert:
2549         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2550
2551 fail_bad_value:
2552         sfc_flow_free(sa, flow);
2553         sfc_adapter_unlock(sa);
2554
2555 fail_no_mem:
2556         return NULL;
2557 }
2558
2559 static int
2560 sfc_flow_destroy(struct rte_eth_dev *dev,
2561                  struct rte_flow *flow,
2562                  struct rte_flow_error *error)
2563 {
2564         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2565         struct rte_flow *flow_ptr;
2566         int rc = EINVAL;
2567
2568         sfc_adapter_lock(sa);
2569
2570         TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2571                 if (flow_ptr == flow)
2572                         rc = 0;
2573         }
2574         if (rc != 0) {
2575                 rte_flow_error_set(error, rc,
2576                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2577                                    "Failed to find flow rule to destroy");
2578                 goto fail_bad_value;
2579         }
2580
2581         if (sa->state == SFC_ADAPTER_STARTED)
2582                 rc = sfc_flow_remove(sa, flow, error);
2583
2584         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2585         sfc_flow_free(sa, flow);
2586
2587 fail_bad_value:
2588         sfc_adapter_unlock(sa);
2589
2590         return -rc;
2591 }
2592
2593 static int
2594 sfc_flow_flush(struct rte_eth_dev *dev,
2595                struct rte_flow_error *error)
2596 {
2597         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2598         struct rte_flow *flow;
2599         int ret = 0;
2600
2601         sfc_adapter_lock(sa);
2602
2603         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2604                 if (sa->state == SFC_ADAPTER_STARTED) {
2605                         int rc;
2606
2607                         rc = sfc_flow_remove(sa, flow, error);
2608                         if (rc != 0)
2609                                 ret = rc;
2610                 }
2611
2612                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2613                 sfc_flow_free(sa, flow);
2614         }
2615
2616         sfc_adapter_unlock(sa);
2617
2618         return -ret;
2619 }
2620
2621 static int
2622 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2623                  struct rte_flow_error *error)
2624 {
2625         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2626         int ret = 0;
2627
2628         sfc_adapter_lock(sa);
2629         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2630                 rte_flow_error_set(error, EBUSY,
2631                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2632                                    NULL, "please close the port first");
2633                 ret = -rte_errno;
2634         } else {
2635                 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2636         }
2637         sfc_adapter_unlock(sa);
2638
2639         return ret;
2640 }
2641
2642 const struct rte_flow_ops sfc_flow_ops = {
2643         .validate = sfc_flow_validate,
2644         .create = sfc_flow_create,
2645         .destroy = sfc_flow_destroy,
2646         .flush = sfc_flow_flush,
2647         .query = NULL,
2648         .isolate = sfc_flow_isolate,
2649 };
2650
2651 void
2652 sfc_flow_init(struct sfc_adapter *sa)
2653 {
2654         SFC_ASSERT(sfc_adapter_is_locked(sa));
2655
2656         TAILQ_INIT(&sa->flow_list);
2657 }
2658
2659 void
2660 sfc_flow_fini(struct sfc_adapter *sa)
2661 {
2662         struct rte_flow *flow;
2663
2664         SFC_ASSERT(sfc_adapter_is_locked(sa));
2665
2666         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2667                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2668                 sfc_flow_free(sa, flow);
2669         }
2670 }
2671
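/*
 * On adapter stop, remove the hardware filters of every flow rule and
 * release the shared dummy RSS context if one was allocated. The rules
 * themselves stay in the list and are re-inserted by sfc_flow_start().
 */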
2672 void
2673 sfc_flow_stop(struct sfc_adapter *sa)
2674 {
2675         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
2676         struct sfc_rss *rss = &sas->rss;
2677         struct rte_flow *flow;
2678
2679         SFC_ASSERT(sfc_adapter_is_locked(sa));
2680
2681         TAILQ_FOREACH(flow, &sa->flow_list, entries)
2682                 sfc_flow_remove(sa, flow, NULL);
2683
2684         if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
2685                 efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
2686                 rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
2687         }
2688 }
2689
2690 int
2691 sfc_flow_start(struct sfc_adapter *sa)
2692 {
2693         struct rte_flow *flow;
2694         int rc = 0;
2695
2696         sfc_log_init(sa, "entry");
2697
2698         SFC_ASSERT(sfc_adapter_is_locked(sa));
2699
2700         TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2701                 rc = sfc_flow_insert(sa, flow, NULL);
2702                 if (rc != 0)
2703                         goto fail_bad_flow;
2704         }
2705
2706         sfc_log_init(sa, "done");
2707
2708 fail_bad_flow:
2709         return rc;
2710 }