net/sfc: implement flow insert/remove in MAE backend
drivers/net/sfc/sfc_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

struct sfc_flow_ops_by_spec {
        sfc_flow_parse_cb_t     *parse;
        sfc_flow_verify_cb_t    *verify;
        sfc_flow_cleanup_cb_t   *cleanup;
        sfc_flow_insert_cb_t    *insert;
        sfc_flow_remove_cb_t    *remove;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
        .parse = sfc_flow_parse_rte_to_filter,
        .verify = NULL,
        .cleanup = NULL,
        .insert = sfc_flow_filter_insert,
        .remove = sfc_flow_filter_remove,
};

static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
        .parse = sfc_flow_parse_rte_to_mae,
        .verify = sfc_mae_flow_verify,
        .cleanup = sfc_mae_flow_cleanup,
        .insert = sfc_mae_flow_insert,
        .remove = sfc_mae_flow_remove,
};

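/* Pick the set of backend callbacks that matches the flow spec type. */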
static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        const struct sfc_flow_ops_by_spec *ops = NULL;

        switch (spec->type) {
        case SFC_FLOW_SPEC_FILTER:
                ops = &sfc_flow_ops_filter;
                break;
        case SFC_FLOW_SPEC_MAE:
                ops = &sfc_flow_ops_mae;
                break;
        default:
                SFC_ASSERT(false);
                break;
        }

        return ops;
}

/*
 * Currently, the filter-based (VNIC) flow API is implemented in such
 * a manner that each flow rule is converted to one or more hardware
 * filters. All elements of the flow rule (attributes, pattern items,
 * actions) correspond to one or more fields in the efx_filter_spec_s
 * structure that represents the hardware filter.
 * If some required field is unset in the flow rule, a number of
 * filter copies is created to cover all possible values of such
 * a field.
 */
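
/*
 * For example, if a flow rule does not constrain the destination MAC
 * address, filter copies with the unknown unicast and unknown multicast
 * destination match flags are created so that all possible destinations
 * are covered.
 */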

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

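/*
 * Callbacks used when a flow rule is expanded into several filter
 * specification copies in order to cover match fields left unset.
 */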
typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

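/* Check whether the buffer contains only zero bytes. */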
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" structures
 * for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec", then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that the mask does not request more match bits than supp_mask */
        for (i = 0; i < size; i++) {
                supp = ((const uint8_t *)supp_mask)[i];

                if (~supp & mask[i]) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}

/*
 * Protocol parsers.
 * Partial masking is not supported, so masks in items should be
 * either full or empty (zeroed) and set only for the supported
 * fields which are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (rte_is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!rte_is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * ethertype masks are equal to zero in the inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!rte_is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in the item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
                .inner_type = RTE_BE16(0xffff),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in the item and
         * in little-endian in efx_spec, so byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                /* Apply mask to keep VID only */
                vid = rte_bswap16(spec->tci & mask->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN TPID matching is not supported");
                return -rte_errno;
        }
        if (mask->inner_type == supp_mask.inner_type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
        } else if (mask->inner_type) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Bad mask for VLAN inner_type");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in the item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in the item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}

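/*
 * Fill in the VNI/VSID match field of the filter specification.
 * Only a full or an empty (zero) VNI/VSID mask is supported.
 */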
static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, the default mask will be
 *   used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      struct sfc_flow_parse_ctx *parse_ctx,
                      struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

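/*
 * Pattern items supported by the filter-based backend. The layer
 * markers enforce item ordering; tunnel items restart the layer
 * sequence so that inner frame items can follow them.
 */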
static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_nvgre,
        },
};

/*
 * Protocol-independent flow API support
 */
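
/*
 * Validate flow rule attributes and choose the backend: rules without
 * the transfer attribute are backed by VNIC filters, while transfer
 * rules are backed by the MAE.
 */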
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
                    const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_flow_spec_mae *spec_mae = &spec->mae;
        struct sfc_mae *mae = &sa->mae;

        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Ingress is compulsory");
                return -rte_errno;
        }
        if (attr->transfer == 0) {
                if (attr->priority != 0) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Priorities are unsupported");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_FILTER;
                spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
                spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
                spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
        } else {
                if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                           attr, "Transfer is not supported");
                        return -rte_errno;
                }
                if (attr->priority > mae->nb_action_rule_prios_max) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Unsupported priority level");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_MAE;
                spec_mae->priority = attr->priority;
                spec_mae->match_spec = NULL;
                spec_mae->action_set = NULL;
                spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        return 0;
}

/* Get an item of the given type from the array of supported pattern items */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
                  unsigned int nb_items,
                  enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < nb_items; i++)
                if (items[i].type == type)
                        return &items[i];

        return NULL;
}

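/*
 * Walk the pattern items, validate the layer ordering and inner frame
 * constraints, and invoke the matching item parser for each item.
 */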
int
sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
                       unsigned int nb_flow_items,
                       const struct rte_flow_item pattern[],
                       struct sfc_flow_parse_ctx *parse_ctx,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(flow_items, nb_flow_items,
                                         pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                if (parse_ctx->type != item->ctx_type) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                        "Parse context type mismatch");
                        return -rte_errno;
                }

                rc = item->parse(pattern, parse_ctx, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}

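/* Convert the QUEUE action to an Rx queue in the filter specification. */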
static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_rxq *rxq;
        struct sfc_rxq_info *rxq_info;

        if (queue->index >= sfc_sa2shared(sa)->rxq_count)
                return -EINVAL;

        rxq = &sa->rxq_ctrl[queue->index];
        spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
        spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
                                            SFC_RXQ_FLAG_RSS_HASH);

        return 0;
}

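/*
 * Convert the RSS action either to a single queue (for a trivial RSS
 * configuration) or to a dedicated RSS context configuration.
 */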
1343 static int
1344 sfc_flow_parse_rss(struct sfc_adapter *sa,
1345                    const struct rte_flow_action_rss *action_rss,
1346                    struct rte_flow *flow)
1347 {
1348         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1349         struct sfc_rss *rss = &sas->rss;
1350         unsigned int rxq_sw_index;
1351         struct sfc_rxq *rxq;
1352         unsigned int rxq_hw_index_min;
1353         unsigned int rxq_hw_index_max;
1354         efx_rx_hash_type_t efx_hash_types;
1355         const uint8_t *rss_key;
1356         struct sfc_flow_spec *spec = &flow->spec;
1357         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1358         struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
1359         unsigned int i;
1360
1361         if (action_rss->queue_num == 0)
1362                 return -EINVAL;
1363
1364         rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
1365         rxq = &sa->rxq_ctrl[rxq_sw_index];
1366         rxq_hw_index_min = rxq->hw_index;
1367         rxq_hw_index_max = 0;
1368
1369         for (i = 0; i < action_rss->queue_num; ++i) {
1370                 rxq_sw_index = action_rss->queue[i];
1371
1372                 if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
1373                         return -EINVAL;
1374
1375                 rxq = &sa->rxq_ctrl[rxq_sw_index];
1376
1377                 if (rxq->hw_index < rxq_hw_index_min)
1378                         rxq_hw_index_min = rxq->hw_index;
1379
1380                 if (rxq->hw_index > rxq_hw_index_max)
1381                         rxq_hw_index_max = rxq->hw_index;
1382         }
1383
1384         switch (action_rss->func) {
1385         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1386         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1387                 break;
1388         default:
1389                 return -EINVAL;
1390         }
1391
1392         if (action_rss->level)
1393                 return -EINVAL;
1394
1395         /*
1396          * A dummy RSS action with only one queue and no specific settings
1397          * for hash types and key does not require a dedicated RSS context
1398          * and may be simplified to a single queue action.
1399          */
1400         if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1401             action_rss->key_len == 0) {
1402                 spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
1403                 return 0;
1404         }
1405
1406         if (action_rss->types) {
1407                 int rc;
1408
1409                 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1410                                           &efx_hash_types);
1411                 if (rc != 0)
1412                         return -rc;
1413         } else {
1414                 unsigned int i;
1415
1416                 efx_hash_types = 0;
1417                 for (i = 0; i < rss->hf_map_nb_entries; ++i)
1418                         efx_hash_types |= rss->hf_map[i].efx;
1419         }
1420
1421         if (action_rss->key_len) {
1422                 if (action_rss->key_len != sizeof(rss->key))
1423                         return -EINVAL;
1424
1425                 rss_key = action_rss->key;
1426         } else {
1427                 rss_key = rss->key;
1428         }
1429
1430         spec_filter->rss = B_TRUE;
1431
1432         sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1433         sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1434         sfc_rss_conf->rss_hash_types = efx_hash_types;
1435         rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1436
1437         for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1438                 unsigned int nb_queues = action_rss->queue_num;
1439                 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
1440                 struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
1441
1442                 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1443         }
1444
1445         return 0;
1446 }
1447
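/**
 * Remove the first @p filters_count hardware filters that back the flow
 * specification. Removal is best-effort: every filter is attempted and
 * the first error encountered is returned.
 */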
1448 static int
1449 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1450                     unsigned int filters_count)
1451 {
1452         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1453         unsigned int i;
1454         int ret = 0;
1455
1456         for (i = 0; i < filters_count; i++) {
1457                 int rc;
1458
1459                 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1460                 if (ret == 0 && rc != 0) {
1461                         sfc_err(sa, "failed to remove filter specification "
1462                                 "(rc = %d)", rc);
1463                         ret = rc;
1464                 }
1465         }
1466
1467         return ret;
1468 }
1469
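/**
 * Insert all hardware filters that back the flow specification. On the
 * first failure, the filters inserted so far are flushed, making the
 * operation all-or-nothing.
 */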
1470 static int
1471 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1472 {
1473         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1474         unsigned int i;
1475         int rc = 0;
1476
1477         for (i = 0; i < spec_filter->count; i++) {
1478                 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1479                 if (rc != 0) {
1480                         sfc_flow_spec_flush(sa, spec, i);
1481                         break;
1482                 }
1483         }
1484
1485         return rc;
1486 }
1487
1488 static int
1489 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1490 {
1491         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1492
1493         return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1494 }
1495
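/**
 * Insert a filter-based flow rule into hardware. An exclusive RSS
 * context is allocated for rules with an RSS action; rules that merely
 * need a valid RSS hash (rss_hash_required) share a single dummy RSS
 * context, which is created on first use and remembered in the
 * adapter-level RSS state. On failure, everything set up so far is
 * unwound via the fail_* labels.
 */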
1496 static int
1497 sfc_flow_filter_insert(struct sfc_adapter *sa,
1498                        struct rte_flow *flow)
1499 {
1500         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1501         struct sfc_rss *rss = &sas->rss;
1502         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1503         struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
1504         uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1505         boolean_t create_context;
1506         unsigned int i;
1507         int rc = 0;
1508
1509         create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
1510                         rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);
1511
1512         if (create_context) {
1513                 unsigned int rss_spread;
1514                 unsigned int rss_hash_types;
1515                 uint8_t *rss_key;
1516
1517                 if (spec_filter->rss) {
1518                         rss_spread = MIN(flow_rss->rxq_hw_index_max -
1519                                         flow_rss->rxq_hw_index_min + 1,
1520                                         EFX_MAXRSS);
1521                         rss_hash_types = flow_rss->rss_hash_types;
1522                         rss_key = flow_rss->rss_key;
1523                 } else {
1524                         /*
1525                          * Initialize dummy RSS context parameters to have
1526                          * Initialize dummy RSS context parameters to have
1527                          * a valid RSS hash. Use the default RSS hash
1528                          * function and key.
1529                         rss_spread = 1;
1530                         rss_hash_types = rss->hash_types;
1531                         rss_key = rss->key;
1532                 }
1533
1534                 rc = efx_rx_scale_context_alloc(sa->nic,
1535                                                 EFX_RX_SCALE_EXCLUSIVE,
1536                                                 rss_spread,
1537                                                 &efs_rss_context);
1538                 if (rc != 0)
1539                         goto fail_scale_context_alloc;
1540
1541                 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1542                                            rss->hash_alg,
1543                                            rss_hash_types, B_TRUE);
1544                 if (rc != 0)
1545                         goto fail_scale_mode_set;
1546
1547                 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1548                                           rss_key, sizeof(rss->key));
1549                 if (rc != 0)
1550                         goto fail_scale_key_set;
1551         } else {
1552                 efs_rss_context = rss->dummy_rss_context;
1553         }
1554
1555         if (spec_filter->rss || spec_filter->rss_hash_required) {
1556                 /*
1557                  * At this point, fully elaborated filter specifications
1558                  * have been produced from the template. To make sure that
1559                  * RSS behaviour is consistent between them, set the same
1560                  * RSS context value everywhere.
1561                  */
1562                 for (i = 0; i < spec_filter->count; i++) {
1563                         efx_filter_spec_t *spec = &spec_filter->filters[i];
1564
1565                         spec->efs_rss_context = efs_rss_context;
1566                         spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1567                         if (spec_filter->rss)
1568                                 spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1569                 }
1570         }
1571
1572         rc = sfc_flow_spec_insert(sa, &flow->spec);
1573         if (rc != 0)
1574                 goto fail_filter_insert;
1575
1576         if (create_context) {
1577                 unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
1578                 unsigned int *tbl;
1579
1580                 tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;
1581
1582                 /*
1583                  * The scale table is set after filter insertion because
1584                  * the table entries are relative to the base RxQ ID,
1585                  * which is submitted to the HW by means of inserting a
1586                  * filter. By the time of this request, the HW knows all
1587                  * the information needed to verify the table entries,
1588                  * so the operation will succeed.
1589                  */
1590                 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1591                                           tbl, RTE_DIM(flow_rss->rss_tbl));
1592                 if (rc != 0)
1593                         goto fail_scale_tbl_set;
1594
1595                 /* Remember created dummy RSS context */
1596                 if (!spec_filter->rss)
1597                         rss->dummy_rss_context = efs_rss_context;
1598         }
1599
1600         return 0;
1601
1602 fail_scale_tbl_set:
1603         sfc_flow_spec_remove(sa, &flow->spec);
1604
1605 fail_filter_insert:
1606 fail_scale_key_set:
1607 fail_scale_mode_set:
1608         if (create_context)
1609                 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1610
1611 fail_scale_context_alloc:
1612         return rc;
1613 }
1614
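/**
 * Remove a filter-based flow rule from hardware and free its dedicated
 * RSS context, if any. The shared dummy RSS context used by rules that
 * only need a valid RSS hash is kept; it is freed in sfc_flow_stop().
 */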
1615 static int
1616 sfc_flow_filter_remove(struct sfc_adapter *sa,
1617                        struct rte_flow *flow)
1618 {
1619         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1620         int rc = 0;
1621
1622         rc = sfc_flow_spec_remove(sa, &flow->spec);
1623         if (rc != 0)
1624                 return rc;
1625
1626         if (spec_filter->rss) {
1627                 /*
1628                  * All specifications for a given flow rule have the same RSS
1629                  * context, so the RSS context value is taken from the first
1630                  * filter specification.
1631                  */
1632                 efx_filter_spec_t *spec = &spec_filter->filters[0];
1633
1634                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1635         }
1636
1637         return rc;
1638 }
1639
1640 static int
1641 sfc_flow_parse_mark(struct sfc_adapter *sa,
1642                     const struct rte_flow_action_mark *mark,
1643                     struct rte_flow *flow)
1644 {
1645         struct sfc_flow_spec *spec = &flow->spec;
1646         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1647         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1648
1649         if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1650                 return EINVAL;
1651
1652         spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1653         spec_filter->template.efs_mark = mark->id;
1654
1655         return 0;
1656 }
1657
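/*
 * Parse the action list of a flow rule. Exactly one fate action (QUEUE,
 * RSS or DROP) and at most one mark action (MARK or FLAG) may be given;
 * if no fate action is present, matching traffic is dropped.
 *
 * For illustration only (not part of the driver): with testpmd, a rule
 * exercising this path could be created with
 *   flow create 0 ingress pattern eth / end
 *        actions mark id 42 / queue index 1 / end
 * assuming the Rx datapath in use supports the MARK feature.
 */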
1658 static int
1659 sfc_flow_parse_actions(struct sfc_adapter *sa,
1660                        const struct rte_flow_action actions[],
1661                        struct rte_flow *flow,
1662                        struct rte_flow_error *error)
1663 {
1664         int rc;
1665         struct sfc_flow_spec *spec = &flow->spec;
1666         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1667         const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1668         uint32_t actions_set = 0;
1669         const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1670                                            (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1671                                            (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1672         const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1673                                            (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1674
1675         if (actions == NULL) {
1676                 rte_flow_error_set(error, EINVAL,
1677                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1678                                    "NULL actions");
1679                 return -rte_errno;
1680         }
1681
1682 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
1683         RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
1684
1685         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1686                 switch (actions->type) {
1687                 case RTE_FLOW_ACTION_TYPE_VOID:
1688                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1689                                                actions_set);
1690                         break;
1691
1692                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1693                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1694                                                actions_set);
1695                         if ((actions_set & fate_actions_mask) != 0)
1696                                 goto fail_fate_actions;
1697
1698                         rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1699                         if (rc != 0) {
1700                                 rte_flow_error_set(error, EINVAL,
1701                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1702                                         "Bad QUEUE action");
1703                                 return -rte_errno;
1704                         }
1705                         break;
1706
1707                 case RTE_FLOW_ACTION_TYPE_RSS:
1708                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1709                                                actions_set);
1710                         if ((actions_set & fate_actions_mask) != 0)
1711                                 goto fail_fate_actions;
1712
1713                         rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1714                         if (rc != 0) {
1715                                 rte_flow_error_set(error, -rc,
1716                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1717                                         "Bad RSS action");
1718                                 return -rte_errno;
1719                         }
1720                         break;
1721
1722                 case RTE_FLOW_ACTION_TYPE_DROP:
1723                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1724                                                actions_set);
1725                         if ((actions_set & fate_actions_mask) != 0)
1726                                 goto fail_fate_actions;
1727
1728                         spec_filter->template.efs_dmaq_id =
1729                                 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1730                         break;
1731
1732                 case RTE_FLOW_ACTION_TYPE_FLAG:
1733                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1734                                                actions_set);
1735                         if ((actions_set & mark_actions_mask) != 0)
1736                                 goto fail_actions_overlap;
1737
1738                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1739                                 rte_flow_error_set(error, ENOTSUP,
1740                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1741                                         "FLAG action is not supported on the current Rx datapath");
1742                                 return -rte_errno;
1743                         }
1744
1745                         spec_filter->template.efs_flags |=
1746                                 EFX_FILTER_FLAG_ACTION_FLAG;
1747                         break;
1748
1749                 case RTE_FLOW_ACTION_TYPE_MARK:
1750                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1751                                                actions_set);
1752                         if ((actions_set & mark_actions_mask) != 0)
1753                                 goto fail_actions_overlap;
1754
1755                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1756                                 rte_flow_error_set(error, ENOTSUP,
1757                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1758                                         "MARK action is not supported on the current Rx datapath");
1759                                 return -rte_errno;
1760                         }
1761
1762                         rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1763                         if (rc != 0) {
1764                                 rte_flow_error_set(error, rc,
1765                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1766                                         "Bad MARK action");
1767                                 return -rte_errno;
1768                         }
1769                         break;
1770
1771                 default:
1772                         rte_flow_error_set(error, ENOTSUP,
1773                                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1774                                            "Action is not supported");
1775                         return -rte_errno;
1776                 }
1777
1778                 actions_set |= (1UL << actions->type);
1779         }
1780 #undef SFC_BUILD_SET_OVERFLOW
1781
1782         /* When fate is unknown, drop traffic. */
1783         if ((actions_set & fate_actions_mask) == 0) {
1784                 spec_filter->template.efs_dmaq_id =
1785                         EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1786         }
1787
1788         return 0;
1789
1790 fail_fate_actions:
1791         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1792                            "Cannot combine several fate-deciding actions, "
1793                            "choose between QUEUE, RSS or DROP");
1794         return -rte_errno;
1795
1796 fail_actions_overlap:
1797         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1798                            "Overlapping actions are not supported");
1799         return -rte_errno;
1800 }
1801
1802 /**
1803  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1804  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1805  * specifications after copying.
1806  *
1807  * @param spec[in, out]
1808  *   SFC flow specification to update.
1809  * @param filters_count_for_one_val[in]
1810  *   How many specifications should share one match flag value, which is
1811  *   the number of specifications before copying.
1812  * @param error[out]
1813  *   Perform verbose error reporting if not NULL.
1814  */
1815 static int
1816 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1817                                unsigned int filters_count_for_one_val,
1818                                struct rte_flow_error *error)
1819 {
1820         unsigned int i;
1821         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1822         static const efx_filter_match_flags_t vals[] = {
1823                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1824                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1825         };
1826
1827         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1828                 rte_flow_error_set(error, EINVAL,
1829                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1830                         "Number of specifications is incorrect while copying "
1831                         "by unknown destination flags");
1832                 return -rte_errno;
1833         }
1834
1835         for (i = 0; i < spec_filter->count; i++) {
1836                 /* The check above ensures that divisor can't be zero here */
1837                 spec_filter->filters[i].efs_match_flags |=
1838                         vals[i / filters_count_for_one_val];
1839         }
1840
1841         return 0;
1842 }
1843
1844 /**
1845  * Check that the following condition is met:
1846  * - the list of supported filters has a filter
1847  *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1848  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1849  *   be inserted.
1850  *
1851  * @param match[in]
1852  *   The match flags of filter.
1853  * @param spec[in]
1854  *   Specification to be supplemented.
1855  * @param filter[in]
1856  *   SFC filter with list of supported filters.
1857  */
1858 static boolean_t
1859 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1860                                  __rte_unused efx_filter_spec_t *spec,
1861                                  struct sfc_filter *filter)
1862 {
1863         unsigned int i;
1864         efx_filter_match_flags_t match_mcast_dst;
1865
1866         match_mcast_dst =
1867                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1868                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1869         for (i = 0; i < filter->supported_match_num; i++) {
1870                 if (match_mcast_dst == filter->supported_match[i])
1871                         return B_TRUE;
1872         }
1873
1874         return B_FALSE;
1875 }
1876
1877 /**
1878  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1879  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1880  * specifications after copying.
1881  *
1882  * @param spec[in, out]
1883  *   SFC flow specification to update.
1884  * @param filters_count_for_one_val[in]
1885  *   How many specifications should share one EtherType value, which is
1886  *   the number of specifications before copying.
1887  * @param error[out]
1888  *   Perform verbose error reporting if not NULL.
1889  */
1890 static int
1891 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1892                         unsigned int filters_count_for_one_val,
1893                         struct rte_flow_error *error)
1894 {
1895         unsigned int i;
1896         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1897         static const uint16_t vals[] = {
1898                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1899         };
1900
1901         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1902                 rte_flow_error_set(error, EINVAL,
1903                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1904                         "Number of specifications is incorrect "
1905                         "while copying by Ethertype");
1906                 return -rte_errno;
1907         }
1908
1909         for (i = 0; i < spec_filter->count; i++) {
1910                 spec_filter->filters[i].efs_match_flags |=
1911                         EFX_FILTER_MATCH_ETHER_TYPE;
1912
1913                 /*
1914                  * The check above ensures that
1915                  * filters_count_for_one_val is not 0
1916                  */
1917                 spec_filter->filters[i].efs_ether_type =
1918                         vals[i / filters_count_for_one_val];
1919         }
1920
1921         return 0;
1922 }
1923
1924 /**
1925  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1926  * in the same specifications after copying.
1927  *
1928  * @param spec[in, out]
1929  *   SFC flow specification to update.
1930  * @param filters_count_for_one_val[in]
1931  *   How many specifications should share one match flag value, which is
1932  *   the number of specifications before copying.
1933  * @param error[out]
1934  *   Perform verbose error reporting if not NULL.
1935  */
1936 static int
1937 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1938                             unsigned int filters_count_for_one_val,
1939                             struct rte_flow_error *error)
1940 {
1941         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1942         unsigned int i;
1943
1944         if (filters_count_for_one_val != spec_filter->count) {
1945                 rte_flow_error_set(error, EINVAL,
1946                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1947                         "Number of specifications is incorrect "
1948                         "while copying by outer VLAN ID");
1949                 return -rte_errno;
1950         }
1951
1952         for (i = 0; i < spec_filter->count; i++) {
1953                 spec_filter->filters[i].efs_match_flags |=
1954                         EFX_FILTER_MATCH_OUTER_VID;
1955
1956                 spec_filter->filters[i].efs_outer_vid = 0;
1957         }
1958
1959         return 0;
1960 }
1961
1962 /**
1963  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1964  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1965  * specifications after copying.
1966  *
1967  * @param spec[in, out]
1968  *   SFC flow specification to update.
1969  * @param filters_count_for_one_val[in]
1970  *   How many specifications should share one match flag value, which is
1971  *   the number of specifications before copying.
1972  * @param error[out]
1973  *   Perform verbose error reporting if not NULL.
1974  */
1975 static int
1976 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1977                                     unsigned int filters_count_for_one_val,
1978                                     struct rte_flow_error *error)
1979 {
1980         unsigned int i;
1981         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1982         static const efx_filter_match_flags_t vals[] = {
1983                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1984                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1985         };
1986
1987         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1988                 rte_flow_error_set(error, EINVAL,
1989                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1990                         "Number of specifications is incorrect while copying "
1991                         "by inner frame unknown destination flags");
1992                 return -rte_errno;
1993         }
1994
1995         for (i = 0; i < spec_filter->count; i++) {
1996                 /* The check above ensures that divisor can't be zero here */
1997                 spec_filter->filters[i].efs_match_flags |=
1998                         vals[i / filters_count_for_one_val];
1999         }
2000
2001         return 0;
2002 }
2003
2004 /**
2005  * Check that the following conditions are met:
2006  * - the specification corresponds to a filter for encapsulated traffic
2007  * - the list of supported filters has a filter
2008  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
2009  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
2010  *   be inserted.
2011  *
2012  * @param match[in]
2013  *   The match flags of filter.
2014  * @param spec[in]
2015  *   Specification to be supplemented.
2016  * @param filter[in]
2017  *   SFC filter with list of supported filters.
2018  */
2019 static boolean_t
2020 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
2021                                       efx_filter_spec_t *spec,
2022                                       struct sfc_filter *filter)
2023 {
2024         unsigned int i;
2025         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
2026         efx_filter_match_flags_t match_mcast_dst;
2027
2028         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2029                 return B_FALSE;
2030
2031         match_mcast_dst =
2032                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
2033                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
2034         for (i = 0; i < filter->supported_match_num; i++) {
2035                 if (match_mcast_dst == filter->supported_match[i])
2036                         return B_TRUE;
2037         }
2038
2039         return B_FALSE;
2040 }
2041
2042 /**
2043  * Check whether the list of supported filters has a filter that differs
2044  * from @p match only in that it lacks EFX_FILTER_MATCH_OUTER_VID;
2045  * in this case, that filter will be used directly and the flag
2046  * EFX_FILTER_MATCH_OUTER_VID is not needed.
2047  *
2048  * @param match[in]
2049  *   The match flags of filter.
2050  * @param spec[in]
2051  *   Specification to be supplemented.
2052  * @param filter[in]
2053  *   SFC filter with list of supported filters.
2054  */
2055 static boolean_t
2056 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2057                               __rte_unused efx_filter_spec_t *spec,
2058                               struct sfc_filter *filter)
2059 {
2060         unsigned int i;
2061         efx_filter_match_flags_t match_without_vid =
2062                 match & ~EFX_FILTER_MATCH_OUTER_VID;
2063
2064         for (i = 0; i < filter->supported_match_num; i++) {
2065                 if (match_without_vid == filter->supported_match[i])
2066                         return B_FALSE;
2067         }
2068
2069         return B_TRUE;
2070 }
2071
2072 /*
2073  * Match flags that can be automatically added to filters.
2074  * Selecting the last minimum when searching for the copy flag ensures that
2075  * the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag takes priority over
2076  * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter with
2077  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
2078  * filters.
2079  */
2080 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2081         {
2082                 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2083                 .vals_count = 2,
2084                 .set_vals = sfc_flow_set_unknown_dst_flags,
2085                 .spec_check = sfc_flow_check_unknown_dst_flags,
2086         },
2087         {
2088                 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
2089                 .vals_count = 2,
2090                 .set_vals = sfc_flow_set_ethertypes,
2091                 .spec_check = NULL,
2092         },
2093         {
2094                 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2095                 .vals_count = 2,
2096                 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2097                 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2098         },
2099         {
2100                 .flag = EFX_FILTER_MATCH_OUTER_VID,
2101                 .vals_count = 1,
2102                 .set_vals = sfc_flow_set_outer_vid_flag,
2103                 .spec_check = sfc_flow_check_outer_vid_flag,
2104         },
2105 };
2106
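/*
 * Worked example (illustrative): if a supported filter differs from the
 * template by both EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_ETHER_TYPE, the single template expands into
 * 2 * 2 = 4 specifications: {unknown unicast, unknown multicast} x
 * {IPv4, IPv6}. The total number of copies may never exceed
 * SF_FLOW_SPEC_NB_FILTERS_MAX.
 */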
2107 /* Get item from array sfc_flow_copy_flags */
2108 static const struct sfc_flow_copy_flag *
2109 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2110 {
2111         unsigned int i;
2112
2113         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2114                 if (sfc_flow_copy_flags[i].flag == flag)
2115                         return &sfc_flow_copy_flags[i];
2116         }
2117
2118         return NULL;
2119 }
2120
2121 /**
2122  * Make copies of the specifications, set match flag and values
2123  * of the field that corresponds to it.
2124  *
2125  * @param spec[in, out]
2126  *   SFC flow specification to update.
2127  * @param flag[in]
2128  *   The match flag to add.
2129  * @param error[out]
2130  *   Perform verbose error reporting if not NULL.
2131  */
2132 static int
2133 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2134                              efx_filter_match_flags_t flag,
2135                              struct rte_flow_error *error)
2136 {
2137         unsigned int i;
2138         unsigned int new_filters_count;
2139         unsigned int filters_count_for_one_val;
2140         const struct sfc_flow_copy_flag *copy_flag;
2141         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2142         int rc;
2143
2144         copy_flag = sfc_flow_get_copy_flag(flag);
2145         if (copy_flag == NULL) {
2146                 rte_flow_error_set(error, ENOTSUP,
2147                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2148                                    "Unsupported spec field for copying");
2149                 return -rte_errno;
2150         }
2151
2152         new_filters_count = spec_filter->count * copy_flag->vals_count;
2153         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2154                 rte_flow_error_set(error, EINVAL,
2155                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2156                         "Too many EFX specifications in the flow rule");
2157                 return -rte_errno;
2158         }
2159
2160         /* Copy filters specifications */
2161         for (i = spec_filter->count; i < new_filters_count; i++) {
2162                 spec_filter->filters[i] =
2163                         spec_filter->filters[i - spec_filter->count];
2164         }
2165
2166         filters_count_for_one_val = spec_filter->count;
2167         spec_filter->count = new_filters_count;
2168
2169         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2170         if (rc != 0)
2171                 return rc;
2172
2173         return 0;
2174 }
2175
2176 /**
2177  * Check that the given set of match flags missing in the original filter spec
2178  * could be covered by adding spec copies which specify the corresponding
2179  * flags and packet field values to match.
2180  *
2181  * @param miss_flags[in]
2182  *   Flags of the supported filter that are missing from the spec.
2183  * @param spec[in]
2184  *   Specification to be supplemented.
2185  * @param filter[in]
2186  *   SFC filter.
2187  *
2188  * @return
2189  *   Number of specifications after copy or 0, if the flags can not be added.
2190  */
2191 static unsigned int
2192 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2193                              efx_filter_spec_t *spec,
2194                              struct sfc_filter *filter)
2195 {
2196         unsigned int i;
2197         efx_filter_match_flags_t copy_flags = 0;
2198         efx_filter_match_flags_t flag;
2199         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2200         sfc_flow_spec_check *check;
2201         unsigned int multiplier = 1;
2202
2203         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2204                 flag = sfc_flow_copy_flags[i].flag;
2205                 check = sfc_flow_copy_flags[i].spec_check;
2206                 if ((flag & miss_flags) == flag) {
2207                         if (check != NULL && (!check(match, spec, filter)))
2208                                 continue;
2209
2210                         copy_flags |= flag;
2211                         multiplier *= sfc_flow_copy_flags[i].vals_count;
2212                 }
2213         }
2214
2215         if (copy_flags == miss_flags)
2216                 return multiplier;
2217
2218         return 0;
2219 }
2220
2221 /**
2222  * Attempt to supplement the specification template up to the minimally
2223  * supported set of match flags. To do this, the specifications are
2224  * copied and filled in with the values of the fields that
2225  * correspond to the missing flags.
2226  * The necessary and sufficient filter set is built from the fewest
2227  * number of copies which could be made to cover the minimally
2228  * required set of flags.
2229  *
2230  * @param sa[in]
2231  *   SFC adapter.
2232  * @param spec[in, out]
2233  *   SFC flow specification to update.
2234  * @param error[out]
2235  *   Perform verbose error reporting if not NULL.
2236  */
2237 static int
2238 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2239                                struct sfc_flow_spec *spec,
2240                                struct rte_flow_error *error)
2241 {
2242         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2243         struct sfc_filter *filter = &sa->filter;
2244         efx_filter_match_flags_t miss_flags;
2245         efx_filter_match_flags_t min_miss_flags = 0;
2246         efx_filter_match_flags_t match;
2247         unsigned int min_multiplier = UINT_MAX;
2248         unsigned int multiplier;
2249         unsigned int i;
2250         int rc;
2251
2252         match = spec_filter->template.efs_match_flags;
2253         for (i = 0; i < filter->supported_match_num; i++) {
2254                 if ((match & filter->supported_match[i]) == match) {
2255                         miss_flags = filter->supported_match[i] & (~match);
2256                         multiplier = sfc_flow_check_missing_flags(miss_flags,
2257                                 &spec_filter->template, filter);
2258                         if (multiplier > 0) {
2259                                 if (multiplier <= min_multiplier) {
2260                                         min_multiplier = multiplier;
2261                                         min_miss_flags = miss_flags;
2262                                 }
2263                         }
2264                 }
2265         }
2266
2267         if (min_multiplier == UINT_MAX) {
2268                 rte_flow_error_set(error, ENOTSUP,
2269                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2270                                    "The flow rule pattern is unsupported");
2271                 return -rte_errno;
2272         }
2273
2274         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2275                 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2276
2277                 if ((flag & min_miss_flags) == flag) {
2278                         rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2279                         if (rc != 0)
2280                                 return rc;
2281                 }
2282         }
2283
2284         return 0;
2285 }
2286
2287 /**
2288  * Check that a set of match flags is covered by a filter pattern. The
2289  * filter is described by match flags with the ability to add OUTER_VID
2290  * and INNER_VID flags.
2291  *
2292  * @param match_flags[in]
2293  *   Set of match flags.
2294  * @param flags_pattern[in]
2295  *   Pattern of filter match flags.
2296  */
2297 static boolean_t
2298 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2299                             efx_filter_match_flags_t flags_pattern)
2300 {
2301         if ((match_flags & flags_pattern) != flags_pattern)
2302                 return B_FALSE;
2303
2304         switch (match_flags & ~flags_pattern) {
2305         case 0:
2306         case EFX_FILTER_MATCH_OUTER_VID:
2307         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2308                 return B_TRUE;
2309         default:
2310                 return B_FALSE;
2311         }
2312 }
2313
2314 /**
2315  * Check whether the spec maps to a hardware filter which is known to be
2316  * ineffective despite being valid.
2317  *
2318  * @param filter[in]
2319  *   SFC filter with list of supported filters.
2320  * @param spec[in]
2321  *   SFC flow specification.
2322  */
2323 static boolean_t
2324 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2325                                   struct sfc_flow_spec *spec)
2326 {
2327         unsigned int i;
2328         uint16_t ether_type;
2329         uint8_t ip_proto;
2330         efx_filter_match_flags_t match_flags;
2331         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2332
2333         for (i = 0; i < spec_filter->count; i++) {
2334                 match_flags = spec_filter->filters[i].efs_match_flags;
2335
2336                 if (sfc_flow_is_match_with_vids(match_flags,
2337                                                 EFX_FILTER_MATCH_ETHER_TYPE) ||
2338                     sfc_flow_is_match_with_vids(match_flags,
2339                                                 EFX_FILTER_MATCH_ETHER_TYPE |
2340                                                 EFX_FILTER_MATCH_LOC_MAC)) {
2341                         ether_type = spec_filter->filters[i].efs_ether_type;
2342                         if (filter->supports_ip_proto_or_addr_filter &&
2343                             (ether_type == EFX_ETHER_TYPE_IPV4 ||
2344                              ether_type == EFX_ETHER_TYPE_IPV6))
2345                                 return B_TRUE;
2346                 } else if (sfc_flow_is_match_with_vids(match_flags,
2347                                 EFX_FILTER_MATCH_ETHER_TYPE |
2348                                 EFX_FILTER_MATCH_IP_PROTO) ||
2349                            sfc_flow_is_match_with_vids(match_flags,
2350                                 EFX_FILTER_MATCH_ETHER_TYPE |
2351                                 EFX_FILTER_MATCH_IP_PROTO |
2352                                 EFX_FILTER_MATCH_LOC_MAC)) {
2353                         ip_proto = spec_filter->filters[i].efs_ip_proto;
2354                         if (filter->supports_rem_or_local_port_filter &&
2355                             (ip_proto == EFX_IPPROTO_TCP ||
2356                              ip_proto == EFX_IPPROTO_UDP))
2357                                 return B_TRUE;
2358                 }
2359         }
2360
2361         return B_FALSE;
2362 }
2363
2364 static int
2365 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2366                               struct rte_flow *flow,
2367                               struct rte_flow_error *error)
2368 {
2369         struct sfc_flow_spec *spec = &flow->spec;
2370         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2371         efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2372         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2373         int rc;
2374
2375         /* Initialize the first filter spec with template */
2376         spec_filter->filters[0] = *spec_tmpl;
2377         spec_filter->count = 1;
2378
2379         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2380                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2381                 if (rc != 0)
2382                         return rc;
2383         }
2384
2385         if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2386                 rte_flow_error_set(error, ENOTSUP,
2387                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2388                         "The flow rule pattern is unsupported");
2389                 return -rte_errno;
2390         }
2391
2392         return 0;
2393 }
2394
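/**
 * Parse a flow rule into the filter-based (VNIC) specification: the
 * pattern is turned into a filter template, the actions are applied to
 * it, and the template is then completed into a set of filter copies
 * with a supported combination of match flags.
 */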
2395 static int
2396 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2397                              const struct rte_flow_item pattern[],
2398                              const struct rte_flow_action actions[],
2399                              struct rte_flow *flow,
2400                              struct rte_flow_error *error)
2401 {
2402         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2403         struct sfc_flow_spec *spec = &flow->spec;
2404         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2405         struct sfc_flow_parse_ctx ctx;
2406         int rc;
2407
2408         ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2409         ctx.filter = &spec_filter->template;
2410
2411         rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
2412                                     pattern, &ctx, error);
2413         if (rc != 0)
2414                 goto fail_bad_value;
2415
2416         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2417         if (rc != 0)
2418                 goto fail_bad_value;
2419
2420         rc = sfc_flow_validate_match_flags(sa, flow, error);
2421         if (rc != 0)
2422                 goto fail_bad_value;
2423
2424         return 0;
2425
2426 fail_bad_value:
2427         return rc;
2428 }
2429
2430 static int
2431 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
2432                           const struct rte_flow_item pattern[],
2433                           const struct rte_flow_action actions[],
2434                           struct rte_flow *flow,
2435                           struct rte_flow_error *error)
2436 {
2437         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2438         struct sfc_flow_spec *spec = &flow->spec;
2439         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2440         int rc;
2441
2442         rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
2443         if (rc != 0)
2444                 return rc;
2445
2446         rc = sfc_mae_rule_parse_actions(sa, actions, &spec_mae->action_set,
2447                                         error);
2448         if (rc != 0)
2449                 return rc;
2450
2451         return 0;
2452 }
2453
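/**
 * Parse a flow rule by dispatching to the backend selected while
 * parsing the attributes: transfer rules are handled by the MAE
 * backend, other rules by the filter-based (VNIC) backend.
 */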
2454 static int
2455 sfc_flow_parse(struct rte_eth_dev *dev,
2456                const struct rte_flow_attr *attr,
2457                const struct rte_flow_item pattern[],
2458                const struct rte_flow_action actions[],
2459                struct rte_flow *flow,
2460                struct rte_flow_error *error)
2461 {
2462         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2463         const struct sfc_flow_ops_by_spec *ops;
2464         int rc;
2465
2466         rc = sfc_flow_parse_attr(sa, attr, flow, error);
2467         if (rc != 0)
2468                 return rc;
2469
2470         ops = sfc_flow_get_ops_by_spec(flow);
2471         if (ops == NULL || ops->parse == NULL) {
2472                 rte_flow_error_set(error, ENOTSUP,
2473                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2474                                    "No backend to handle this flow");
2475                 return -rte_errno;
2476         }
2477
2478         return ops->parse(dev, pattern, actions, flow, error);
2479 }
2480
2481 static struct rte_flow *
2482 sfc_flow_zmalloc(struct rte_flow_error *error)
2483 {
2484         struct rte_flow *flow;
2485
2486         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2487         if (flow == NULL) {
2488                 rte_flow_error_set(error, ENOMEM,
2489                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2490                                    "Failed to allocate memory");
2491         }
2492
2493         return flow;
2494 }
2495
2496 static void
2497 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
2498 {
2499         const struct sfc_flow_ops_by_spec *ops;
2500
2501         ops = sfc_flow_get_ops_by_spec(flow);
2502         if (ops != NULL && ops->cleanup != NULL)
2503                 ops->cleanup(sa, flow);
2504
2505         rte_free(flow);
2506 }
2507
2508 static int
2509 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2510                 struct rte_flow_error *error)
2511 {
2512         const struct sfc_flow_ops_by_spec *ops;
2513         int rc;
2514
2515         ops = sfc_flow_get_ops_by_spec(flow);
2516         if (ops == NULL || ops->insert == NULL) {
2517                 rte_flow_error_set(error, ENOTSUP,
2518                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2519                                    "No backend to handle this flow");
2520                 return rte_errno;
2521         }
2522
2523         rc = ops->insert(sa, flow);
2524         if (rc != 0) {
2525                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2526                                    NULL, "Failed to insert the flow rule");
2527         }
2528
2529         return rc;
2530 }
2531
2532 static int
2533 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2534                 struct rte_flow_error *error)
2535 {
2536         const struct sfc_flow_ops_by_spec *ops;
2537         int rc;
2538
2539         ops = sfc_flow_get_ops_by_spec(flow);
2540         if (ops == NULL || ops->remove == NULL) {
2541                 rte_flow_error_set(error, ENOTSUP,
2542                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2543                                    "No backend to handle this flow");
2544                 return rte_errno;
2545         }
2546
2547         rc = ops->remove(sa, flow);
2548         if (rc != 0) {
2549                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2550                                    NULL, "Failed to remove the flow rule");
2551         }
2552
2553         return rc;
2554 }
2555
2556 static int
2557 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
2558                 struct rte_flow_error *error)
2559 {
2560         const struct sfc_flow_ops_by_spec *ops;
2561         int rc = 0;
2562
2563         ops = sfc_flow_get_ops_by_spec(flow);
2564         if (ops == NULL) {
2565                 rte_flow_error_set(error, ENOTSUP,
2566                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2567                                    "No backend to handle this flow");
2568                 return -rte_errno;
2569         }
2570
2571         if (ops->verify != NULL) {
2572                 /*
2573                  * Use locking since verify method may need to
2574                  * access the list of already created rules.
2575                  */
2576                 sfc_adapter_lock(sa);
2577                 rc = ops->verify(sa, flow);
2578                 sfc_adapter_unlock(sa);
2579         }
2580
2581         if (rc != 0) {
2582                 rte_flow_error_set(error, rc,
2583                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2584                         "Failed to verify flow validity with FW");
2585                 return -rte_errno;
2586         }
2587
2588         return 0;
2589 }
2590
2591 static int
2592 sfc_flow_validate(struct rte_eth_dev *dev,
2593                   const struct rte_flow_attr *attr,
2594                   const struct rte_flow_item pattern[],
2595                   const struct rte_flow_action actions[],
2596                   struct rte_flow_error *error)
2597 {
2598         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2599         struct rte_flow *flow;
2600         int rc;
2601
2602         flow = sfc_flow_zmalloc(error);
2603         if (flow == NULL)
2604                 return -rte_errno;
2605
2606         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2607         if (rc == 0)
2608                 rc = sfc_flow_verify(sa, flow, error);
2609
2610         sfc_flow_free(sa, flow);
2611
2612         return rc;
2613 }
2614
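/*
 * A minimal application-side sketch (illustrative only; port_id and
 * error handling are assumed) of a rule that ends up in this driver's
 * sfc_flow_create():
 *
 *      struct rte_flow_error err;
 *      const struct rte_flow_attr attr = { .ingress = 1 };
 *      const struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      const struct rte_flow_action_queue queue = { .index = 0 };
 *      const struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */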
2615 static struct rte_flow *
2616 sfc_flow_create(struct rte_eth_dev *dev,
2617                 const struct rte_flow_attr *attr,
2618                 const struct rte_flow_item pattern[],
2619                 const struct rte_flow_action actions[],
2620                 struct rte_flow_error *error)
2621 {
2622         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2623         struct rte_flow *flow = NULL;
2624         int rc;
2625
2626         flow = sfc_flow_zmalloc(error);
2627         if (flow == NULL)
2628                 goto fail_no_mem;
2629
2630         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2631         if (rc != 0)
2632                 goto fail_bad_value;
2633
2634         sfc_adapter_lock(sa);
2635
2636         TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2637
2638         if (sa->state == SFC_ADAPTER_STARTED) {
2639                 rc = sfc_flow_insert(sa, flow, error);
2640                 if (rc != 0)
2641                         goto fail_flow_insert;
2642         }
2643
2644         sfc_adapter_unlock(sa);
2645
2646         return flow;
2647
2648 fail_flow_insert:
2649         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2650
2651 fail_bad_value:
2652         sfc_flow_free(sa, flow);
2653         sfc_adapter_unlock(sa);
2654
2655 fail_no_mem:
2656         return NULL;
2657 }
2658
2659 static int
2660 sfc_flow_destroy(struct rte_eth_dev *dev,
2661                  struct rte_flow *flow,
2662                  struct rte_flow_error *error)
2663 {
2664         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2665         struct rte_flow *flow_ptr;
2666         int rc = EINVAL;
2667
2668         sfc_adapter_lock(sa);
2669
2670         TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2671                 if (flow_ptr == flow)
2672                         rc = 0;
2673         }
2674         if (rc != 0) {
2675                 rte_flow_error_set(error, rc,
2676                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2677                                    "Failed to find flow rule to destroy");
2678                 goto fail_bad_value;
2679         }
2680
2681         if (sa->state == SFC_ADAPTER_STARTED)
2682                 rc = sfc_flow_remove(sa, flow, error);
2683
2684         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2685         sfc_flow_free(sa, flow);
2686
2687 fail_bad_value:
2688         sfc_adapter_unlock(sa);
2689
2690         return -rc;
2691 }
2692
2693 static int
2694 sfc_flow_flush(struct rte_eth_dev *dev,
2695                struct rte_flow_error *error)
2696 {
2697         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2698         struct rte_flow *flow;
2699         int ret = 0;
2700
2701         sfc_adapter_lock(sa);
2702
2703         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2704                 if (sa->state == SFC_ADAPTER_STARTED) {
2705                         int rc;
2706
2707                         rc = sfc_flow_remove(sa, flow, error);
2708                         if (rc != 0)
2709                                 ret = rc;
2710                 }
2711
2712                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2713                 sfc_flow_free(sa, flow);
2714         }
2715
2716         sfc_adapter_unlock(sa);
2717
2718         return -ret;
2719 }
2720
2721 static int
2722 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2723                  struct rte_flow_error *error)
2724 {
2725         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2726         int ret = 0;
2727
2728         sfc_adapter_lock(sa);
2729         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2730                 rte_flow_error_set(error, EBUSY,
2731                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2732                                    NULL, "please close the port first");
2733                 ret = -rte_errno;
2734         } else {
2735                 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2736         }
2737         sfc_adapter_unlock(sa);
2738
2739         return ret;
2740 }
2741
2742 const struct rte_flow_ops sfc_flow_ops = {
2743         .validate = sfc_flow_validate,
2744         .create = sfc_flow_create,
2745         .destroy = sfc_flow_destroy,
2746         .flush = sfc_flow_flush,
2747         .query = NULL,
2748         .isolate = sfc_flow_isolate,
2749 };
2750
2751 void
2752 sfc_flow_init(struct sfc_adapter *sa)
2753 {
2754         SFC_ASSERT(sfc_adapter_is_locked(sa));
2755
2756         TAILQ_INIT(&sa->flow_list);
2757 }
2758
2759 void
2760 sfc_flow_fini(struct sfc_adapter *sa)
2761 {
2762         struct rte_flow *flow;
2763
2764         SFC_ASSERT(sfc_adapter_is_locked(sa));
2765
2766         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2767                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2768                 sfc_flow_free(sa, flow);
2769         }
2770 }
2771
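/*
 * Device stop removes the rules from hardware but keeps them on the
 * adapter flow list so that sfc_flow_start() can re-insert them when
 * the device is started again. The shared dummy RSS context, if one
 * was created, is released here and recreated on demand.
 */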
2772 void
2773 sfc_flow_stop(struct sfc_adapter *sa)
2774 {
2775         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
2776         struct sfc_rss *rss = &sas->rss;
2777         struct rte_flow *flow;
2778
2779         SFC_ASSERT(sfc_adapter_is_locked(sa));
2780
2781         TAILQ_FOREACH(flow, &sa->flow_list, entries)
2782                 sfc_flow_remove(sa, flow, NULL);
2783
2784         if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
2785                 efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
2786                 rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
2787         }
2788 }
2789
2790 int
2791 sfc_flow_start(struct sfc_adapter *sa)
2792 {
2793         struct rte_flow *flow;
2794         int rc = 0;
2795
2796         sfc_log_init(sa, "entry");
2797
2798         SFC_ASSERT(sfc_adapter_is_locked(sa));
2799
2800         TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2801                 rc = sfc_flow_insert(sa, flow, NULL);
2802                 if (rc != 0)
2803                         goto fail_bad_flow;
2804         }
2805
2806         sfc_log_init(sa, "done");
2807
2808 fail_bad_flow:
2809         return rc;
2810 }