/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

struct sfc_flow_ops_by_spec {
	sfc_flow_parse_cb_t	*parse;
	sfc_flow_verify_cb_t	*verify;
	sfc_flow_cleanup_cb_t	*cleanup;
	sfc_flow_insert_cb_t	*insert;
	sfc_flow_remove_cb_t	*remove;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
	.parse = sfc_flow_parse_rte_to_filter,
	.verify = NULL,
	.cleanup = NULL,
	.insert = sfc_flow_filter_insert,
	.remove = sfc_flow_filter_remove,
};

static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
	.parse = sfc_flow_parse_rte_to_mae,
	.verify = sfc_mae_flow_verify,
	.cleanup = sfc_mae_flow_cleanup,
	.insert = sfc_mae_flow_insert,
	.remove = sfc_mae_flow_remove,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	const struct sfc_flow_ops_by_spec *ops = NULL;

	switch (spec->type) {
	case SFC_FLOW_SPEC_FILTER:
		ops = &sfc_flow_ops_filter;
		break;
	case SFC_FLOW_SPEC_MAE:
		ops = &sfc_flow_ops_mae;
		break;
	default:
		SFC_ASSERT(false);
		break;
	}

	return ops;
}

/*
 * Currently, the filter-based (VNIC) flow API is implemented in such a manner
 * that each flow rule is converted to one or more hardware filters.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If a required field is unset in the flow rule, a set of filter
 * copies is created to cover all possible values of that field.
 */
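
/*
 * Illustrative sketch (not part of the driver) of the filter-copy
 * expansion described above: a rule that leaves the Ethernet destination
 * unset cannot use EFX_FILTER_MATCH_LOC_MAC, so the spec is duplicated
 * with each possible unknown-destination flag instead:
 *
 *	efx_filter_spec_t copies[2];
 *
 *	copies[0] = template;
 *	copies[0].efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
 *	copies[1] = template;
 *	copies[1].efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
 *
 * Both copies are inserted so that the single flow rule covers unicast
 * and multicast destinations alike (see sfc_flow_set_unknown_dst_flags).
 */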

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
static sfc_flow_item_parse sfc_flow_parse_pppoex;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of the corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" structures for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, the default mask is used;
	 * but if the default mask is NULL, "mask" must be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec", then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that the mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
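
/*
 * Usage sketch (illustrative, mirrors the parsers below): a protocol
 * parser passes its supported mask and the rte_flow default mask for the
 * item, then inspects the returned "spec"/"mask" pointers:
 *
 *	const struct rte_flow_item_udp *spec = NULL;
 *	const struct rte_flow_item_udp *mask = NULL;
 *	const struct rte_flow_item_udp supp_mask = {
 *		.hdr = { .src_port = 0xffff, .dst_port = 0xffff },
 *	};
 *
 *	rc = sfc_flow_parse_init(item,
 *				 (const void **)&spec,
 *				 (const void **)&mask,
 *				 &supp_mask, &rte_flow_item_udp_mask,
 *				 sizeof(struct rte_flow_item_udp), error);
 *
 * On success "spec" may still be NULL (a match-any item), while "mask"
 * points to either the item's mask or the supplied default.
 */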

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for the supported fields
 * specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. An outer frame specification may only comprise
 *   source/destination addresses and the EtherType field.
 *   An inner frame specification may contain the destination address only.
 *   There is support for the individual/group mask as well as for empty
 *   and full masks.
 *   If the mask is NULL, the default mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * EtherType masks are zero in the inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * The EtherType is in big-endian byte order in the item and
	 * in little-endian in efx_spec, so a byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
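
/*
 * Example (illustrative, not part of the driver): an ETH item this parser
 * accepts, matching a specific destination MAC with a full mask:
 *
 *	const struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 },
 *	};
 *	const struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &eth_spec,
 *		.mask = &eth_mask,
 *	};
 *
 * This sets EFX_FILTER_MATCH_LOC_MAC and copies the address into
 * efs_loc_mac; the 01:00:00:00:00:00 individual/group mask would select
 * the unknown unicast/multicast destination flags instead.
 */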

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * The VID is in big-endian byte order in the item and
	 * in little-endian in efx_spec, so a byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}
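
/*
 * Example (illustrative): two VLAN items in one pattern; the first is
 * matched as the outer tag and the second as the inner tag. The mask must
 * cover exactly the 12-bit VID:
 *
 *	const struct rte_flow_item_vlan outer_spec = {
 *		.tci = RTE_BE16(100),
 *	};
 *	const struct rte_flow_item_vlan inner_spec = {
 *		.tci = RTE_BE16(200),
 *	};
 *	const struct rte_flow_item_vlan vid_mask = {
 *		.tci = RTE_BE16(0x0fff),
 *	};
 *
 * A third VLAN item is rejected with "More than two VLAN items".
 */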

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   the protocol field are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in both the item
	 * and efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}
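
/*
 * Example (illustrative): an IPv4 item matching a destination host;
 * the addresses stay big-endian all the way into efx_spec, so no byte
 * swap is involved:
 *
 *	const struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr = { .dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)) },
 *	};
 *	const struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr = { .dst_addr = RTE_BE32(0xffffffff) },
 *	};
 *
 * This sets EFX_FILTER_MATCH_LOC_HOST and pins the filter EtherType
 * to IPv4.
 */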

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   the next header field are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in both the item
	 * and efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in the
	 * item and in little-endian in efx_spec, so a byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}
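
/*
 * Example (illustrative): a TCP item matching destination port 80; the
 * UDP parser below consumes the same shape:
 *
 *	const struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(80) },
 *	};
 *	const struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .dst_port = RTE_BE16(0xffff) },
 *	};
 *
 * This sets EFX_FILTER_MATCH_LOC_PORT (byte-swapping the port into the
 * little-endian efs_loc_port) and pins IP_PROTO to TCP.
 */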

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in the
	 * item and in little-endian in efx_spec, so a byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}
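
/*
 * Example (illustrative): a complete VXLAN pattern accepted by this
 * parser; the outer UDP layer supplies IP_PROTO and the 24-bit VNI is
 * matched with a full mask:
 *
 *	pattern: ETH / IPV4 / UDP / VXLAN / ETH / END
 *
 *	const struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x12, 0x34 },
 *	};
 *	const struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xff, 0xff, 0xff },
 *	};
 *
 * The trailing ETH item describes the inner frame and may match the
 * inner destination MAC only (see ifrm_supp_mask in sfc_flow_parse_eth).
 */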

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}

/**
 * Convert PPPoEx item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification.
 *   Matching on PPPoEx fields is not supported.
 *   This item can only be used to set or validate the EtherType filter.
 *   Only zero masks are allowed.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_pppoex(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_pppoe *spec = NULL;
	const struct rte_flow_item_pppoe *mask = NULL;
	const struct rte_flow_item_pppoe supp_mask = {};
	const struct rte_flow_item_pppoe def_mask = {};
	uint16_t ether_type;
	int rc;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &def_mask,
				 sizeof(struct rte_flow_item_pppoe),
				 error);
	if (rc != 0)
		return rc;

	if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
		ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
	else
		ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;

	if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
		if (efx_spec->efs_ether_type != ether_type) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Invalid EtherType for a PPPoE flow item");
			return -rte_errno;
		}
	} else {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type;
	}

	return 0;
}
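
/*
 * Example (illustrative): a PPPoE session pattern; the item carries no
 * matchable fields (masks must be zero) and only pins the EtherType:
 *
 *	pattern: ETH / PPPOES / END
 *
 * The resulting filter matches EtherType 0x8864
 * (RTE_ETHER_TYPE_PPPOE_SESSION); a PPPOED item pins 0x8863 instead.
 */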

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOED,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOES,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_nvgre,
	},
};
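
/*
 * Sketch (illustrative) of the layer bookkeeping encoded above: each
 * item's prev_layer must match the layer reached so far, so
 *
 *	ETH (L2) -> IPV4 (L3) -> TCP (L4)
 *
 * is accepted, whereas TCP directly after ETH fails with "Unexpected
 * sequence of pattern items". Tunnel items (VXLAN/GENEVE/NVGRE) reset
 * the layer to START_LAYER so that an inner ETH item may follow.
 */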

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
		    const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae *mae = &sa->mae;

	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Ingress is compulsory");
		return -rte_errno;
	}
	if (attr->transfer == 0) {
		if (attr->priority != 0) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Priorities are unsupported");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_FILTER;
		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
		spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
	} else {
		if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   attr, "Transfer is not supported");
			return -rte_errno;
		}
		if (attr->priority > mae->nb_action_rule_prios_max) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Unsupported priority level");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_MAE;
		spec_mae->priority = attr->priority;
		spec_mae->match_spec = NULL;
		spec_mae->action_set = NULL;
		spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	return 0;
}
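
/*
 * Example (illustrative): attributes selecting the two spec types handled
 * above. Ingress-only attributes yield SFC_FLOW_SPEC_FILTER; adding
 * transfer (with MAE support present) yields SFC_FLOW_SPEC_MAE:
 *
 *	const struct rte_flow_attr vnic_attr = {
 *		.ingress = 1,
 *	};
 *	const struct rte_flow_attr mae_attr = {
 *		.ingress = 1,
 *		.transfer = 1,
 *	};
 *
 * Note that ingress is compulsory in both cases and non-zero priorities
 * are accepted only for transfer rules.
 */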
1282
1283 /* Get item from array sfc_flow_items */
1284 static const struct sfc_flow_item *
1285 sfc_flow_get_item(const struct sfc_flow_item *items,
1286                   unsigned int nb_items,
1287                   enum rte_flow_item_type type)
1288 {
1289         unsigned int i;
1290
1291         for (i = 0; i < nb_items; i++)
1292                 if (items[i].type == type)
1293                         return &items[i];
1294
1295         return NULL;
1296 }
1297
1298 int
1299 sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
1300                        unsigned int nb_flow_items,
1301                        const struct rte_flow_item pattern[],
1302                        struct sfc_flow_parse_ctx *parse_ctx,
1303                        struct rte_flow_error *error)
1304 {
1305         int rc;
1306         unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1307         boolean_t is_ifrm = B_FALSE;
1308         const struct sfc_flow_item *item;
1309
1310         if (pattern == NULL) {
1311                 rte_flow_error_set(error, EINVAL,
1312                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1313                                    "NULL pattern");
1314                 return -rte_errno;
1315         }
1316
1317         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1318                 item = sfc_flow_get_item(flow_items, nb_flow_items,
1319                                          pattern->type);
1320                 if (item == NULL) {
1321                         rte_flow_error_set(error, ENOTSUP,
1322                                            RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1323                                            "Unsupported pattern item");
1324                         return -rte_errno;
1325                 }
1326
1327                 /*
1328                  * Omitting one or several protocol layers at the beginning
1329                  * of pattern is supported
1330                  */
1331                 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1332                     prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1333                     item->prev_layer != prev_layer) {
1334                         rte_flow_error_set(error, ENOTSUP,
1335                                            RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1336                                            "Unexpected sequence of pattern items");
1337                         return -rte_errno;
1338                 }
1339
1340                 /*
1341                  * Allow only VOID and ETH pattern items in the inner frame.
1342                  * Also check that there is only one tunneling protocol.
1343                  */
1344                 switch (item->type) {
1345                 case RTE_FLOW_ITEM_TYPE_VOID:
1346                 case RTE_FLOW_ITEM_TYPE_ETH:
1347                         break;
1348
1349                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1350                 case RTE_FLOW_ITEM_TYPE_GENEVE:
1351                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1352                         if (is_ifrm) {
1353                                 rte_flow_error_set(error, EINVAL,
1354                                         RTE_FLOW_ERROR_TYPE_ITEM,
1355                                         pattern,
1356                                         "More than one tunneling protocol");
1357                                 return -rte_errno;
1358                         }
1359                         is_ifrm = B_TRUE;
1360                         break;
1361
1362                 default:
1363                         if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
1364                             is_ifrm) {
1365                                 rte_flow_error_set(error, EINVAL,
1366                                         RTE_FLOW_ERROR_TYPE_ITEM,
1367                                         pattern,
1368                                         "There is an unsupported pattern item "
1369                                         "in the inner frame");
1370                                 return -rte_errno;
1371                         }
1372                         break;
1373                 }
1374
1375                 if (parse_ctx->type != item->ctx_type) {
1376                         rte_flow_error_set(error, EINVAL,
1377                                         RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1378                                         "Parse context type mismatch");
1379                         return -rte_errno;
1380                 }
1381
1382                 rc = item->parse(pattern, parse_ctx, error);
1383                 if (rc != 0)
1384                         return rc;
1385
1386                 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1387                         prev_layer = item->layer;
1388         }
1389
1390         return 0;
1391 }
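
/*
 * Illustrative example (added by the editor, not part of the driver):
 * a pattern accepted by sfc_flow_parse_pattern(). Items must follow the
 * outer-to-inner layer order enforced above, and leading layers may be
 * omitted:
 *
 *     static const struct rte_flow_item example_pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *
 * A single tunnel item (VXLAN, GENEVE or NVGRE) may follow to match
 * encapsulated traffic; a second one in the same pattern is rejected above.
 */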
1392
1393 static int
1394 sfc_flow_parse_queue(struct sfc_adapter *sa,
1395                      const struct rte_flow_action_queue *queue,
1396                      struct rte_flow *flow)
1397 {
1398         struct sfc_flow_spec *spec = &flow->spec;
1399         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1400         struct sfc_rxq *rxq;
1401         struct sfc_rxq_info *rxq_info;
1402
1403         if (queue->index >= sfc_sa2shared(sa)->rxq_count)
1404                 return -EINVAL;
1405
1406         rxq = &sa->rxq_ctrl[queue->index];
1407         spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1408
1409         rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
1410         spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
1411                                             SFC_RXQ_FLAG_RSS_HASH);
1412
1413         return 0;
1414 }
1415
1416 static int
1417 sfc_flow_parse_rss(struct sfc_adapter *sa,
1418                    const struct rte_flow_action_rss *action_rss,
1419                    struct rte_flow *flow)
1420 {
1421         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1422         struct sfc_rss *rss = &sas->rss;
1423         unsigned int rxq_sw_index;
1424         struct sfc_rxq *rxq;
1425         unsigned int rxq_hw_index_min;
1426         unsigned int rxq_hw_index_max;
1427         efx_rx_hash_type_t efx_hash_types;
1428         const uint8_t *rss_key;
1429         struct sfc_flow_spec *spec = &flow->spec;
1430         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1431         struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
1432         unsigned int i;
1433
1434         if (action_rss->queue_num == 0)
1435                 return -EINVAL;
1436
1437         rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
1438         rxq = &sa->rxq_ctrl[rxq_sw_index];
1439         rxq_hw_index_min = rxq->hw_index;
1440         rxq_hw_index_max = 0;
1441
1442         for (i = 0; i < action_rss->queue_num; ++i) {
1443                 rxq_sw_index = action_rss->queue[i];
1444
1445                 if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
1446                         return -EINVAL;
1447
1448                 rxq = &sa->rxq_ctrl[rxq_sw_index];
1449
1450                 if (rxq->hw_index < rxq_hw_index_min)
1451                         rxq_hw_index_min = rxq->hw_index;
1452
1453                 if (rxq->hw_index > rxq_hw_index_max)
1454                         rxq_hw_index_max = rxq->hw_index;
1455         }
1456
1457         switch (action_rss->func) {
1458         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1459         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1460                 break;
1461         default:
1462                 return -EINVAL;
1463         }
1464
1465         if (action_rss->level)
1466                 return -EINVAL;
1467
1468         /*
1469          * A dummy RSS action with only one queue and no specific settings
1470          * for hash types and key does not require a dedicated RSS context
1471          * and may be simplified to a single queue action.
1472          */
1473         if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1474             action_rss->key_len == 0) {
1475                 spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
1476                 return 0;
1477         }
1478
1479         if (action_rss->types) {
1480                 int rc;
1481
1482                 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1483                                           &efx_hash_types);
1484                 if (rc != 0)
1485                         return -rc;
1486         } else {
1487                 unsigned int i;
1488
1489                 efx_hash_types = 0;
1490                 for (i = 0; i < rss->hf_map_nb_entries; ++i)
1491                         efx_hash_types |= rss->hf_map[i].efx;
1492         }
1493
1494         if (action_rss->key_len) {
1495                 if (action_rss->key_len != sizeof(rss->key))
1496                         return -EINVAL;
1497
1498                 rss_key = action_rss->key;
1499         } else {
1500                 rss_key = rss->key;
1501         }
1502
1503         spec_filter->rss = B_TRUE;
1504
1505         sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1506         sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1507         sfc_rss_conf->rss_hash_types = efx_hash_types;
1508         rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1509
1510         for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1511                 unsigned int nb_queues = action_rss->queue_num;
1512                 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
1513                 struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
1514
1515                 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1516         }
1517
1518         return 0;
1519 }
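
/*
 * Worked example (added by the editor): suppose the RSS action lists three
 * queues whose hardware indices are 5, 6 and 7. Then rxq_hw_index_min = 5,
 * rxq_hw_index_max = 7, and the final loop above fills rss_tbl[] with the
 * base-relative entries 0, 1, 2, 0, 1, 2, ... by cycling over the action's
 * queue list.
 */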
1520
1521 static int
1522 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1523                     unsigned int filters_count)
1524 {
1525         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1526         unsigned int i;
1527         int ret = 0;
1528
1529         for (i = 0; i < filters_count; i++) {
1530                 int rc;
1531
1532                 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1533                 if (ret == 0 && rc != 0) {
1534                         sfc_err(sa, "failed to remove filter specification "
1535                                 "(rc = %d)", rc);
1536                         ret = rc;
1537                 }
1538         }
1539
1540         return ret;
1541 }
1542
1543 static int
1544 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1545 {
1546         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1547         unsigned int i;
1548         int rc = 0;
1549
1550         for (i = 0; i < spec_filter->count; i++) {
1551                 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1552                 if (rc != 0) {
1553                         sfc_flow_spec_flush(sa, spec, i);
1554                         break;
1555                 }
1556         }
1557
1558         return rc;
1559 }
1560
1561 static int
1562 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1563 {
1564         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1565
1566         return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1567 }
1568
1569 static int
1570 sfc_flow_filter_insert(struct sfc_adapter *sa,
1571                        struct rte_flow *flow)
1572 {
1573         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1574         struct sfc_rss *rss = &sas->rss;
1575         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1576         struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
1577         uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1578         boolean_t create_context;
1579         unsigned int i;
1580         int rc = 0;
1581
1582         create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
1583                         rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);
1584
1585         if (create_context) {
1586                 unsigned int rss_spread;
1587                 unsigned int rss_hash_types;
1588                 uint8_t *rss_key;
1589
1590                 if (spec_filter->rss) {
1591                         rss_spread = MIN(flow_rss->rxq_hw_index_max -
1592                                         flow_rss->rxq_hw_index_min + 1,
1593                                         EFX_MAXRSS);
1594                         rss_hash_types = flow_rss->rss_hash_types;
1595                         rss_key = flow_rss->rss_key;
1596                 } else {
1597                         /*
1598                          * Initialize dummy RSS context parameters to have
1599                          * valid RSS hash. Use default RSS hash function and
1600                          * key.
1601                          */
1602                         rss_spread = 1;
1603                         rss_hash_types = rss->hash_types;
1604                         rss_key = rss->key;
1605                 }
1606
1607                 rc = efx_rx_scale_context_alloc(sa->nic,
1608                                                 EFX_RX_SCALE_EXCLUSIVE,
1609                                                 rss_spread,
1610                                                 &efs_rss_context);
1611                 if (rc != 0)
1612                         goto fail_scale_context_alloc;
1613
1614                 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1615                                            rss->hash_alg,
1616                                            rss_hash_types, B_TRUE);
1617                 if (rc != 0)
1618                         goto fail_scale_mode_set;
1619
1620                 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1621                                           rss_key, sizeof(rss->key));
1622                 if (rc != 0)
1623                         goto fail_scale_key_set;
1624         } else {
1625                 efs_rss_context = rss->dummy_rss_context;
1626         }
1627
1628         if (spec_filter->rss || spec_filter->rss_hash_required) {
1629                 /*
1630                  * At this point, fully elaborated filter specifications
1631                  * have been produced from the template. To make sure that
1632                  * RSS behaviour is consistent between them, set the same
1633                  * RSS context value everywhere.
1634                  */
1635                 for (i = 0; i < spec_filter->count; i++) {
1636                         efx_filter_spec_t *spec = &spec_filter->filters[i];
1637
1638                         spec->efs_rss_context = efs_rss_context;
1639                         spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1640                         if (spec_filter->rss)
1641                                 spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1642                 }
1643         }
1644
1645         rc = sfc_flow_spec_insert(sa, &flow->spec);
1646         if (rc != 0)
1647                 goto fail_filter_insert;
1648
1649         if (create_context) {
1650                 unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
1651                 unsigned int *tbl;
1652
1653                 tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;
1654
1655                 /*
1656                  * The scale table is set after filter insertion because
1657                  * the table entries are relative to the base RxQ ID,
1658                  * and the latter is submitted to the HW by means of
1659                  * inserting a filter. By the time of the request, the
1660                  * HW therefore knows all the information needed to
1661                  * verify the table entries, and the operation succeeds.
1662                  */
1663                 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1664                                           tbl, RTE_DIM(flow_rss->rss_tbl));
1665                 if (rc != 0)
1666                         goto fail_scale_tbl_set;
1667
1668                 /* Remember created dummy RSS context */
1669                 if (!spec_filter->rss)
1670                         rss->dummy_rss_context = efs_rss_context;
1671         }
1672
1673         return 0;
1674
1675 fail_scale_tbl_set:
1676         sfc_flow_spec_remove(sa, &flow->spec);
1677
1678 fail_filter_insert:
1679 fail_scale_key_set:
1680 fail_scale_mode_set:
1681         if (create_context)
1682                 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1683
1684 fail_scale_context_alloc:
1685         return rc;
1686 }
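
/*
 * For reference (editor's summary of the code above): the EFX call sequence
 * used to set up an exclusive RSS context is
 *
 *     efx_rx_scale_context_alloc() -> efx_rx_scale_mode_set() ->
 *     efx_rx_scale_key_set() -> sfc_flow_spec_insert() ->
 *     efx_rx_scale_tbl_set()
 *
 * with efx_rx_scale_context_free() undoing the allocation on any failure.
 */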
1687
1688 static int
1689 sfc_flow_filter_remove(struct sfc_adapter *sa,
1690                        struct rte_flow *flow)
1691 {
1692         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1693         int rc = 0;
1694
1695         rc = sfc_flow_spec_remove(sa, &flow->spec);
1696         if (rc != 0)
1697                 return rc;
1698
1699         if (spec_filter->rss) {
1700                 /*
1701                  * All specifications for a given flow rule have the same RSS
1702                  * context, so the RSS context value is taken from the first
1703                  * filter specification.
1704                  */
1705                 efx_filter_spec_t *spec = &spec_filter->filters[0];
1706
1707                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1708         }
1709
1710         return rc;
1711 }
1712
1713 static int
1714 sfc_flow_parse_mark(struct sfc_adapter *sa,
1715                     const struct rte_flow_action_mark *mark,
1716                     struct rte_flow *flow)
1717 {
1718         struct sfc_flow_spec *spec = &flow->spec;
1719         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1720         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1721
1722         if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1723                 return EINVAL;
1724
1725         spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1726         spec_filter->template.efs_mark = mark->id;
1727
1728         return 0;
1729 }
1730
1731 static int
1732 sfc_flow_parse_actions(struct sfc_adapter *sa,
1733                        const struct rte_flow_action actions[],
1734                        struct rte_flow *flow,
1735                        struct rte_flow_error *error)
1736 {
1737         int rc;
1738         struct sfc_flow_spec *spec = &flow->spec;
1739         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1740         const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1741         uint32_t actions_set = 0;
1742         const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1743                                            (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1744                                            (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1745         const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1746                                            (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1747
1748         if (actions == NULL) {
1749                 rte_flow_error_set(error, EINVAL,
1750                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1751                                    "NULL actions");
1752                 return -rte_errno;
1753         }
1754
1755         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1756                 switch (actions->type) {
1757                 case RTE_FLOW_ACTION_TYPE_VOID:
1758                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1759                                                actions_set);
1760                         break;
1761
1762                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1763                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1764                                                actions_set);
1765                         if ((actions_set & fate_actions_mask) != 0)
1766                                 goto fail_fate_actions;
1767
1768                         rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1769                         if (rc != 0) {
1770                                 rte_flow_error_set(error, EINVAL,
1771                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1772                                         "Bad QUEUE action");
1773                                 return -rte_errno;
1774                         }
1775                         break;
1776
1777                 case RTE_FLOW_ACTION_TYPE_RSS:
1778                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1779                                                actions_set);
1780                         if ((actions_set & fate_actions_mask) != 0)
1781                                 goto fail_fate_actions;
1782
1783                         rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1784                         if (rc != 0) {
1785                                 rte_flow_error_set(error, -rc,
1786                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1787                                         "Bad RSS action");
1788                                 return -rte_errno;
1789                         }
1790                         break;
1791
1792                 case RTE_FLOW_ACTION_TYPE_DROP:
1793                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1794                                                actions_set);
1795                         if ((actions_set & fate_actions_mask) != 0)
1796                                 goto fail_fate_actions;
1797
1798                         spec_filter->template.efs_dmaq_id =
1799                                 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1800                         break;
1801
1802                 case RTE_FLOW_ACTION_TYPE_FLAG:
1803                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1804                                                actions_set);
1805                         if ((actions_set & mark_actions_mask) != 0)
1806                                 goto fail_actions_overlap;
1807
1808                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1809                                 rte_flow_error_set(error, ENOTSUP,
1810                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1811                                         "FLAG action is not supported on the current Rx datapath");
1812                                 return -rte_errno;
1813                         }
1814
1815                         spec_filter->template.efs_flags |=
1816                                 EFX_FILTER_FLAG_ACTION_FLAG;
1817                         break;
1818
1819                 case RTE_FLOW_ACTION_TYPE_MARK:
1820                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1821                                                actions_set);
1822                         if ((actions_set & mark_actions_mask) != 0)
1823                                 goto fail_actions_overlap;
1824
1825                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1826                                 rte_flow_error_set(error, ENOTSUP,
1827                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1828                                         "MARK action is not supported on the current Rx datapath");
1829                                 return -rte_errno;
1830                         }
1831
1832                         rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1833                         if (rc != 0) {
1834                                 rte_flow_error_set(error, rc,
1835                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1836                                         "Bad MARK action");
1837                                 return -rte_errno;
1838                         }
1839                         break;
1840
1841                 default:
1842                         rte_flow_error_set(error, ENOTSUP,
1843                                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1844                                            "Action is not supported");
1845                         return -rte_errno;
1846                 }
1847
1848                 actions_set |= (1UL << actions->type);
1849         }
1850
1851         /* When fate is unknown, drop traffic. */
1852         if ((actions_set & fate_actions_mask) == 0) {
1853                 spec_filter->template.efs_dmaq_id =
1854                         EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1855         }
1856
1857         return 0;
1858
1859 fail_fate_actions:
1860         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1861                            "Cannot combine several fate-deciding actions, "
1862                            "choose between QUEUE, RSS or DROP");
1863         return -rte_errno;
1864
1865 fail_actions_overlap:
1866         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1867                            "Overlapping actions are not supported");
1868         return -rte_errno;
1869 }
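
/*
 * Illustrative example (added by the editor, not part of the driver):
 * an action list accepted by sfc_flow_parse_actions(), combining one
 * mark-type action with one fate-deciding action:
 *
 *     static const struct rte_flow_action_mark example_mark = { .id = 1 };
 *     static const struct rte_flow_action_queue example_queue = { .index = 0 };
 *     static const struct rte_flow_action example_actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_mark },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *
 * A second fate action (e.g. DROP) or a second mark-type action (FLAG) in
 * the same list would be rejected by the checks above.
 */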
1870
1871 /**
1872  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1873  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1874  * specifications after copying.
1875  *
1876  * @param spec[in, out]
1877  *   SFC flow specification to update.
1878  * @param filters_count_for_one_val[in]
1879  *   How many specifications should have the same match flag; this equals
1880  *   the number of specifications before copying.
1881  * @param error[out]
1882  *   Perform verbose error reporting if not NULL.
1883  */
1884 static int
1885 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1886                                unsigned int filters_count_for_one_val,
1887                                struct rte_flow_error *error)
1888 {
1889         unsigned int i;
1890         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1891         static const efx_filter_match_flags_t vals[] = {
1892                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1893                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1894         };
1895
1896         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1897                 rte_flow_error_set(error, EINVAL,
1898                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1899                         "Number of specifications is incorrect while copying "
1900                         "by unknown destination flags");
1901                 return -rte_errno;
1902         }
1903
1904         for (i = 0; i < spec_filter->count; i++) {
1905                 /* The check above ensures the divisor can't be zero here */
1906                 spec_filter->filters[i].efs_match_flags |=
1907                         vals[i / filters_count_for_one_val];
1908         }
1909
1910         return 0;
1911 }
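
/*
 * Worked example (added by the editor): starting from a single
 * specification, sfc_flow_spec_add_match_flag() below duplicates it first,
 * so spec_filter->count is 2 and filters_count_for_one_val is 1 by the time
 * this function runs; the loop above then sets
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST on filters[0] and
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST on filters[1] via
 * vals[i / filters_count_for_one_val].
 */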
1912
1913 /**
1914  * Check that the following condition is met:
1915  * - the list of supported filters has a filter
1916  *   with the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1917  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1918  *   be inserted.
1919  *
1920  * @param match[in]
1921  *   The match flags of filter.
1922  * @param spec[in]
1923  *   Specification to be supplemented.
1924  * @param filter[in]
1925  *   SFC filter with list of supported filters.
1926  */
1927 static boolean_t
1928 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1929                                  __rte_unused efx_filter_spec_t *spec,
1930                                  struct sfc_filter *filter)
1931 {
1932         unsigned int i;
1933         efx_filter_match_flags_t match_mcast_dst;
1934
1935         match_mcast_dst =
1936                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1937                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1938         for (i = 0; i < filter->supported_match_num; i++) {
1939                 if (match_mcast_dst == filter->supported_match[i])
1940                         return B_TRUE;
1941         }
1942
1943         return B_FALSE;
1944 }
1945
1946 /**
1947  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1948  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1949  * specifications after copying.
1950  *
1951  * @param spec[in, out]
1952  *   SFC flow specification to update.
1953  * @param filters_count_for_one_val[in]
1954  *   How many specifications should have the same EtherType value; this
1955  *   equals the number of specifications before copying.
1956  * @param error[out]
1957  *   Perform verbose error reporting if not NULL.
1958  */
1959 static int
1960 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1961                         unsigned int filters_count_for_one_val,
1962                         struct rte_flow_error *error)
1963 {
1964         unsigned int i;
1965         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1966         static const uint16_t vals[] = {
1967                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1968         };
1969
1970         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1971                 rte_flow_error_set(error, EINVAL,
1972                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1973                         "Number of specifications is incorrect "
1974                         "while copying by Ethertype");
1975                 return -rte_errno;
1976         }
1977
1978         for (i = 0; i < spec_filter->count; i++) {
1979                 spec_filter->filters[i].efs_match_flags |=
1980                         EFX_FILTER_MATCH_ETHER_TYPE;
1981
1982                 /*
1983                  * The check above ensures that
1984                  * filters_count_for_one_val is not 0
1985                  */
1986                 spec_filter->filters[i].efs_ether_type =
1987                         vals[i / filters_count_for_one_val];
1988         }
1989
1990         return 0;
1991 }
1992
1993 /**
1994  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1995  * in the same specifications after copying.
1996  *
1997  * @param spec[in, out]
1998  *   SFC flow specification to update.
1999  * @param filters_count_for_one_val[in]
2000  *   How many specifications should have the same match flag; this equals
2001  *   the number of specifications before copying.
2002  * @param error[out]
2003  *   Perform verbose error reporting if not NULL.
2004  */
2005 static int
2006 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
2007                             unsigned int filters_count_for_one_val,
2008                             struct rte_flow_error *error)
2009 {
2010         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2011         unsigned int i;
2012
2013         if (filters_count_for_one_val != spec_filter->count) {
2014                 rte_flow_error_set(error, EINVAL,
2015                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2016                         "Number of specifications is incorrect "
2017                         "while copying by outer VLAN ID");
2018                 return -rte_errno;
2019         }
2020
2021         for (i = 0; i < spec_filter->count; i++) {
2022                 spec_filter->filters[i].efs_match_flags |=
2023                         EFX_FILTER_MATCH_OUTER_VID;
2024
2025                 spec_filter->filters[i].efs_outer_vid = 0;
2026         }
2027
2028         return 0;
2029 }
2030
2031 /**
2032  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
2033  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
2034  * specifications after copying.
2035  *
2036  * @param spec[in, out]
2037  *   SFC flow specification to update.
2038  * @param filters_count_for_one_val[in]
2039  *   How many specifications should have the same match flag; this equals
2040  *   the number of specifications before copying.
2041  * @param error[out]
2042  *   Perform verbose error reporting if not NULL.
2043  */
2044 static int
2045 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
2046                                     unsigned int filters_count_for_one_val,
2047                                     struct rte_flow_error *error)
2048 {
2049         unsigned int i;
2050         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2051         static const efx_filter_match_flags_t vals[] = {
2052                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2053                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
2054         };
2055
2056         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
2057                 rte_flow_error_set(error, EINVAL,
2058                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2059                         "Number of specifications is incorrect while copying "
2060                         "by inner frame unknown destination flags");
2061                 return -rte_errno;
2062         }
2063
2064         for (i = 0; i < spec_filter->count; i++) {
2065                 /* The check above ensures the divisor can't be zero here */
2066                 spec_filter->filters[i].efs_match_flags |=
2067                         vals[i / filters_count_for_one_val];
2068         }
2069
2070         return 0;
2071 }
2072
2073 /**
2074  * Check that the following conditions are met:
2075  * - the specification corresponds to a filter for encapsulated traffic
2076  * - the list of supported filters has a filter
2077  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
2078  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
2079  *   be inserted.
2080  *
2081  * @param match[in]
2082  *   The match flags of filter.
2083  * @param spec[in]
2084  *   Specification to be supplemented.
2085  * @param filter[in]
2086  *   SFC filter with list of supported filters.
2087  */
2088 static boolean_t
2089 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
2090                                       efx_filter_spec_t *spec,
2091                                       struct sfc_filter *filter)
2092 {
2093         unsigned int i;
2094         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
2095         efx_filter_match_flags_t match_mcast_dst;
2096
2097         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2098                 return B_FALSE;
2099
2100         match_mcast_dst =
2101                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
2102                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
2103         for (i = 0; i < filter->supported_match_num; i++) {
2104                 if (match_mcast_dst == filter->supported_match[i])
2105                         return B_TRUE;
2106         }
2107
2108         return B_FALSE;
2109 }
2110
2111 /**
2112  * Check that the list of supported filters has a filter that differs
2113  * from @p match in that it has no EFX_FILTER_MATCH_OUTER_VID flag;
2114  * in this case, that filter will be used and the flag
2115  * EFX_FILTER_MATCH_OUTER_VID is not needed.
2116  *
2117  * @param match[in]
2118  *   The match flags of filter.
2119  * @param spec[in]
2120  *   Specification to be supplemented.
2121  * @param filter[in]
2122  *   SFC filter with list of supported filters.
2123  */
2124 static boolean_t
2125 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2126                               __rte_unused efx_filter_spec_t *spec,
2127                               struct sfc_filter *filter)
2128 {
2129         unsigned int i;
2130         efx_filter_match_flags_t match_without_vid =
2131                 match & ~EFX_FILTER_MATCH_OUTER_VID;
2132
2133         for (i = 0; i < filter->supported_match_num; i++) {
2134                 if (match_without_vid == filter->supported_match[i])
2135                         return B_FALSE;
2136         }
2137
2138         return B_TRUE;
2139 }
2140
2141 /*
2142  * Match flags that can be automatically added to filters.
2143  * When searching for the flag to copy, ties in the minimal multiplier
2144  * are broken in favour of the last candidate. This gives
2145  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST a higher priority than
2146  * EFX_FILTER_MATCH_ETHER_TYPE, because filters with that flag are at
2147  * the end of the list of supported filters.
2148  */
2149 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2150         {
2151                 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2152                 .vals_count = 2,
2153                 .set_vals = sfc_flow_set_unknown_dst_flags,
2154                 .spec_check = sfc_flow_check_unknown_dst_flags,
2155         },
2156         {
2157                 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
2158                 .vals_count = 2,
2159                 .set_vals = sfc_flow_set_ethertypes,
2160                 .spec_check = NULL,
2161         },
2162         {
2163                 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2164                 .vals_count = 2,
2165                 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2166                 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2167         },
2168         {
2169                 .flag = EFX_FILTER_MATCH_OUTER_VID,
2170                 .vals_count = 1,
2171                 .set_vals = sfc_flow_set_outer_vid_flag,
2172                 .spec_check = sfc_flow_check_outer_vid_flag,
2173         },
2174 };
2175
2176 /* Get an item from the array sfc_flow_copy_flags */
2177 static const struct sfc_flow_copy_flag *
2178 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2179 {
2180         unsigned int i;
2181
2182         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2183                 if (sfc_flow_copy_flags[i].flag == flag)
2184                         return &sfc_flow_copy_flags[i];
2185         }
2186
2187         return NULL;
2188 }
2189
2190 /**
2191  * Make copies of the specifications and set the match flag and the
2192  * values of the corresponding field in them.
2193  *
2194  * @param spec[in, out]
2195  *   SFC flow specification to update.
2196  * @param flag[in]
2197  *   The match flag to add.
2198  * @param error[out]
2199  *   Perform verbose error reporting if not NULL.
2200  */
2201 static int
2202 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2203                              efx_filter_match_flags_t flag,
2204                              struct rte_flow_error *error)
2205 {
2206         unsigned int i;
2207         unsigned int new_filters_count;
2208         unsigned int filters_count_for_one_val;
2209         const struct sfc_flow_copy_flag *copy_flag;
2210         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2211         int rc;
2212
2213         copy_flag = sfc_flow_get_copy_flag(flag);
2214         if (copy_flag == NULL) {
2215                 rte_flow_error_set(error, ENOTSUP,
2216                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2217                                    "Unsupported spec field for copying");
2218                 return -rte_errno;
2219         }
2220
2221         new_filters_count = spec_filter->count * copy_flag->vals_count;
2222         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2223                 rte_flow_error_set(error, EINVAL,
2224                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2225                         "Too many EFX specifications in the flow rule");
2226                 return -rte_errno;
2227         }
2228
2229         /* Copy the filter specifications */
2230         for (i = spec_filter->count; i < new_filters_count; i++) {
2231                 spec_filter->filters[i] =
2232                         spec_filter->filters[i - spec_filter->count];
2233         }
2234
2235         filters_count_for_one_val = spec_filter->count;
2236         spec_filter->count = new_filters_count;
2237
2238         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2239         if (rc != 0)
2240                 return rc;
2241
2242         return 0;
2243 }
2244
2245 /**
2246  * Check that the given set of match flags missing in the original filter spec
2247  * could be covered by adding spec copies which specify the corresponding
2248  * flags and packet field values to match.
2249  *
2250  * @param miss_flags[in]
2251  *   Flags that are missing from the spec relative to the supported filter.
2252  * @param spec[in]
2253  *   Specification to be supplemented.
2254  * @param filter[in]
2255  *   SFC filter.
2256  *
2257  * @return
2258  *   Number of specifications after copying, or 0 if the flags cannot be added.
2259  */
2260 static unsigned int
2261 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2262                              efx_filter_spec_t *spec,
2263                              struct sfc_filter *filter)
2264 {
2265         unsigned int i;
2266         efx_filter_match_flags_t copy_flags = 0;
2267         efx_filter_match_flags_t flag;
2268         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2269         sfc_flow_spec_check *check;
2270         unsigned int multiplier = 1;
2271
2272         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2273                 flag = sfc_flow_copy_flags[i].flag;
2274                 check = sfc_flow_copy_flags[i].spec_check;
2275                 if ((flag & miss_flags) == flag) {
2276                         if (check != NULL && (!check(match, spec, filter)))
2277                                 continue;
2278
2279                         copy_flags |= flag;
2280                         multiplier *= sfc_flow_copy_flags[i].vals_count;
2281                 }
2282         }
2283
2284         if (copy_flags == miss_flags)
2285                 return multiplier;
2286
2287         return 0;
2288 }
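
/*
 * Worked example (added by the editor): if both EFX_FILTER_MATCH_ETHER_TYPE
 * and EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are missing and the
 * unknown-destination check passes, the multiplier is 2 * 2 = 4: each
 * specification would be copied for the two EtherType values and again for
 * the two unknown-destination flags.
 */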
2289
2290 /**
2291  * Attempt to supplement the specification template up to the minimally
2292  * supported set of match flags. To do this, it is necessary to copy
2293  * the specifications, filling them with the values of fields that
2294  * correspond to the missing flags.
2295  * The necessary and sufficient filter is built from the fewest
2296  * copies that can be made to cover the minimally required set
2297  * of flags.
2298  *
2299  * @param sa[in]
2300  *   SFC adapter.
2301  * @param spec[in, out]
2302  *   SFC flow specification to update.
2303  * @param error[out]
2304  *   Perform verbose error reporting if not NULL.
2305  */
2306 static int
2307 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2308                                struct sfc_flow_spec *spec,
2309                                struct rte_flow_error *error)
2310 {
2311         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2312         struct sfc_filter *filter = &sa->filter;
2313         efx_filter_match_flags_t miss_flags;
2314         efx_filter_match_flags_t min_miss_flags = 0;
2315         efx_filter_match_flags_t match;
2316         unsigned int min_multiplier = UINT_MAX;
2317         unsigned int multiplier;
2318         unsigned int i;
2319         int rc;
2320
2321         match = spec_filter->template.efs_match_flags;
2322         for (i = 0; i < filter->supported_match_num; i++) {
2323                 if ((match & filter->supported_match[i]) == match) {
2324                         miss_flags = filter->supported_match[i] & (~match);
2325                         multiplier = sfc_flow_check_missing_flags(miss_flags,
2326                                 &spec_filter->template, filter);
2327                         if (multiplier > 0) {
2328                                 if (multiplier <= min_multiplier) {
2329                                         min_multiplier = multiplier;
2330                                         min_miss_flags = miss_flags;
2331                                 }
2332                         }
2333                 }
2334         }
2335
2336         if (min_multiplier == UINT_MAX) {
2337                 rte_flow_error_set(error, ENOTSUP,
2338                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2339                                    "The flow rule pattern is unsupported");
2340                 return -rte_errno;
2341         }
2342
2343         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2344                 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2345
2346                 if ((flag & min_miss_flags) == flag) {
2347                         rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2348                         if (rc != 0)
2349                                 return rc;
2350                 }
2351         }
2352
2353         return 0;
2354 }
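
/*
 * Worked example (added by the editor): suppose the template matches only
 * EFX_FILTER_MATCH_LOC_MAC while the closest supported filter is
 * EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_ETHER_TYPE. Then miss_flags
 * is EFX_FILTER_MATCH_ETHER_TYPE and the multiplier is 2, so the single
 * specification is copied once and the two copies are completed with
 * EFX_ETHER_TYPE_IPV4 and EFX_ETHER_TYPE_IPV6 respectively. When several
 * supported filters are reachable this way, the one needing the fewest
 * copies wins.
 */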
2355
2356 /**
2357  * Check that set of match flags is referred to by a filter. Filter is
2358  * Check that the set of match flags is referred to by a filter. The filter
2359  * is described by its match flags, optionally extended with the OUTER_VID
2360  * and INNER_VID flags.
2361  * @param match_flags[in]
2362  *   Set of match flags.
2363  * @param flags_pattern[in]
2364  *   Pattern of filter match flags.
2365  */
2366 static boolean_t
2367 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2368                             efx_filter_match_flags_t flags_pattern)
2369 {
2370         if ((match_flags & flags_pattern) != flags_pattern)
2371                 return B_FALSE;
2372
2373         switch (match_flags & ~flags_pattern) {
2374         case 0:
2375         case EFX_FILTER_MATCH_OUTER_VID:
2376         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2377                 return B_TRUE;
2378         default:
2379                 return B_FALSE;
2380         }
2381 }
2382
2383 /**
2384  * Check whether the spec maps to a hardware filter which is known to be
2385  * ineffective despite being valid.
2386  *
2387  * @param filter[in]
2388  *   SFC filter with list of supported filters.
2389  * @param spec[in]
2390  *   SFC flow specification.
2391  */
2392 static boolean_t
2393 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2394                                   struct sfc_flow_spec *spec)
2395 {
2396         unsigned int i;
2397         uint16_t ether_type;
2398         uint8_t ip_proto;
2399         efx_filter_match_flags_t match_flags;
2400         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2401
2402         for (i = 0; i < spec_filter->count; i++) {
2403                 match_flags = spec_filter->filters[i].efs_match_flags;
2404
2405                 if (sfc_flow_is_match_with_vids(match_flags,
2406                                                 EFX_FILTER_MATCH_ETHER_TYPE) ||
2407                     sfc_flow_is_match_with_vids(match_flags,
2408                                                 EFX_FILTER_MATCH_ETHER_TYPE |
2409                                                 EFX_FILTER_MATCH_LOC_MAC)) {
2410                         ether_type = spec_filter->filters[i].efs_ether_type;
2411                         if (filter->supports_ip_proto_or_addr_filter &&
2412                             (ether_type == EFX_ETHER_TYPE_IPV4 ||
2413                              ether_type == EFX_ETHER_TYPE_IPV6))
2414                                 return B_TRUE;
2415                 } else if (sfc_flow_is_match_with_vids(match_flags,
2416                                 EFX_FILTER_MATCH_ETHER_TYPE |
2417                                 EFX_FILTER_MATCH_IP_PROTO) ||
2418                            sfc_flow_is_match_with_vids(match_flags,
2419                                 EFX_FILTER_MATCH_ETHER_TYPE |
2420                                 EFX_FILTER_MATCH_IP_PROTO |
2421                                 EFX_FILTER_MATCH_LOC_MAC)) {
2422                         ip_proto = spec_filter->filters[i].efs_ip_proto;
2423                         if (filter->supports_rem_or_local_port_filter &&
2424                             (ip_proto == EFX_IPPROTO_TCP ||
2425                              ip_proto == EFX_IPPROTO_UDP))
2426                                 return B_TRUE;
2427                 }
2428         }
2429
2430         return B_FALSE;
2431 }
2432
2433 static int
2434 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2435                               struct rte_flow *flow,
2436                               struct rte_flow_error *error)
2437 {
2438         struct sfc_flow_spec *spec = &flow->spec;
2439         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2440         efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2441         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2442         int rc;
2443
2444         /* Initialize the first filter spec with template */
2445         spec_filter->filters[0] = *spec_tmpl;
2446         spec_filter->count = 1;
2447
2448         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2449                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2450                 if (rc != 0)
2451                         return rc;
2452         }
2453
2454         if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2455                 rte_flow_error_set(error, ENOTSUP,
2456                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2457                         "The flow rule pattern is unsupported");
2458                 return -rte_errno;
2459         }
2460
2461         return 0;
2462 }
2463
2464 static int
2465 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2466                              const struct rte_flow_item pattern[],
2467                              const struct rte_flow_action actions[],
2468                              struct rte_flow *flow,
2469                              struct rte_flow_error *error)
2470 {
2471         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2472         struct sfc_flow_spec *spec = &flow->spec;
2473         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2474         struct sfc_flow_parse_ctx ctx;
2475         int rc;
2476
2477         ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2478         ctx.filter = &spec_filter->template;
2479
2480         rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
2481                                     pattern, &ctx, error);
2482         if (rc != 0)
2483                 goto fail_bad_value;
2484
2485         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2486         if (rc != 0)
2487                 goto fail_bad_value;
2488
2489         rc = sfc_flow_validate_match_flags(sa, flow, error);
2490         if (rc != 0)
2491                 goto fail_bad_value;
2492
2493         return 0;
2494
2495 fail_bad_value:
2496         return rc;
2497 }
2498
2499 static int
2500 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
2501                           const struct rte_flow_item pattern[],
2502                           const struct rte_flow_action actions[],
2503                           struct rte_flow *flow,
2504                           struct rte_flow_error *error)
2505 {
2506         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2507         struct sfc_flow_spec *spec = &flow->spec;
2508         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2509         int rc;
2510
2511         rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
2512         if (rc != 0)
2513                 return rc;
2514
2515         rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
2516         if (rc != 0)
2517                 return rc;
2518
2519         return 0;
2520 }
2521
2522 static int
2523 sfc_flow_parse(struct rte_eth_dev *dev,
2524                const struct rte_flow_attr *attr,
2525                const struct rte_flow_item pattern[],
2526                const struct rte_flow_action actions[],
2527                struct rte_flow *flow,
2528                struct rte_flow_error *error)
2529 {
2530         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2531         const struct sfc_flow_ops_by_spec *ops;
2532         int rc;
2533
2534         rc = sfc_flow_parse_attr(sa, attr, flow, error);
2535         if (rc != 0)
2536                 return rc;
2537
2538         ops = sfc_flow_get_ops_by_spec(flow);
2539         if (ops == NULL || ops->parse == NULL) {
2540                 rte_flow_error_set(error, ENOTSUP,
2541                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2542                                    "No backend to handle this flow");
2543                 return -rte_errno;
2544         }
2545
2546         return ops->parse(dev, pattern, actions, flow, error);
2547 }
2548
2549 static struct rte_flow *
2550 sfc_flow_zmalloc(struct rte_flow_error *error)
2551 {
2552         struct rte_flow *flow;
2553
2554         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2555         if (flow == NULL) {
2556                 rte_flow_error_set(error, ENOMEM,
2557                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2558                                    "Failed to allocate memory");
2559         }
2560
2561         return flow;
2562 }
2563
2564 static void
2565 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
2566 {
2567         const struct sfc_flow_ops_by_spec *ops;
2568
2569         ops = sfc_flow_get_ops_by_spec(flow);
2570         if (ops != NULL && ops->cleanup != NULL)
2571                 ops->cleanup(sa, flow);
2572
2573         rte_free(flow);
2574 }
2575
2576 static int
2577 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2578                 struct rte_flow_error *error)
2579 {
2580         const struct sfc_flow_ops_by_spec *ops;
2581         int rc;
2582
2583         ops = sfc_flow_get_ops_by_spec(flow);
2584         if (ops == NULL || ops->insert == NULL) {
2585                 rte_flow_error_set(error, ENOTSUP,
2586                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2587                                    "No backend to handle this flow");
2588                 return rte_errno;
2589         }
2590
2591         rc = ops->insert(sa, flow);
2592         if (rc != 0) {
2593                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2594                                    NULL, "Failed to insert the flow rule");
2595         }
2596
2597         return rc;
2598 }
2599
2600 static int
2601 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2602                 struct rte_flow_error *error)
2603 {
2604         const struct sfc_flow_ops_by_spec *ops;
2605         int rc;
2606
2607         ops = sfc_flow_get_ops_by_spec(flow);
2608         if (ops == NULL || ops->remove == NULL) {
2609                 rte_flow_error_set(error, ENOTSUP,
2610                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2611                                    "No backend to handle this flow");
2612                 return rte_errno;
2613         }
2614
2615         rc = ops->remove(sa, flow);
2616         if (rc != 0) {
2617                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2618                                    NULL, "Failed to remove the flow rule");
2619         }
2620
2621         return rc;
2622 }
2623
2624 static int
2625 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
2626                 struct rte_flow_error *error)
2627 {
2628         const struct sfc_flow_ops_by_spec *ops;
2629         int rc = 0;
2630
2631         ops = sfc_flow_get_ops_by_spec(flow);
2632         if (ops == NULL) {
2633                 rte_flow_error_set(error, ENOTSUP,
2634                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2635                                    "No backend to handle this flow");
2636                 return -rte_errno;
2637         }
2638
2639         if (ops->verify != NULL) {
2640                 SFC_ASSERT(sfc_adapter_is_locked(sa));
2641                 rc = ops->verify(sa, flow);
2642         }
2643
2644         if (rc != 0) {
2645                 rte_flow_error_set(error, rc,
2646                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2647                         "Failed to verify flow validity with FW");
2648                 return -rte_errno;
2649         }
2650
2651         return 0;
2652 }
2653
2654 static int
2655 sfc_flow_validate(struct rte_eth_dev *dev,
2656                   const struct rte_flow_attr *attr,
2657                   const struct rte_flow_item pattern[],
2658                   const struct rte_flow_action actions[],
2659                   struct rte_flow_error *error)
2660 {
2661         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2662         struct rte_flow *flow;
2663         int rc;
2664
2665         flow = sfc_flow_zmalloc(error);
2666         if (flow == NULL)
2667                 return -rte_errno;
2668
2669         sfc_adapter_lock(sa);
2670
2671         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2672         if (rc == 0)
2673                 rc = sfc_flow_verify(sa, flow, error);
2674
2675         sfc_flow_free(sa, flow);
2676
2677         sfc_adapter_unlock(sa);
2678
2679         return rc;
2680 }
2681
2682 static struct rte_flow *
2683 sfc_flow_create(struct rte_eth_dev *dev,
2684                 const struct rte_flow_attr *attr,
2685                 const struct rte_flow_item pattern[],
2686                 const struct rte_flow_action actions[],
2687                 struct rte_flow_error *error)
2688 {
2689         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2690         struct rte_flow *flow = NULL;
2691         int rc;
2692
2693         flow = sfc_flow_zmalloc(error);
2694         if (flow == NULL)
2695                 goto fail_no_mem;
2696
2697         sfc_adapter_lock(sa);
2698
2699         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2700         if (rc != 0)
2701                 goto fail_bad_value;
2702
2703         TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2704
2705         if (sa->state == SFC_ADAPTER_STARTED) {
2706                 rc = sfc_flow_insert(sa, flow, error);
2707                 if (rc != 0)
2708                         goto fail_flow_insert;
2709         }
2710
2711         sfc_adapter_unlock(sa);
2712
2713         return flow;
2714
2715 fail_flow_insert:
2716         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2717
2718 fail_bad_value:
2719         sfc_flow_free(sa, flow);
2720         sfc_adapter_unlock(sa);
2721
2722 fail_no_mem:
2723         return NULL;
2724 }
2725
2726 static int
2727 sfc_flow_destroy(struct rte_eth_dev *dev,
2728                  struct rte_flow *flow,
2729                  struct rte_flow_error *error)
2730 {
2731         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2732         struct rte_flow *flow_ptr;
2733         int rc = EINVAL;
2734
2735         sfc_adapter_lock(sa);
2736
2737         TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2738                 if (flow_ptr == flow)
2739                         rc = 0;
2740         }
2741         if (rc != 0) {
2742                 rte_flow_error_set(error, rc,
2743                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2744                                    "Failed to find flow rule to destroy");
2745                 goto fail_bad_value;
2746         }
2747
2748         if (sa->state == SFC_ADAPTER_STARTED)
2749                 rc = sfc_flow_remove(sa, flow, error);
2750
2751         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2752         sfc_flow_free(sa, flow);
2753
2754 fail_bad_value:
2755         sfc_adapter_unlock(sa);
2756
2757         return -rc;
2758 }
2759
2760 static int
2761 sfc_flow_flush(struct rte_eth_dev *dev,
2762                struct rte_flow_error *error)
2763 {
2764         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2765         struct rte_flow *flow;
2766         int ret = 0;
2767
2768         sfc_adapter_lock(sa);
2769
2770         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2771                 if (sa->state == SFC_ADAPTER_STARTED) {
2772                         int rc;
2773
2774                         rc = sfc_flow_remove(sa, flow, error);
2775                         if (rc != 0)
2776                                 ret = rc;
2777                 }
2778
2779                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2780                 sfc_flow_free(sa, flow);
2781         }
2782
2783         sfc_adapter_unlock(sa);
2784
2785         return -ret;
2786 }
2787
2788 static int
2789 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2790                  struct rte_flow_error *error)
2791 {
2792         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2793         int ret = 0;
2794
2795         sfc_adapter_lock(sa);
2796         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2797                 rte_flow_error_set(error, EBUSY,
2798                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2799                                    NULL, "Please close the port first");
2800                 ret = -rte_errno;
2801         } else {
2802                 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2803         }
2804         sfc_adapter_unlock(sa);
2805
2806         return ret;
2807 }
2808
2809 const struct rte_flow_ops sfc_flow_ops = {
2810         .validate = sfc_flow_validate,
2811         .create = sfc_flow_create,
2812         .destroy = sfc_flow_destroy,
2813         .flush = sfc_flow_flush,
2814         .query = NULL,
2815         .isolate = sfc_flow_isolate,
2816 };
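
/*
 * Illustrative usage (added by the editor, not part of the driver): an
 * application reaches these callbacks through the generic rte_flow API,
 * for example:
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *f;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * ethdev dispatches such calls to sfc_flow_validate() and sfc_flow_create()
 * for ports bound to this driver.
 */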
2817
2818 void
2819 sfc_flow_init(struct sfc_adapter *sa)
2820 {
2821         SFC_ASSERT(sfc_adapter_is_locked(sa));
2822
2823         TAILQ_INIT(&sa->flow_list);
2824 }
2825
2826 void
2827 sfc_flow_fini(struct sfc_adapter *sa)
2828 {
2829         struct rte_flow *flow;
2830
2831         SFC_ASSERT(sfc_adapter_is_locked(sa));
2832
2833         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2834                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2835                 sfc_flow_free(sa, flow);
2836         }
2837 }
2838
2839 void
2840 sfc_flow_stop(struct sfc_adapter *sa)
2841 {
2842         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
2843         struct sfc_rss *rss = &sas->rss;
2844         struct rte_flow *flow;
2845
2846         SFC_ASSERT(sfc_adapter_is_locked(sa));
2847
2848         TAILQ_FOREACH(flow, &sa->flow_list, entries)
2849                 sfc_flow_remove(sa, flow, NULL);
2850
2851         if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
2852                 efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
2853                 rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
2854         }
2855 }
2856
2857 int
2858 sfc_flow_start(struct sfc_adapter *sa)
2859 {
2860         struct rte_flow *flow;
2861         int rc = 0;
2862
2863         sfc_log_init(sa, "entry");
2864
2865         SFC_ASSERT(sfc_adapter_is_locked(sa));
2866
2867         TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2868                 rc = sfc_flow_insert(sa, flow, NULL);
2869                 if (rc != 0)
2870                         goto fail_bad_flow;
2871         }
2872
2873         sfc_log_init(sa, "done");
2874
2875 fail_bad_flow:
2876         return rc;
2877 }