net/sfc: prepare for internal Rx queue
[dpdk.git] / drivers / net / sfc / sfc_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2017-2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9
10 #include <rte_byteorder.h>
11 #include <rte_tailq.h>
12 #include <rte_common.h>
13 #include <ethdev_driver.h>
14 #include <rte_ether.h>
15 #include <rte_flow.h>
16 #include <rte_flow_driver.h>
17
18 #include "efx.h"
19
20 #include "sfc.h"
21 #include "sfc_debug.h"
22 #include "sfc_rx.h"
23 #include "sfc_filter.h"
24 #include "sfc_flow.h"
25 #include "sfc_log.h"
26 #include "sfc_dp_rx.h"
27
/*
 * Virtual method table for a flow rule, selected by the specification
 * type (see sfc_flow_get_ops_by_spec() below).
 */
struct sfc_flow_ops_by_spec {
	sfc_flow_parse_cb_t	*parse;   /* convert rte_flow rule to a spec */
	sfc_flow_verify_cb_t	*verify;  /* optional post-parse verification */
	sfc_flow_cleanup_cb_t	*cleanup; /* optional spec cleanup */
	sfc_flow_insert_cb_t	*insert;  /* apply the rule */
	sfc_flow_remove_cb_t	*remove;  /* withdraw the rule */
};
35
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

/* Operations for filter-based (VNIC) flow specifications */
static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
	.parse = sfc_flow_parse_rte_to_filter,
	.verify = NULL,		/* no extra verification step for filters */
	.cleanup = NULL,	/* nothing to release after parsing */
	.insert = sfc_flow_filter_insert,
	.remove = sfc_flow_filter_remove,
};

/* Operations for MAE flow specifications */
static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
	.parse = sfc_flow_parse_rte_to_mae,
	.verify = sfc_mae_flow_verify,
	.cleanup = sfc_mae_flow_cleanup,
	.insert = sfc_mae_flow_insert,
	.remove = sfc_mae_flow_remove,
};
56
57 static const struct sfc_flow_ops_by_spec *
58 sfc_flow_get_ops_by_spec(struct rte_flow *flow)
59 {
60         struct sfc_flow_spec *spec = &flow->spec;
61         const struct sfc_flow_ops_by_spec *ops = NULL;
62
63         switch (spec->type) {
64         case SFC_FLOW_SPEC_FILTER:
65                 ops = &sfc_flow_ops_filter;
66                 break;
67         case SFC_FLOW_SPEC_MAE:
68                 ops = &sfc_flow_ops_mae;
69                 break;
70         default:
71                 SFC_ASSERT(false);
72                 break;
73         }
74
75         return ops;
76 }
77
78 /*
79  * Currently, filter-based (VNIC) flow API is implemented in such a manner
80  * that each flow rule is converted to one or more hardware filters.
81  * All elements of flow rule (attributes, pattern items, actions)
82  * correspond to one or more fields in the efx_filter_spec_s structure
83  * that is responsible for the hardware filter.
84  * If some required field is unset in the flow rule, then a handful
85  * of filter copies will be created to cover all possible values
86  * of such a field.
87  */
88
/* Per-protocol pattern item parsers for the filter-based flow engine */
static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
static sfc_flow_item_parse sfc_flow_parse_pppoex;

/*
 * Fill in the values of an unspecified field across the copies of the
 * filter specification; filters_count_for_one_val copies correspond to
 * each single value of the field (see the comment above about filter
 * copies for unset fields).
 */
typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

/*
 * Predicate deciding whether a given match flag may be added to the
 * specification, given the filter capabilities.
 */
typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

/*
 * Describes how to expand a specification that leaves some field unset
 * into several filter copies covering every value of that field.
 */
struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
130
131 static boolean_t
132 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
133 {
134         uint8_t sum = 0;
135         unsigned int i;
136
137         for (i = 0; i < size; i++)
138                 sum |= buf[i];
139
140         return (sum == 0) ? B_TRUE : B_FALSE;
141 }
142
143 /*
144  * Validate item and prepare structures spec and mask for parsing
145  */
146 int
147 sfc_flow_parse_init(const struct rte_flow_item *item,
148                     const void **spec_ptr,
149                     const void **mask_ptr,
150                     const void *supp_mask,
151                     const void *def_mask,
152                     unsigned int size,
153                     struct rte_flow_error *error)
154 {
155         const uint8_t *spec;
156         const uint8_t *mask;
157         const uint8_t *last;
158         uint8_t supp;
159         unsigned int i;
160
161         if (item == NULL) {
162                 rte_flow_error_set(error, EINVAL,
163                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
164                                    "NULL item");
165                 return -rte_errno;
166         }
167
168         if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
169                 rte_flow_error_set(error, EINVAL,
170                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
171                                    "Mask or last is set without spec");
172                 return -rte_errno;
173         }
174
175         /*
176          * If "mask" is not set, default mask is used,
177          * but if default mask is NULL, "mask" should be set
178          */
179         if (item->mask == NULL) {
180                 if (def_mask == NULL) {
181                         rte_flow_error_set(error, EINVAL,
182                                 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
183                                 "Mask should be specified");
184                         return -rte_errno;
185                 }
186
187                 mask = def_mask;
188         } else {
189                 mask = item->mask;
190         }
191
192         spec = item->spec;
193         last = item->last;
194
195         if (spec == NULL)
196                 goto exit;
197
198         /*
199          * If field values in "last" are either 0 or equal to the corresponding
200          * values in "spec" then they are ignored
201          */
202         if (last != NULL &&
203             !sfc_flow_is_zero(last, size) &&
204             memcmp(last, spec, size) != 0) {
205                 rte_flow_error_set(error, ENOTSUP,
206                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
207                                    "Ranging is not supported");
208                 return -rte_errno;
209         }
210
211         if (supp_mask == NULL) {
212                 rte_flow_error_set(error, EINVAL,
213                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
214                         "Supported mask for item should be specified");
215                 return -rte_errno;
216         }
217
218         /* Check that mask does not ask for more match than supp_mask */
219         for (i = 0; i < size; i++) {
220                 supp = ((const uint8_t *)supp_mask)[i];
221
222                 if (~supp & mask[i]) {
223                         rte_flow_error_set(error, ENOTSUP,
224                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
225                                            "Item's field is not supported");
226                         return -rte_errno;
227                 }
228         }
229
230 exit:
231         *spec_ptr = spec;
232         *mask_ptr = mask;
233         return 0;
234 }
235
236 /*
237  * Protocol parsers.
238  * Masking is not supported, so masks in items should be either
239  * full or empty (zeroed) and set only for supported fields which
240  * are specified in the supp_mask.
241  */
242
/* VOID pattern items carry no match criteria: accept and skip them */
static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}
250
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	/* Fields which may be matched in the outer frame */
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	/* Only the destination address may be matched in the inner frame */
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	/* Individual/group bit of the destination MAC address */
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	/*
	 * If an encapsulation has already been seen, this ETH item
	 * describes the inner frame
	 */
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		/* Full destination address match */
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		/* Match on individual/group bit only */
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
366
/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask can not be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		/* Only the 12 VID bits of TCI may be matched (no PCP/DEI) */
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	/* def_mask is NULL: the item must carry an explicit mask */
	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	/*
	 * inner_type sets ETHER_TYPE below, so an already-set ethertype
	 * (e.g. from a preceding ETH item) cannot be matched together
	 * with the TPID
	 */
	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}
452
453 /**
454  * Convert IPv4 item to EFX filter specification.
455  *
456  * @param item[in]
457  *   Item specification. Only source and destination addresses and
458  *   protocol fields are supported. If the mask is NULL, default
459  *   mask will be used. Ranging is not supported.
460  * @param efx_spec[in, out]
461  *   EFX filter specification to update.
462  * @param[out] error
463  *   Perform verbose error reporting if not NULL.
464  */
465 static int
466 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
467                     struct sfc_flow_parse_ctx *parse_ctx,
468                     struct rte_flow_error *error)
469 {
470         int rc;
471         efx_filter_spec_t *efx_spec = parse_ctx->filter;
472         const struct rte_flow_item_ipv4 *spec = NULL;
473         const struct rte_flow_item_ipv4 *mask = NULL;
474         const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
475         const struct rte_flow_item_ipv4 supp_mask = {
476                 .hdr = {
477                         .src_addr = 0xffffffff,
478                         .dst_addr = 0xffffffff,
479                         .next_proto_id = 0xff,
480                 }
481         };
482
483         rc = sfc_flow_parse_init(item,
484                                  (const void **)&spec,
485                                  (const void **)&mask,
486                                  &supp_mask,
487                                  &rte_flow_item_ipv4_mask,
488                                  sizeof(struct rte_flow_item_ipv4),
489                                  error);
490         if (rc != 0)
491                 return rc;
492
493         /*
494          * Filtering by IPv4 source and destination addresses requires
495          * the appropriate ETHER_TYPE in hardware filters
496          */
497         if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
498                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
499                 efx_spec->efs_ether_type = ether_type_ipv4;
500         } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
501                 rte_flow_error_set(error, EINVAL,
502                         RTE_FLOW_ERROR_TYPE_ITEM, item,
503                         "Ethertype in pattern with IPV4 item should be appropriate");
504                 return -rte_errno;
505         }
506
507         if (spec == NULL)
508                 return 0;
509
510         /*
511          * IPv4 addresses are in big-endian byte order in item and in
512          * efx_spec
513          */
514         if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
515                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
516                 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
517         } else if (mask->hdr.src_addr != 0) {
518                 goto fail_bad_mask;
519         }
520
521         if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
522                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
523                 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
524         } else if (mask->hdr.dst_addr != 0) {
525                 goto fail_bad_mask;
526         }
527
528         if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
529                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
530                 efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
531         } else if (mask->hdr.next_proto_id != 0) {
532                 goto fail_bad_mask;
533         }
534
535         return 0;
536
537 fail_bad_mask:
538         rte_flow_error_set(error, EINVAL,
539                            RTE_FLOW_ERROR_TYPE_ITEM, item,
540                            "Bad mask in the IPV4 pattern item");
541         return -rte_errno;
542 }
543
/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	/* If "spec" is not set, could be any IPv6 packet */
	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		/* Sizes must agree for the rte_memcpy below to be safe */
		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		/* Only empty or full source address masks are supported */
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}
652
653 /**
654  * Convert TCP item to EFX filter specification.
655  *
656  * @param item[in]
657  *   Item specification. Only source and destination ports fields
658  *   are supported. If the mask is NULL, default mask will be used.
659  *   Ranging is not supported.
660  * @param efx_spec[in, out]
661  *   EFX filter specification to update.
662  * @param[out] error
663  *   Perform verbose error reporting if not NULL.
664  */
665 static int
666 sfc_flow_parse_tcp(const struct rte_flow_item *item,
667                    struct sfc_flow_parse_ctx *parse_ctx,
668                    struct rte_flow_error *error)
669 {
670         int rc;
671         efx_filter_spec_t *efx_spec = parse_ctx->filter;
672         const struct rte_flow_item_tcp *spec = NULL;
673         const struct rte_flow_item_tcp *mask = NULL;
674         const struct rte_flow_item_tcp supp_mask = {
675                 .hdr = {
676                         .src_port = 0xffff,
677                         .dst_port = 0xffff,
678                 }
679         };
680
681         rc = sfc_flow_parse_init(item,
682                                  (const void **)&spec,
683                                  (const void **)&mask,
684                                  &supp_mask,
685                                  &rte_flow_item_tcp_mask,
686                                  sizeof(struct rte_flow_item_tcp),
687                                  error);
688         if (rc != 0)
689                 return rc;
690
691         /*
692          * Filtering by TCP source and destination ports requires
693          * the appropriate IP_PROTO in hardware filters
694          */
695         if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
696                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
697                 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
698         } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
699                 rte_flow_error_set(error, EINVAL,
700                         RTE_FLOW_ERROR_TYPE_ITEM, item,
701                         "IP proto in pattern with TCP item should be appropriate");
702                 return -rte_errno;
703         }
704
705         if (spec == NULL)
706                 return 0;
707
708         /*
709          * Source and destination ports are in big-endian byte order in item and
710          * in little-endian in efx_spec, so byte swap is used
711          */
712         if (mask->hdr.src_port == supp_mask.hdr.src_port) {
713                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
714                 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
715         } else if (mask->hdr.src_port != 0) {
716                 goto fail_bad_mask;
717         }
718
719         if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
720                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
721                 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
722         } else if (mask->hdr.dst_port != 0) {
723                 goto fail_bad_mask;
724         }
725
726         return 0;
727
728 fail_bad_mask:
729         rte_flow_error_set(error, EINVAL,
730                            RTE_FLOW_ERROR_TYPE_ITEM, item,
731                            "Bad mask in the TCP pattern item");
732         return -rte_errno;
733 }
734
/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	/* If "spec" is not set, could be any UDP packet */
	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		/* Only empty or full port masks are supported */
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}
816
817 /*
818  * Filters for encapsulated packets match based on the EtherType and IP
819  * protocol in the outer frame.
820  */
821 static int
822 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
823                                         efx_filter_spec_t *efx_spec,
824                                         uint8_t ip_proto,
825                                         struct rte_flow_error *error)
826 {
827         if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
828                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
829                 efx_spec->efs_ip_proto = ip_proto;
830         } else if (efx_spec->efs_ip_proto != ip_proto) {
831                 switch (ip_proto) {
832                 case EFX_IPPROTO_UDP:
833                         rte_flow_error_set(error, EINVAL,
834                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
835                                 "Outer IP header protocol must be UDP "
836                                 "in VxLAN/GENEVE pattern");
837                         return -rte_errno;
838
839                 case EFX_IPPROTO_GRE:
840                         rte_flow_error_set(error, EINVAL,
841                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
842                                 "Outer IP header protocol must be GRE "
843                                 "in NVGRE pattern");
844                         return -rte_errno;
845
846                 default:
847                         rte_flow_error_set(error, EINVAL,
848                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
849                                 "Only VxLAN/GENEVE/NVGRE tunneling patterns "
850                                 "are supported");
851                         return -rte_errno;
852                 }
853         }
854
855         if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
856             efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
857             efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
858                 rte_flow_error_set(error, EINVAL,
859                         RTE_FLOW_ERROR_TYPE_ITEM, item,
860                         "Outer frame EtherType in pattern with tunneling "
861                         "must be IPv4 or IPv6");
862                 return -rte_errno;
863         }
864
865         return 0;
866 }
867
868 static int
869 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
870                                   const uint8_t *vni_or_vsid_val,
871                                   const uint8_t *vni_or_vsid_mask,
872                                   const struct rte_flow_item *item,
873                                   struct rte_flow_error *error)
874 {
875         const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
876                 0xff, 0xff, 0xff
877         };
878
879         if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
880                    EFX_VNI_OR_VSID_LEN) == 0) {
881                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
882                 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
883                            EFX_VNI_OR_VSID_LEN);
884         } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
885                 rte_flow_error_set(error, EINVAL,
886                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
887                                    "Unsupported VNI/VSID mask");
888                 return -rte_errno;
889         }
890
891         return 0;
892 }
893
/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only VXLAN network identifier field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return 0 on success, a negative errno value otherwise.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        /* Only a fully specified (or zero) VNI mask is supported */
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        /* VXLAN is UDP-based; the outer IP protocol must agree */
        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        /* A NULL spec matches any VXLAN packet */
        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}
945
/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only Virtual Network Identifier and protocol type
 *   fields are supported. But protocol type can be only Ethernet (0x6558).
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return 0 on success, a negative errno value otherwise.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      struct sfc_flow_parse_ctx *parse_ctx,
                      struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        /* Full match on protocol and VNI only (no ranging) */
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        /* GENEVE is UDP-based; the outer IP protocol must agree */
        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        /* A NULL spec matches any GENEVE packet */
        if (spec == NULL)
                return 0;

        /* Only Transparent Ethernet Bridging payload can be matched */
        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}
1014
/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only virtual subnet ID field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return 0 on success, a negative errno value otherwise.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        /* Only a fully specified (or zero) TNI mask is supported */
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        /* NVGRE is GRE-based; the outer IP protocol must agree */
        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        /* A NULL spec matches any NVGRE packet */
        if (spec == NULL)
                return 0;

        /* The TNI occupies the VNI/VSID field of the EFX specification */
        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}
1066
1067 /**
1068  * Convert PPPoEx item to EFX filter specification.
1069  *
1070  * @param item[in]
1071  *   Item specification.
1072  *   Matching on PPPoEx fields is not supported.
1073  *   This item can only be used to set or validate the EtherType filter.
1074  *   Only zero masks are allowed.
1075  *   Ranging is not supported.
1076  * @param efx_spec[in, out]
1077  *   EFX filter specification to update.
1078  * @param[out] error
1079  *   Perform verbose error reporting if not NULL.
1080  */
1081 static int
1082 sfc_flow_parse_pppoex(const struct rte_flow_item *item,
1083                       struct sfc_flow_parse_ctx *parse_ctx,
1084                       struct rte_flow_error *error)
1085 {
1086         efx_filter_spec_t *efx_spec = parse_ctx->filter;
1087         const struct rte_flow_item_pppoe *spec = NULL;
1088         const struct rte_flow_item_pppoe *mask = NULL;
1089         const struct rte_flow_item_pppoe supp_mask = {};
1090         const struct rte_flow_item_pppoe def_mask = {};
1091         uint16_t ether_type;
1092         int rc;
1093
1094         rc = sfc_flow_parse_init(item,
1095                                  (const void **)&spec,
1096                                  (const void **)&mask,
1097                                  &supp_mask,
1098                                  &def_mask,
1099                                  sizeof(struct rte_flow_item_pppoe),
1100                                  error);
1101         if (rc != 0)
1102                 return rc;
1103
1104         if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
1105                 ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
1106         else
1107                 ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;
1108
1109         if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
1110                 if (efx_spec->efs_ether_type != ether_type) {
1111                         rte_flow_error_set(error, EINVAL,
1112                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
1113                                            "Invalid EtherType for a PPPoE flow item");
1114                         return -rte_errno;
1115                 }
1116         } else {
1117                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
1118                 efx_spec->efs_ether_type = ether_type;
1119         }
1120
1121         return 0;
1122 }
1123
/*
 * Pattern item descriptors for the filter-based (non-MAE) backend.
 * The prev_layer/layer pair encodes the allowed item ordering; tunnel
 * items (VXLAN/GENEVE/NVGRE) reset the layer to START for the inner frame.
 */
static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PPPOED,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_pppoex,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PPPOES,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_pppoex,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_nvgre,
        },
};
1210
/*
 * Protocol-independent flow API support
 */

/**
 * Parse flow rule attributes and pick the backing specification type.
 *
 * Non-transfer rules are backed by hardware filters (groups, egress and
 * priorities are rejected); transfer rules are backed by MAE when the
 * adapter reports MAE support.
 *
 * @return 0 on success, a negative errno value otherwise.
 */
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
                    const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_flow_spec_mae *spec_mae = &spec->mae;
        struct sfc_mae *mae = &sa->mae;

        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Ingress is compulsory");
                return -rte_errno;
        }
        if (attr->transfer == 0) {
                /* Filter-based backend: no priority levels available */
                if (attr->priority != 0) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Priorities are unsupported");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_FILTER;
                spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
                spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
                spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
        } else {
                /* Transfer rules require MAE support in the firmware */
                if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                           attr, "Transfer is not supported");
                        return -rte_errno;
                }
                if (attr->priority > mae->nb_action_rule_prios_max) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Unsupported priority level");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_MAE;
                spec_mae->priority = attr->priority;
                spec_mae->match_spec = NULL;
                spec_mae->action_set = NULL;
                spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        return 0;
}
1282
1283 /* Get item from array sfc_flow_items */
1284 static const struct sfc_flow_item *
1285 sfc_flow_get_item(const struct sfc_flow_item *items,
1286                   unsigned int nb_items,
1287                   enum rte_flow_item_type type)
1288 {
1289         unsigned int i;
1290
1291         for (i = 0; i < nb_items; i++)
1292                 if (items[i].type == type)
1293                         return &items[i];
1294
1295         return NULL;
1296 }
1297
/**
 * Parse a flow rule pattern using the given item descriptor table.
 *
 * Walks the pattern once, checking item ordering via prev_layer/layer,
 * allowing at most one tunnel item and invoking each item's parse
 * callback on the supplied context.
 *
 * @param[in] flow_items
 *   Table of supported item descriptors.
 * @param[in] nb_flow_items
 *   Number of entries in @p flow_items.
 * @param[in] pattern
 *   Flow rule pattern terminated by an END item; must not be NULL.
 * @param[in, out] parse_ctx
 *   Backend-specific parse context updated by the item callbacks.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return 0 on success, a negative errno value otherwise.
 */
int
sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
                       unsigned int nb_flow_items,
                       const struct rte_flow_item pattern[],
                       struct sfc_flow_parse_ctx *parse_ctx,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;    /* set once a tunnel item is seen */
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(flow_items, nb_flow_items,
                                         pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        /* The inner-frame restriction applies only to the
                         * filter-based backend */
                        if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
                            is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                if (parse_ctx->type != item->ctx_type) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                        "Parse context type mismatch");
                        return -rte_errno;
                }

                rc = item->parse(pattern, parse_ctx, error);
                if (rc != 0)
                        return rc;

                /* ANY_LAYER items (e.g. VOID) do not advance the layer */
                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}
1392
1393 static int
1394 sfc_flow_parse_queue(struct sfc_adapter *sa,
1395                      const struct rte_flow_action_queue *queue,
1396                      struct rte_flow *flow)
1397 {
1398         struct sfc_flow_spec *spec = &flow->spec;
1399         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1400         struct sfc_rxq *rxq;
1401         struct sfc_rxq_info *rxq_info;
1402
1403         if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
1404                 return -EINVAL;
1405
1406         rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
1407         spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1408
1409         rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
1410         spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
1411                                             SFC_RXQ_FLAG_RSS_HASH);
1412
1413         return 0;
1414 }
1415
1416 static int
1417 sfc_flow_parse_rss(struct sfc_adapter *sa,
1418                    const struct rte_flow_action_rss *action_rss,
1419                    struct rte_flow *flow)
1420 {
1421         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1422         struct sfc_rss *rss = &sas->rss;
1423         sfc_ethdev_qid_t ethdev_qid;
1424         struct sfc_rxq *rxq;
1425         unsigned int rxq_hw_index_min;
1426         unsigned int rxq_hw_index_max;
1427         efx_rx_hash_type_t efx_hash_types;
1428         const uint8_t *rss_key;
1429         struct sfc_flow_spec *spec = &flow->spec;
1430         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1431         struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
1432         unsigned int i;
1433
1434         if (action_rss->queue_num == 0)
1435                 return -EINVAL;
1436
1437         ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
1438         rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
1439         rxq_hw_index_min = rxq->hw_index;
1440         rxq_hw_index_max = 0;
1441
1442         for (i = 0; i < action_rss->queue_num; ++i) {
1443                 ethdev_qid = action_rss->queue[i];
1444
1445                 if ((unsigned int)ethdev_qid >=
1446                     sfc_sa2shared(sa)->ethdev_rxq_count)
1447                         return -EINVAL;
1448
1449                 rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
1450
1451                 if (rxq->hw_index < rxq_hw_index_min)
1452                         rxq_hw_index_min = rxq->hw_index;
1453
1454                 if (rxq->hw_index > rxq_hw_index_max)
1455                         rxq_hw_index_max = rxq->hw_index;
1456         }
1457
1458         switch (action_rss->func) {
1459         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1460         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1461                 break;
1462         default:
1463                 return -EINVAL;
1464         }
1465
1466         if (action_rss->level)
1467                 return -EINVAL;
1468
1469         /*
1470          * Dummy RSS action with only one queue and no specific settings
1471          * for hash types and key does not require dedicated RSS context
1472          * and may be simplified to single queue action.
1473          */
1474         if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1475             action_rss->key_len == 0) {
1476                 spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
1477                 return 0;
1478         }
1479
1480         if (action_rss->types) {
1481                 int rc;
1482
1483                 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1484                                           &efx_hash_types);
1485                 if (rc != 0)
1486                         return -rc;
1487         } else {
1488                 unsigned int i;
1489
1490                 efx_hash_types = 0;
1491                 for (i = 0; i < rss->hf_map_nb_entries; ++i)
1492                         efx_hash_types |= rss->hf_map[i].efx;
1493         }
1494
1495         if (action_rss->key_len) {
1496                 if (action_rss->key_len != sizeof(rss->key))
1497                         return -EINVAL;
1498
1499                 rss_key = action_rss->key;
1500         } else {
1501                 rss_key = rss->key;
1502         }
1503
1504         spec_filter->rss = B_TRUE;
1505
1506         sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1507         sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1508         sfc_rss_conf->rss_hash_types = efx_hash_types;
1509         rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1510
1511         for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1512                 unsigned int nb_queues = action_rss->queue_num;
1513                 struct sfc_rxq *rxq;
1514
1515                 ethdev_qid = action_rss->queue[i % nb_queues];
1516                 rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
1517                 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1518         }
1519
1520         return 0;
1521 }
1522
1523 static int
1524 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1525                     unsigned int filters_count)
1526 {
1527         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1528         unsigned int i;
1529         int ret = 0;
1530
1531         for (i = 0; i < filters_count; i++) {
1532                 int rc;
1533
1534                 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1535                 if (ret == 0 && rc != 0) {
1536                         sfc_err(sa, "failed to remove filter specification "
1537                                 "(rc = %d)", rc);
1538                         ret = rc;
1539                 }
1540         }
1541
1542         return ret;
1543 }
1544
1545 static int
1546 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1547 {
1548         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1549         unsigned int i;
1550         int rc = 0;
1551
1552         for (i = 0; i < spec_filter->count; i++) {
1553                 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1554                 if (rc != 0) {
1555                         sfc_flow_spec_flush(sa, spec, i);
1556                         break;
1557                 }
1558         }
1559
1560         return rc;
1561 }
1562
1563 static int
1564 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1565 {
1566         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1567
1568         return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1569 }
1570
/*
 * Insert the flow rule's filter specifications into the HW and, when
 * needed, set up an RSS context for them.
 *
 * An exclusive RSS context is allocated when the rule carries an RSS
 * action (spec_filter->rss) or when a valid RSS hash is required and
 * the shared dummy RSS context has not been created yet; otherwise the
 * existing dummy context is reused.
 */
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	boolean_t create_context;
	unsigned int i;
	int rc = 0;

	create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
			rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);

	if (create_context) {
		unsigned int rss_spread;
		unsigned int rss_hash_types;
		uint8_t *rss_key;

		if (spec_filter->rss) {
			/* Spread is capped by the HW RSS limit EFX_MAXRSS */
			rss_spread = MIN(flow_rss->rxq_hw_index_max -
					flow_rss->rxq_hw_index_min + 1,
					EFX_MAXRSS);
			rss_hash_types = flow_rss->rss_hash_types;
			rss_key = flow_rss->rss_key;
		} else {
			/*
			 * Initialize dummy RSS context parameters to have
			 * valid RSS hash. Use default RSS hash function and
			 * key.
			 */
			rss_spread = 1;
			rss_hash_types = rss->hash_types;
			rss_key = rss->key;
		}

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   rss->hash_alg,
					   rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		/* Key length is fixed by the adapter-level RSS key size */
		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  rss_key, sizeof(rss->key));
		if (rc != 0)
			goto fail_scale_key_set;
	} else {
		efs_rss_context = rss->dummy_rss_context;
	}

	if (spec_filter->rss || spec_filter->rss_hash_required) {
		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < spec_filter->count; i++) {
			efx_filter_spec_t *spec = &spec_filter->filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
			if (spec_filter->rss)
				spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (create_context) {
		unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
		unsigned int *tbl;

		/* Dummy context gets an all-zero indirection table */
		tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;

		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  tbl, RTE_DIM(flow_rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;

		/* Remember created dummy RSS context */
		if (!spec_filter->rss)
			rss->dummy_rss_context = efs_rss_context;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	/* Free the context only if it was allocated by this call */
	if (create_context)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
}
1689
1690 static int
1691 sfc_flow_filter_remove(struct sfc_adapter *sa,
1692                        struct rte_flow *flow)
1693 {
1694         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1695         int rc = 0;
1696
1697         rc = sfc_flow_spec_remove(sa, &flow->spec);
1698         if (rc != 0)
1699                 return rc;
1700
1701         if (spec_filter->rss) {
1702                 /*
1703                  * All specifications for a given flow rule have the same RSS
1704                  * context, so that RSS context value is taken from the first
1705                  * filter specification
1706                  */
1707                 efx_filter_spec_t *spec = &spec_filter->filters[0];
1708
1709                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1710         }
1711
1712         return rc;
1713 }
1714
1715 static int
1716 sfc_flow_parse_mark(struct sfc_adapter *sa,
1717                     const struct rte_flow_action_mark *mark,
1718                     struct rte_flow *flow)
1719 {
1720         struct sfc_flow_spec *spec = &flow->spec;
1721         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1722         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1723
1724         if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1725                 return EINVAL;
1726
1727         spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1728         spec_filter->template.efs_mark = mark->id;
1729
1730         return 0;
1731 }
1732
/*
 * Parse the rte_flow action list into the flow's filter specification.
 *
 * Exactly one fate-deciding action (QUEUE, RSS or DROP) and at most one
 * mark-type action (MARK or FLAG) are accepted; duplicates and overlaps
 * are rejected. If no fate action is given, the rule drops traffic.
 *
 * Returns 0 on success; otherwise sets @p error and returns -rte_errno.
 */
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
	/* Bitmask of action types already seen, indexed by action type */
	uint32_t actions_set = 0;
	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
					       actions_set);
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
					       actions_set);
			/* Only one fate-deciding action is allowed */
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				/* sfc_flow_parse_rss() returns -errno */
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			spec_filter->template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
					       actions_set);
			/* FLAG and MARK are mutually exclusive */
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"FLAG action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			spec_filter->template.efs_flags |=
				EFX_FILTER_FLAG_ACTION_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"MARK action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
			if (rc != 0) {
				/* sfc_flow_parse_mark() returns +errno */
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad MARK action");
				return -rte_errno;
			}
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}

		actions_set |= (1UL << actions->type);
	}

	/* When fate is unknown, drop traffic. */
	if ((actions_set & fate_actions_mask) == 0) {
		spec_filter->template.efs_dmaq_id =
			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
	}

	return 0;

fail_fate_actions:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Cannot combine several fate-deciding actions, "
			   "choose between QUEUE, RSS or DROP");
	return -rte_errno;

fail_actions_overlap:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Overlapping actions are not supported");
	return -rte_errno;
}
1872
1873 /**
1874  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1875  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1876  * specifications after copying.
1877  *
1878  * @param spec[in, out]
1879  *   SFC flow specification to update.
1880  * @param filters_count_for_one_val[in]
1881  *   How many specifications should have the same match flag, what is the
1882  *   number of specifications before copying.
1883  * @param error[out]
1884  *   Perform verbose error reporting if not NULL.
1885  */
1886 static int
1887 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1888                                unsigned int filters_count_for_one_val,
1889                                struct rte_flow_error *error)
1890 {
1891         unsigned int i;
1892         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1893         static const efx_filter_match_flags_t vals[] = {
1894                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1895                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1896         };
1897
1898         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1899                 rte_flow_error_set(error, EINVAL,
1900                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1901                         "Number of specifications is incorrect while copying "
1902                         "by unknown destination flags");
1903                 return -rte_errno;
1904         }
1905
1906         for (i = 0; i < spec_filter->count; i++) {
1907                 /* The check above ensures that divisor can't be zero here */
1908                 spec_filter->filters[i].efs_match_flags |=
1909                         vals[i / filters_count_for_one_val];
1910         }
1911
1912         return 0;
1913 }
1914
1915 /**
1916  * Check that the following conditions are met:
1917  * - the list of supported filters has a filter
1918  *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1919  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1920  *   be inserted.
1921  *
1922  * @param match[in]
1923  *   The match flags of filter.
1924  * @param spec[in]
1925  *   Specification to be supplemented.
1926  * @param filter[in]
1927  *   SFC filter with list of supported filters.
1928  */
1929 static boolean_t
1930 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1931                                  __rte_unused efx_filter_spec_t *spec,
1932                                  struct sfc_filter *filter)
1933 {
1934         unsigned int i;
1935         efx_filter_match_flags_t match_mcast_dst;
1936
1937         match_mcast_dst =
1938                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1939                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1940         for (i = 0; i < filter->supported_match_num; i++) {
1941                 if (match_mcast_dst == filter->supported_match[i])
1942                         return B_TRUE;
1943         }
1944
1945         return B_FALSE;
1946 }
1947
1948 /**
1949  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1950  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1951  * specifications after copying.
1952  *
1953  * @param spec[in, out]
1954  *   SFC flow specification to update.
1955  * @param filters_count_for_one_val[in]
1956  *   How many specifications should have the same EtherType value, what is the
1957  *   number of specifications before copying.
1958  * @param error[out]
1959  *   Perform verbose error reporting if not NULL.
1960  */
1961 static int
1962 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1963                         unsigned int filters_count_for_one_val,
1964                         struct rte_flow_error *error)
1965 {
1966         unsigned int i;
1967         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1968         static const uint16_t vals[] = {
1969                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1970         };
1971
1972         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1973                 rte_flow_error_set(error, EINVAL,
1974                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1975                         "Number of specifications is incorrect "
1976                         "while copying by Ethertype");
1977                 return -rte_errno;
1978         }
1979
1980         for (i = 0; i < spec_filter->count; i++) {
1981                 spec_filter->filters[i].efs_match_flags |=
1982                         EFX_FILTER_MATCH_ETHER_TYPE;
1983
1984                 /*
1985                  * The check above ensures that
1986                  * filters_count_for_one_val is not 0
1987                  */
1988                 spec_filter->filters[i].efs_ether_type =
1989                         vals[i / filters_count_for_one_val];
1990         }
1991
1992         return 0;
1993 }
1994
1995 /**
1996  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1997  * in the same specifications after copying.
1998  *
1999  * @param spec[in, out]
2000  *   SFC flow specification to update.
2001  * @param filters_count_for_one_val[in]
2002  *   How many specifications should have the same match flag, what is the
2003  *   number of specifications before copying.
2004  * @param error[out]
2005  *   Perform verbose error reporting if not NULL.
2006  */
2007 static int
2008 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
2009                             unsigned int filters_count_for_one_val,
2010                             struct rte_flow_error *error)
2011 {
2012         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2013         unsigned int i;
2014
2015         if (filters_count_for_one_val != spec_filter->count) {
2016                 rte_flow_error_set(error, EINVAL,
2017                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2018                         "Number of specifications is incorrect "
2019                         "while copying by outer VLAN ID");
2020                 return -rte_errno;
2021         }
2022
2023         for (i = 0; i < spec_filter->count; i++) {
2024                 spec_filter->filters[i].efs_match_flags |=
2025                         EFX_FILTER_MATCH_OUTER_VID;
2026
2027                 spec_filter->filters[i].efs_outer_vid = 0;
2028         }
2029
2030         return 0;
2031 }
2032
2033 /**
2034  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
2035  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
2036  * specifications after copying.
2037  *
2038  * @param spec[in, out]
2039  *   SFC flow specification to update.
2040  * @param filters_count_for_one_val[in]
2041  *   How many specifications should have the same match flag, what is the
2042  *   number of specifications before copying.
2043  * @param error[out]
2044  *   Perform verbose error reporting if not NULL.
2045  */
2046 static int
2047 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
2048                                     unsigned int filters_count_for_one_val,
2049                                     struct rte_flow_error *error)
2050 {
2051         unsigned int i;
2052         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2053         static const efx_filter_match_flags_t vals[] = {
2054                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2055                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
2056         };
2057
2058         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
2059                 rte_flow_error_set(error, EINVAL,
2060                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2061                         "Number of specifications is incorrect while copying "
2062                         "by inner frame unknown destination flags");
2063                 return -rte_errno;
2064         }
2065
2066         for (i = 0; i < spec_filter->count; i++) {
2067                 /* The check above ensures that divisor can't be zero here */
2068                 spec_filter->filters[i].efs_match_flags |=
2069                         vals[i / filters_count_for_one_val];
2070         }
2071
2072         return 0;
2073 }
2074
2075 /**
2076  * Check that the following conditions are met:
2077  * - the specification corresponds to a filter for encapsulated traffic
2078  * - the list of supported filters has a filter
2079  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
2080  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
2081  *   be inserted.
2082  *
2083  * @param match[in]
2084  *   The match flags of filter.
2085  * @param spec[in]
2086  *   Specification to be supplemented.
2087  * @param filter[in]
2088  *   SFC filter with list of supported filters.
2089  */
2090 static boolean_t
2091 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
2092                                       efx_filter_spec_t *spec,
2093                                       struct sfc_filter *filter)
2094 {
2095         unsigned int i;
2096         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
2097         efx_filter_match_flags_t match_mcast_dst;
2098
2099         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2100                 return B_FALSE;
2101
2102         match_mcast_dst =
2103                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
2104                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
2105         for (i = 0; i < filter->supported_match_num; i++) {
2106                 if (match_mcast_dst == filter->supported_match[i])
2107                         return B_TRUE;
2108         }
2109
2110         return B_FALSE;
2111 }
2112
2113 /**
2114  * Check that the list of supported filters has a filter that differs
2115  * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID
2116  * in this case that filter will be used and the flag
2117  * EFX_FILTER_MATCH_OUTER_VID is not needed.
2118  *
2119  * @param match[in]
2120  *   The match flags of filter.
2121  * @param spec[in]
2122  *   Specification to be supplemented.
2123  * @param filter[in]
2124  *   SFC filter with list of supported filters.
2125  */
2126 static boolean_t
2127 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2128                               __rte_unused efx_filter_spec_t *spec,
2129                               struct sfc_filter *filter)
2130 {
2131         unsigned int i;
2132         efx_filter_match_flags_t match_without_vid =
2133                 match & ~EFX_FILTER_MATCH_OUTER_VID;
2134
2135         for (i = 0; i < filter->supported_match_num; i++) {
2136                 if (match_without_vid == filter->supported_match[i])
2137                         return B_FALSE;
2138         }
2139
2140         return B_TRUE;
2141 }
2142
2143 /*
2144  * Match flags that can be automatically added to filters.
2145  * Selecting the last minimum when searching for the copy flag ensures that the
2146  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
2147  * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
2148  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
2149  * filters.
2150  */
2151 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2152         {
2153                 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2154                 .vals_count = 2,
2155                 .set_vals = sfc_flow_set_unknown_dst_flags,
2156                 .spec_check = sfc_flow_check_unknown_dst_flags,
2157         },
2158         {
2159                 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
2160                 .vals_count = 2,
2161                 .set_vals = sfc_flow_set_ethertypes,
2162                 .spec_check = NULL,
2163         },
2164         {
2165                 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2166                 .vals_count = 2,
2167                 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2168                 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2169         },
2170         {
2171                 .flag = EFX_FILTER_MATCH_OUTER_VID,
2172                 .vals_count = 1,
2173                 .set_vals = sfc_flow_set_outer_vid_flag,
2174                 .spec_check = sfc_flow_check_outer_vid_flag,
2175         },
2176 };
2177
2178 /* Get item from array sfc_flow_copy_flags */
2179 static const struct sfc_flow_copy_flag *
2180 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2181 {
2182         unsigned int i;
2183
2184         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2185                 if (sfc_flow_copy_flags[i].flag == flag)
2186                         return &sfc_flow_copy_flags[i];
2187         }
2188
2189         return NULL;
2190 }
2191
2192 /**
2193  * Make copies of the specifications, set match flag and values
2194  * of the field that corresponds to it.
2195  *
2196  * @param spec[in, out]
2197  *   SFC flow specification to update.
2198  * @param flag[in]
2199  *   The match flag to add.
2200  * @param error[out]
2201  *   Perform verbose error reporting if not NULL.
2202  */
2203 static int
2204 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2205                              efx_filter_match_flags_t flag,
2206                              struct rte_flow_error *error)
2207 {
2208         unsigned int i;
2209         unsigned int new_filters_count;
2210         unsigned int filters_count_for_one_val;
2211         const struct sfc_flow_copy_flag *copy_flag;
2212         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2213         int rc;
2214
2215         copy_flag = sfc_flow_get_copy_flag(flag);
2216         if (copy_flag == NULL) {
2217                 rte_flow_error_set(error, ENOTSUP,
2218                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2219                                    "Unsupported spec field for copying");
2220                 return -rte_errno;
2221         }
2222
2223         new_filters_count = spec_filter->count * copy_flag->vals_count;
2224         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2225                 rte_flow_error_set(error, EINVAL,
2226                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2227                         "Too much EFX specifications in the flow rule");
2228                 return -rte_errno;
2229         }
2230
2231         /* Copy filters specifications */
2232         for (i = spec_filter->count; i < new_filters_count; i++) {
2233                 spec_filter->filters[i] =
2234                         spec_filter->filters[i - spec_filter->count];
2235         }
2236
2237         filters_count_for_one_val = spec_filter->count;
2238         spec_filter->count = new_filters_count;
2239
2240         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2241         if (rc != 0)
2242                 return rc;
2243
2244         return 0;
2245 }
2246
/**
 * Check that the given set of match flags missing in the original filter spec
 * could be covered by adding spec copies which specify the corresponding
 * flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Match flags required by a supported filter class but absent from
 *   the specification.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   Number of specifications after copy or 0, if the flags can not be added.
 */
static unsigned int
sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
			     efx_filter_spec_t *spec,
			     struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t copy_flags = 0;
	efx_filter_match_flags_t flag;
	/* The full match set that would result from adding the missing flags */
	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
	sfc_flow_spec_check *check;
	/* Each coverable flag multiplies the spec count by its values count */
	unsigned int multiplier = 1;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		flag = sfc_flow_copy_flags[i].flag;
		check = sfc_flow_copy_flags[i].spec_check;
		if ((flag & miss_flags) == flag) {
			/*
			 * Skip this flag if its optional applicability check
			 * rejects the resulting match set for this filter.
			 */
			if (check != NULL && (!check(match, spec, filter)))
				continue;

			copy_flags |= flag;
			multiplier *= sfc_flow_copy_flags[i].vals_count;
		}
	}

	/* Succeed only if every missing flag is coverable by a copy flag */
	if (copy_flags == miss_flags)
		return multiplier;

	return 0;
}
2291
/**
 * Attempt to supplement the specification template to the minimally
 * supported set of match flags. To do this, it is necessary to copy
 * the specifications, filling them with the values of fields that
 * correspond to the missing flags.
 * The necessary and sufficient filter is built from the fewest number
 * of copies which could be made to cover the minimally required set
 * of flags.
 *
 * @param sa[in]
 *   SFC adapter.
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, or -rte_errno if no supported filter class can cover
 *   the requested match flags.
 */
static int
sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
			       struct sfc_flow_spec *spec,
			       struct rte_flow_error *error)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_filter *filter = &sa->filter;
	efx_filter_match_flags_t miss_flags;
	efx_filter_match_flags_t min_miss_flags = 0;
	efx_filter_match_flags_t match;
	/* UINT_MAX is a sentinel: no suitable supported class found yet */
	unsigned int min_multiplier = UINT_MAX;
	unsigned int multiplier;
	unsigned int i;
	int rc;

	/*
	 * Scan supported match flag sets that are supersets of the template
	 * flags and pick the one reachable with the fewest spec copies.
	 */
	match = spec_filter->template.efs_match_flags;
	for (i = 0; i < filter->supported_match_num; i++) {
		if ((match & filter->supported_match[i]) == match) {
			miss_flags = filter->supported_match[i] & (~match);
			multiplier = sfc_flow_check_missing_flags(miss_flags,
				&spec_filter->template, filter);
			if (multiplier > 0) {
				/* '<=' deliberately prefers later entries on a tie */
				if (multiplier <= min_multiplier) {
					min_multiplier = multiplier;
					min_miss_flags = miss_flags;
				}
			}
		}
	}

	if (min_multiplier == UINT_MAX) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "The flow rule pattern is unsupported");
		return -rte_errno;
	}

	/* Add each chosen missing flag by duplicating specs with its values */
	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;

		if ((flag & min_miss_flags) == flag) {
			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}
2357
2358 /**
2359  * Check that set of match flags is referred to by a filter. Filter is
2360  * described by match flags with the ability to add OUTER_VID and INNER_VID
2361  * flags.
2362  *
2363  * @param match_flags[in]
2364  *   Set of match flags.
2365  * @param flags_pattern[in]
2366  *   Pattern of filter match flags.
2367  */
2368 static boolean_t
2369 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2370                             efx_filter_match_flags_t flags_pattern)
2371 {
2372         if ((match_flags & flags_pattern) != flags_pattern)
2373                 return B_FALSE;
2374
2375         switch (match_flags & ~flags_pattern) {
2376         case 0:
2377         case EFX_FILTER_MATCH_OUTER_VID:
2378         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2379                 return B_TRUE;
2380         default:
2381                 return B_FALSE;
2382         }
2383 }
2384
/**
 * Check whether the spec maps to a hardware filter which is known to be
 * ineffective despite being valid.
 *
 * @param filter[in]
 *   SFC filter with list of supported filters.
 * @param spec[in]
 *   SFC flow specification.
 */
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
				  struct sfc_flow_spec *spec)
{
	unsigned int i;
	uint16_t ether_type;
	uint8_t ip_proto;
	efx_filter_match_flags_t match_flags;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;

	/* A single match flag combination among the copies is enough */
	for (i = 0; i < spec_filter->count; i++) {
		match_flags = spec_filter->filters[i].efs_match_flags;

		/*
		 * EtherType match (optionally with local MAC and VIDs) on
		 * IPv4/IPv6 is an exception when the adapter also supports
		 * IP protocol or address filters.
		 */
		if (sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE) ||
		    sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE |
						EFX_FILTER_MATCH_LOC_MAC)) {
			ether_type = spec_filter->filters[i].efs_ether_type;
			if (filter->supports_ip_proto_or_addr_filter &&
			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
			     ether_type == EFX_ETHER_TYPE_IPV6))
				return B_TRUE;
		} else if (sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO) ||
			   sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO |
				EFX_FILTER_MATCH_LOC_MAC)) {
			/*
			 * Likewise, IP protocol match on TCP/UDP is an
			 * exception when remote/local port filters are
			 * supported.
			 */
			ip_proto = spec_filter->filters[i].efs_ip_proto;
			if (filter->supports_rem_or_local_port_filter &&
			    (ip_proto == EFX_IPPROTO_TCP ||
			     ip_proto == EFX_IPPROTO_UDP))
				return B_TRUE;
		}
	}

	return B_FALSE;
}
2434
2435 static int
2436 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2437                               struct rte_flow *flow,
2438                               struct rte_flow_error *error)
2439 {
2440         struct sfc_flow_spec *spec = &flow->spec;
2441         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2442         efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2443         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2444         int rc;
2445
2446         /* Initialize the first filter spec with template */
2447         spec_filter->filters[0] = *spec_tmpl;
2448         spec_filter->count = 1;
2449
2450         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2451                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2452                 if (rc != 0)
2453                         return rc;
2454         }
2455
2456         if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2457                 rte_flow_error_set(error, ENOTSUP,
2458                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2459                         "The flow rule pattern is unsupported");
2460                 return -rte_errno;
2461         }
2462
2463         return 0;
2464 }
2465
2466 static int
2467 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2468                              const struct rte_flow_item pattern[],
2469                              const struct rte_flow_action actions[],
2470                              struct rte_flow *flow,
2471                              struct rte_flow_error *error)
2472 {
2473         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2474         struct sfc_flow_spec *spec = &flow->spec;
2475         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2476         struct sfc_flow_parse_ctx ctx;
2477         int rc;
2478
2479         ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2480         ctx.filter = &spec_filter->template;
2481
2482         rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
2483                                     pattern, &ctx, error);
2484         if (rc != 0)
2485                 goto fail_bad_value;
2486
2487         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2488         if (rc != 0)
2489                 goto fail_bad_value;
2490
2491         rc = sfc_flow_validate_match_flags(sa, flow, error);
2492         if (rc != 0)
2493                 goto fail_bad_value;
2494
2495         return 0;
2496
2497 fail_bad_value:
2498         return rc;
2499 }
2500
2501 static int
2502 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
2503                           const struct rte_flow_item pattern[],
2504                           const struct rte_flow_action actions[],
2505                           struct rte_flow *flow,
2506                           struct rte_flow_error *error)
2507 {
2508         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2509         struct sfc_flow_spec *spec = &flow->spec;
2510         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2511         int rc;
2512
2513         rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
2514         if (rc != 0)
2515                 return rc;
2516
2517         rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
2518         if (rc != 0)
2519                 return rc;
2520
2521         return 0;
2522 }
2523
2524 static int
2525 sfc_flow_parse(struct rte_eth_dev *dev,
2526                const struct rte_flow_attr *attr,
2527                const struct rte_flow_item pattern[],
2528                const struct rte_flow_action actions[],
2529                struct rte_flow *flow,
2530                struct rte_flow_error *error)
2531 {
2532         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2533         const struct sfc_flow_ops_by_spec *ops;
2534         int rc;
2535
2536         rc = sfc_flow_parse_attr(sa, attr, flow, error);
2537         if (rc != 0)
2538                 return rc;
2539
2540         ops = sfc_flow_get_ops_by_spec(flow);
2541         if (ops == NULL || ops->parse == NULL) {
2542                 rte_flow_error_set(error, ENOTSUP,
2543                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2544                                    "No backend to handle this flow");
2545                 return -rte_errno;
2546         }
2547
2548         return ops->parse(dev, pattern, actions, flow, error);
2549 }
2550
2551 static struct rte_flow *
2552 sfc_flow_zmalloc(struct rte_flow_error *error)
2553 {
2554         struct rte_flow *flow;
2555
2556         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2557         if (flow == NULL) {
2558                 rte_flow_error_set(error, ENOMEM,
2559                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2560                                    "Failed to allocate memory");
2561         }
2562
2563         return flow;
2564 }
2565
2566 static void
2567 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
2568 {
2569         const struct sfc_flow_ops_by_spec *ops;
2570
2571         ops = sfc_flow_get_ops_by_spec(flow);
2572         if (ops != NULL && ops->cleanup != NULL)
2573                 ops->cleanup(sa, flow);
2574
2575         rte_free(flow);
2576 }
2577
2578 static int
2579 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2580                 struct rte_flow_error *error)
2581 {
2582         const struct sfc_flow_ops_by_spec *ops;
2583         int rc;
2584
2585         ops = sfc_flow_get_ops_by_spec(flow);
2586         if (ops == NULL || ops->insert == NULL) {
2587                 rte_flow_error_set(error, ENOTSUP,
2588                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2589                                    "No backend to handle this flow");
2590                 return rte_errno;
2591         }
2592
2593         rc = ops->insert(sa, flow);
2594         if (rc != 0) {
2595                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2596                                    NULL, "Failed to insert the flow rule");
2597         }
2598
2599         return rc;
2600 }
2601
2602 static int
2603 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2604                 struct rte_flow_error *error)
2605 {
2606         const struct sfc_flow_ops_by_spec *ops;
2607         int rc;
2608
2609         ops = sfc_flow_get_ops_by_spec(flow);
2610         if (ops == NULL || ops->remove == NULL) {
2611                 rte_flow_error_set(error, ENOTSUP,
2612                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2613                                    "No backend to handle this flow");
2614                 return rte_errno;
2615         }
2616
2617         rc = ops->remove(sa, flow);
2618         if (rc != 0) {
2619                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2620                                    NULL, "Failed to remove the flow rule");
2621         }
2622
2623         return rc;
2624 }
2625
2626 static int
2627 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
2628                 struct rte_flow_error *error)
2629 {
2630         const struct sfc_flow_ops_by_spec *ops;
2631         int rc = 0;
2632
2633         ops = sfc_flow_get_ops_by_spec(flow);
2634         if (ops == NULL) {
2635                 rte_flow_error_set(error, ENOTSUP,
2636                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2637                                    "No backend to handle this flow");
2638                 return -rte_errno;
2639         }
2640
2641         if (ops->verify != NULL) {
2642                 SFC_ASSERT(sfc_adapter_is_locked(sa));
2643                 rc = ops->verify(sa, flow);
2644         }
2645
2646         if (rc != 0) {
2647                 rte_flow_error_set(error, rc,
2648                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2649                         "Failed to verify flow validity with FW");
2650                 return -rte_errno;
2651         }
2652
2653         return 0;
2654 }
2655
2656 static int
2657 sfc_flow_validate(struct rte_eth_dev *dev,
2658                   const struct rte_flow_attr *attr,
2659                   const struct rte_flow_item pattern[],
2660                   const struct rte_flow_action actions[],
2661                   struct rte_flow_error *error)
2662 {
2663         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2664         struct rte_flow *flow;
2665         int rc;
2666
2667         flow = sfc_flow_zmalloc(error);
2668         if (flow == NULL)
2669                 return -rte_errno;
2670
2671         sfc_adapter_lock(sa);
2672
2673         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2674         if (rc == 0)
2675                 rc = sfc_flow_verify(sa, flow, error);
2676
2677         sfc_flow_free(sa, flow);
2678
2679         sfc_adapter_unlock(sa);
2680
2681         return rc;
2682 }
2683
/*
 * rte_flow create callback: parse the rule, add it to the per-port list
 * and insert it into hardware if the port is already started.
 * Returns NULL with the flow error set on failure.
 */
static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow = NULL;
	int rc;

	flow = sfc_flow_zmalloc(error);
	if (flow == NULL)
		goto fail_no_mem;

	sfc_adapter_lock(sa);

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	/* Track the rule on the port regardless of hardware insertion */
	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);

	/* Hardware insertion is deferred until start for a stopped port */
	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_insert(sa, flow, error);
		if (rc != 0)
			goto fail_flow_insert;
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_flow_insert:
	TAILQ_REMOVE(&sa->flow_list, flow, entries);

fail_bad_value:
	sfc_flow_free(sa, flow);
	sfc_adapter_unlock(sa);

fail_no_mem:
	return NULL;
}
2727
2728 static int
2729 sfc_flow_destroy(struct rte_eth_dev *dev,
2730                  struct rte_flow *flow,
2731                  struct rte_flow_error *error)
2732 {
2733         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2734         struct rte_flow *flow_ptr;
2735         int rc = EINVAL;
2736
2737         sfc_adapter_lock(sa);
2738
2739         TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2740                 if (flow_ptr == flow)
2741                         rc = 0;
2742         }
2743         if (rc != 0) {
2744                 rte_flow_error_set(error, rc,
2745                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2746                                    "Failed to find flow rule to destroy");
2747                 goto fail_bad_value;
2748         }
2749
2750         if (sa->state == SFC_ADAPTER_STARTED)
2751                 rc = sfc_flow_remove(sa, flow, error);
2752
2753         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2754         sfc_flow_free(sa, flow);
2755
2756 fail_bad_value:
2757         sfc_adapter_unlock(sa);
2758
2759         return -rc;
2760 }
2761
2762 static int
2763 sfc_flow_flush(struct rte_eth_dev *dev,
2764                struct rte_flow_error *error)
2765 {
2766         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2767         struct rte_flow *flow;
2768         int ret = 0;
2769
2770         sfc_adapter_lock(sa);
2771
2772         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2773                 if (sa->state == SFC_ADAPTER_STARTED) {
2774                         int rc;
2775
2776                         rc = sfc_flow_remove(sa, flow, error);
2777                         if (rc != 0)
2778                                 ret = rc;
2779                 }
2780
2781                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2782                 sfc_flow_free(sa, flow);
2783         }
2784
2785         sfc_adapter_unlock(sa);
2786
2787         return -ret;
2788 }
2789
2790 static int
2791 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2792                  struct rte_flow_error *error)
2793 {
2794         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2795         int ret = 0;
2796
2797         sfc_adapter_lock(sa);
2798         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2799                 rte_flow_error_set(error, EBUSY,
2800                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2801                                    NULL, "please close the port first");
2802                 ret = -rte_errno;
2803         } else {
2804                 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2805         }
2806         sfc_adapter_unlock(sa);
2807
2808         return ret;
2809 }
2810
/* rte_flow driver callbacks registered for the port; query is unsupported */
const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
	.isolate = sfc_flow_isolate,
};
2819
2820 void
2821 sfc_flow_init(struct sfc_adapter *sa)
2822 {
2823         SFC_ASSERT(sfc_adapter_is_locked(sa));
2824
2825         TAILQ_INIT(&sa->flow_list);
2826 }
2827
2828 void
2829 sfc_flow_fini(struct sfc_adapter *sa)
2830 {
2831         struct rte_flow *flow;
2832
2833         SFC_ASSERT(sfc_adapter_is_locked(sa));
2834
2835         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2836                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2837                 sfc_flow_free(sa, flow);
2838         }
2839 }
2840
void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/*
	 * Withdraw all rules from hardware but keep them in the list so
	 * that sfc_flow_start() can re-insert them.
	 */
	TAILQ_FOREACH(flow, &sa->flow_list, entries)
		sfc_flow_remove(sa, flow, NULL);

	/* Release the dummy RSS context if one was allocated */
	if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
		efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
		rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	}
}
2858
2859 int
2860 sfc_flow_start(struct sfc_adapter *sa)
2861 {
2862         struct rte_flow *flow;
2863         int rc = 0;
2864
2865         sfc_log_init(sa, "entry");
2866
2867         SFC_ASSERT(sfc_adapter_is_locked(sa));
2868
2869         TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2870                 rc = sfc_flow_insert(sa, flow, NULL);
2871                 if (rc != 0)
2872                         goto fail_bad_flow;
2873         }
2874
2875         sfc_log_init(sa, "done");
2876
2877 fail_bad_flow:
2878         return rc;
2879 }