/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

struct sfc_flow_ops_by_spec {
        sfc_flow_parse_cb_t     *parse;
        sfc_flow_insert_cb_t    *insert;
        sfc_flow_remove_cb_t    *remove;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
        .parse = sfc_flow_parse_rte_to_filter,
        .insert = sfc_flow_filter_insert,
        .remove = sfc_flow_filter_remove,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        const struct sfc_flow_ops_by_spec *ops = NULL;

        switch (spec->type) {
        case SFC_FLOW_SPEC_FILTER:
                ops = &sfc_flow_ops_filter;
                break;
        default:
                SFC_ASSERT(false);
                break;
        }

        return ops;
}

/*
 * Currently, the filter-based (VNIC) flow API is implemented in such a
 * manner that each flow rule is converted to one or more hardware filters.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is unset in the flow rule, a number of filter
 * copies is created to cover all possible values of such a field.
 */
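
/*
 * For example (illustrative), a rule that leaves the destination MAC
 * address unconstrained may be expanded into two filter copies, one
 * marked with EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and one with
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST (see
 * sfc_flow_set_unknown_dst_flags() declared below), so that both
 * unicast and multicast destinations are covered.
 */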

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask structures for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used;
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the
         * corresponding values in "spec", they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask does not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                supp = ((const uint8_t *)supp_mask)[i];

                if (~supp & mask[i]) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}
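
/*
 * Typical calling convention for the protocol parsers below (a sketch,
 * mirroring sfc_flow_parse_eth() and friends rather than adding new API):
 *
 *     const struct rte_flow_item_eth *spec = NULL;
 *     const struct rte_flow_item_eth *mask = NULL;
 *
 *     rc = sfc_flow_parse_init(item,
 *                              (const void **)&spec, (const void **)&mask,
 *                              &supp_mask, &rte_flow_item_eth_mask,
 *                              sizeof(struct rte_flow_item_eth), error);
 *     if (rc != 0)
 *             return rc;
 *     if (spec == NULL)
 *             return 0;   (no spec means any value matches)
 */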

/*
 * Protocol parsers.
 * Arbitrary (partial) masking is not supported, so masks in items
 * must be either full or empty (zeroed), and set only for the
 * supported fields specified in supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. An outer frame specification may comprise only
 *   the source/destination addresses and the EtherType field.
 *   An inner frame specification may contain the destination address only.
 *   The individual/group mask is supported, as well as empty and full masks.
 *   If the mask is NULL, the default mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; its filter member is the EFX filter specification
 *   to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (rte_is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!rte_is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * ethertype masks are equal to zero in inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!rte_is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}
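
/*
 * Example (illustrative): an ETH item whose destination mask equals
 * ig_mask above (01:00:00:00:00:00) requests a match on the
 * individual/group bit only; a unicast spec->dst then selects
 * (IFRM_)UNKNOWN_UCAST_DST and a multicast one selects
 * (IFRM_)UNKNOWN_MCAST_DST, rather than a match on a specific address.
 */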

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; its filter member is the EFX filter specification
 *   to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
                .inner_type = RTE_BE16(0xffff),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                /* Apply mask to keep VID only */
                vid = rte_bswap16(spec->tci & mask->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN TPID matching is not supported");
                return -rte_errno;
        }
        if (mask->inner_type == supp_mask.inner_type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
        } else if (mask->inner_type) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Bad mask for VLAN inner_type");
                return -rte_errno;
        }

        return 0;
}
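
/*
 * Example (illustrative): in a QinQ-style pattern such as
 *     eth / vlan vid is 100 / vlan vid is 200 / ...
 * the first VLAN item sets EFX_FILTER_MATCH_OUTER_VID (VID 100) and the
 * second sets EFX_FILTER_MATCH_INNER_VID (VID 200); a third VLAN item is
 * rejected with "More than two VLAN items".
 */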

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; its filter member is the EFX filter specification
 *   to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}
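
/*
 * Note that an IPV4 item may also appear without a preceding ETH item:
 * the parser forces EFX_FILTER_MATCH_ETHER_TYPE to IPv4 itself, so a
 * truncated pattern such as (illustrative)
 *     ipv4 dst is 192.0.2.1 / end
 * still produces a filter on EtherType 0x0800 plus LOC_HOST.
 */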

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; its filter member is the EFX filter specification
 *   to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; its filter member is the EFX filter specification
 *   to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}
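
/*
 * Example (illustrative): a TCP item with spec->hdr.dst_port set to
 * RTE_BE16(80) under a full port mask yields EFX_FILTER_MATCH_LOC_PORT
 * with efs_loc_port == 80 after the byte swap noted above; the UDP
 * parser below treats its ports identically.
 */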

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; its filter member is the EFX filter specification
 *   to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}
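
/*
 * The VNI/VSID is a 24-bit field, hence the 3-byte full mask above;
 * in line with the full-or-empty mask policy of the other parsers, only
 * an all-ones (exact match) or all-zeroes (wildcard) mask is accepted.
 */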

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; its filter member is the EFX filter specification
 *   to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}
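
/*
 * Example (illustrative) of a tunnel pattern accepted by this parser in
 * combination with the parsers above:
 *     eth / ipv4 / udp / vxlan vni is 42 / eth / end
 * The outer layers (or this parser itself) pin IP_PROTO to UDP, the
 * VXLAN item sets EFX_FILTER_MATCH_ENCAP_TYPE and VNI_OR_VSID, and the
 * trailing ETH item is parsed as the inner frame (destination MAC only).
 */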

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558).
 *   If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; its filter member is the EFX filter specification
 *   to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      struct sfc_flow_parse_ctx *parse_ctx,
                      struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; its filter member is the EFX filter specification
 *   to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_nvgre,
        },
};
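
/*
 * The prev_layer/layer pairs above encode the permitted item ordering:
 * for instance (illustrative), TCP may only follow an L3 item and itself
 * yields L4, while the tunnel items reset the layer to START_LAYER so
 * that an inner ETH item can follow; sfc_flow_parse_pattern() below
 * enforces these transitions.
 */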

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;

        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Ingress is compulsory");
                return -rte_errno;
        }
        if (attr->transfer == 0) {
                if (attr->priority != 0) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Priorities are unsupported");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_FILTER;
                spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
                spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
                spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
        } else {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
                                   "Transfer is not supported");
                return -rte_errno;
        }

        return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
                  unsigned int nb_items,
                  enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < nb_items; i++)
                if (items[i].type == type)
                        return &items[i];

        return NULL;
}

int
sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
                       unsigned int nb_flow_items,
                       const struct rte_flow_item pattern[],
                       struct sfc_flow_parse_ctx *parse_ctx,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(flow_items, nb_flow_items,
                                         pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                if (parse_ctx->type != item->ctx_type) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                        "Parse context type mismatch");
                        return -rte_errno;
                }

                rc = item->parse(pattern, parse_ctx, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}
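
/*
 * For instance (illustrative), both
 *     eth / ipv4 / tcp / end
 * and the truncated
 *     tcp / end
 * pass the prev_layer checks above: prev_layer starts as ANY_LAYER, so a
 * pattern may begin at any protocol layer, but once started the layers
 * must follow one another without gaps.
 */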

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_rxq *rxq;

        if (queue->index >= sfc_sa2shared(sa)->rxq_count)
                return -EINVAL;

        rxq = &sa->rxq_ctrl[queue->index];
        spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}
1305
1306 static int
1307 sfc_flow_parse_rss(struct sfc_adapter *sa,
1308                    const struct rte_flow_action_rss *action_rss,
1309                    struct rte_flow *flow)
1310 {
1311         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1312         struct sfc_rss *rss = &sas->rss;
1313         unsigned int rxq_sw_index;
1314         struct sfc_rxq *rxq;
1315         unsigned int rxq_hw_index_min;
1316         unsigned int rxq_hw_index_max;
1317         efx_rx_hash_type_t efx_hash_types;
1318         const uint8_t *rss_key;
1319         struct sfc_flow_spec *spec = &flow->spec;
1320         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1321         struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
1322         unsigned int i;
1323
1324         if (action_rss->queue_num == 0)
1325                 return -EINVAL;
1326
1327         rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
1328         rxq = &sa->rxq_ctrl[rxq_sw_index];
1329         rxq_hw_index_min = rxq->hw_index;
1330         rxq_hw_index_max = 0;
1331
1332         for (i = 0; i < action_rss->queue_num; ++i) {
1333                 rxq_sw_index = action_rss->queue[i];
1334
1335                 if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
1336                         return -EINVAL;
1337
1338                 rxq = &sa->rxq_ctrl[rxq_sw_index];
1339
1340                 if (rxq->hw_index < rxq_hw_index_min)
1341                         rxq_hw_index_min = rxq->hw_index;
1342
1343                 if (rxq->hw_index > rxq_hw_index_max)
1344                         rxq_hw_index_max = rxq->hw_index;
1345         }
1346
1347         switch (action_rss->func) {
1348         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1349         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1350                 break;
1351         default:
1352                 return -EINVAL;
1353         }
1354
1355         if (action_rss->level)
1356                 return -EINVAL;
1357
1358         /*
1359          * Dummy RSS action with only one queue and no specific settings
1360          * for hash types and key does not require dedicated RSS context
1361          * and may be simplified to single queue action.
1362          */
1363         if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1364             action_rss->key_len == 0) {
1365                 spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
1366                 return 0;
1367         }
1368
1369         if (action_rss->types) {
1370                 int rc;
1371
1372                 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1373                                           &efx_hash_types);
1374                 if (rc != 0)
1375                         return -rc;
1376         } else {
1377                 unsigned int i;
1378
1379                 efx_hash_types = 0;
1380                 for (i = 0; i < rss->hf_map_nb_entries; ++i)
1381                         efx_hash_types |= rss->hf_map[i].efx;
1382         }
1383
1384         if (action_rss->key_len) {
1385                 if (action_rss->key_len != sizeof(rss->key))
1386                         return -EINVAL;
1387
1388                 rss_key = action_rss->key;
1389         } else {
1390                 rss_key = rss->key;
1391         }
1392
1393         spec_filter->rss = B_TRUE;
1394
1395         sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1396         sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1397         sfc_rss_conf->rss_hash_types = efx_hash_types;
1398         rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1399
1400         for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1401                 unsigned int nb_queues = action_rss->queue_num;
1402                 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
1403                 struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
1404
1405                 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1406         }
1407
1408         return 0;
1409 }
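
/*
 * Worked example (hypothetical queue numbers, assuming hw indices equal
 * the sw queue indices here): for an RSS action with queue list
 * {2, 3, 4, 5}, the code above yields rxq_hw_index_min = 2 and
 * rxq_hw_index_max = 5, and fills rss_tbl by cycling over the queue
 * list, so rss_tbl[i] takes the values 0, 1, 2, 3, 0, 1, ...
 * (hw_index - rxq_hw_index_min). The entries are therefore relative to
 * the base RxQ that is later programmed into efs_dmaq_id.
 */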
1410
1411 static int
1412 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1413                     unsigned int filters_count)
1414 {
1415         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1416         unsigned int i;
1417         int ret = 0;
1418
1419         for (i = 0; i < filters_count; i++) {
1420                 int rc;
1421
1422                 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1423                 if (ret == 0 && rc != 0) {
1424                         sfc_err(sa, "failed to remove filter specification "
1425                                 "(rc = %d)", rc);
1426                         ret = rc;
1427                 }
1428         }
1429
1430         return ret;
1431 }
1432
1433 static int
1434 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1435 {
1436         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1437         unsigned int i;
1438         int rc = 0;
1439
1440         for (i = 0; i < spec_filter->count; i++) {
1441                 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1442                 if (rc != 0) {
1443                         sfc_flow_spec_flush(sa, spec, i);
1444                         break;
1445                 }
1446         }
1447
1448         return rc;
1449 }
1450
1451 static int
1452 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1453 {
1454         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1455
1456         return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1457 }
1458
1459 static int
1460 sfc_flow_filter_insert(struct sfc_adapter *sa,
1461                        struct rte_flow *flow)
1462 {
1463         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1464         struct sfc_rss *rss = &sas->rss;
1465         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1466         struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
1467         uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1468         unsigned int i;
1469         int rc = 0;
1470
1471         if (spec_filter->rss) {
1472                 unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
1473                                               flow_rss->rxq_hw_index_min + 1,
1474                                               EFX_MAXRSS);
1475
1476                 rc = efx_rx_scale_context_alloc(sa->nic,
1477                                                 EFX_RX_SCALE_EXCLUSIVE,
1478                                                 rss_spread,
1479                                                 &efs_rss_context);
1480                 if (rc != 0)
1481                         goto fail_scale_context_alloc;
1482
1483                 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1484                                            rss->hash_alg,
1485                                            flow_rss->rss_hash_types, B_TRUE);
1486                 if (rc != 0)
1487                         goto fail_scale_mode_set;
1488
1489                 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1490                                           flow_rss->rss_key,
1491                                           sizeof(rss->key));
1492                 if (rc != 0)
1493                         goto fail_scale_key_set;
1494
1495                 /*
1496                  * At this point, fully elaborated filter specifications
1497                  * have been produced from the template. To make sure that
1498                  * RSS behaviour is consistent between them, set the same
1499                  * RSS context value everywhere.
1500                  */
1501                 for (i = 0; i < spec_filter->count; i++) {
1502                         efx_filter_spec_t *spec = &spec_filter->filters[i];
1503
1504                         spec->efs_rss_context = efs_rss_context;
1505                         spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1506                         spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1507                 }
1508         }
1509
1510         rc = sfc_flow_spec_insert(sa, &flow->spec);
1511         if (rc != 0)
1512                 goto fail_filter_insert;
1513
1514         if (spec_filter->rss) {
1515                 /*
1516                  * The scale table is set after filter insertion because
1517                  * its entries are relative to the base RxQ ID, and the
1518                  * latter is submitted to the HW by means of inserting a
1519                  * filter. By the time of this request, the HW therefore
1520                  * knows all the information needed to verify the table
1521                  * entries, and the operation will succeed.
1522                  */
1523                 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1524                                           flow_rss->rss_tbl,
1525                                           RTE_DIM(flow_rss->rss_tbl));
1526                 if (rc != 0)
1527                         goto fail_scale_tbl_set;
1528         }
1529
1530         return 0;
1531
1532 fail_scale_tbl_set:
1533         sfc_flow_spec_remove(sa, &flow->spec);
1534
1535 fail_filter_insert:
1536 fail_scale_key_set:
1537 fail_scale_mode_set:
1538         if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1539                 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1540
1541 fail_scale_context_alloc:
1542         return rc;
1543 }
1544
1545 static int
1546 sfc_flow_filter_remove(struct sfc_adapter *sa,
1547                        struct rte_flow *flow)
1548 {
1549         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1550         int rc = 0;
1551
1552         rc = sfc_flow_spec_remove(sa, &flow->spec);
1553         if (rc != 0)
1554                 return rc;
1555
1556         if (spec_filter->rss) {
1557                 /*
1558                  * All specifications for a given flow rule have the same RSS
1559                  * context, so the RSS context value is taken from the first
1560                  * filter specification.
1561                  */
1562                 efx_filter_spec_t *spec = &spec_filter->filters[0];
1563
1564                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1565         }
1566
1567         return rc;
1568 }
1569
1570 static int
1571 sfc_flow_parse_mark(struct sfc_adapter *sa,
1572                     const struct rte_flow_action_mark *mark,
1573                     struct rte_flow *flow)
1574 {
1575         struct sfc_flow_spec *spec = &flow->spec;
1576         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1577         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1578
1579         if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1580                 return EINVAL;
1581
1582         spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1583         spec_filter->template.efs_mark = mark->id;
1584
1585         return 0;
1586 }
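
/*
 * For illustration only (not part of the driver): a MARK action combined
 * with a QUEUE action, as an application would pass them. The id is a
 * hypothetical example value; it must not exceed the NIC's
 * enc_filter_action_mark_max, which is checked above.
 *
 *     struct rte_flow_action_mark mark = { .id = 42 };
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */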
1587
1588 static int
1589 sfc_flow_parse_actions(struct sfc_adapter *sa,
1590                        const struct rte_flow_action actions[],
1591                        struct rte_flow *flow,
1592                        struct rte_flow_error *error)
1593 {
1594         int rc;
1595         struct sfc_flow_spec *spec = &flow->spec;
1596         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1597         const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1598         uint32_t actions_set = 0;
1599         const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1600                                            (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1601                                            (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1602         const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1603                                            (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1604
1605         if (actions == NULL) {
1606                 rte_flow_error_set(error, EINVAL,
1607                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1608                                    "NULL actions");
1609                 return -rte_errno;
1610         }
1611
1612 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
1613         RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
1614
1615         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1616                 switch (actions->type) {
1617                 case RTE_FLOW_ACTION_TYPE_VOID:
1618                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1619                                                actions_set);
1620                         break;
1621
1622                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1623                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1624                                                actions_set);
1625                         if ((actions_set & fate_actions_mask) != 0)
1626                                 goto fail_fate_actions;
1627
1628                         rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1629                         if (rc != 0) {
1630                                 rte_flow_error_set(error, EINVAL,
1631                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1632                                         "Bad QUEUE action");
1633                                 return -rte_errno;
1634                         }
1635                         break;
1636
1637                 case RTE_FLOW_ACTION_TYPE_RSS:
1638                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1639                                                actions_set);
1640                         if ((actions_set & fate_actions_mask) != 0)
1641                                 goto fail_fate_actions;
1642
1643                         rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1644                         if (rc != 0) {
1645                                 rte_flow_error_set(error, -rc,
1646                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1647                                         "Bad RSS action");
1648                                 return -rte_errno;
1649                         }
1650                         break;
1651
1652                 case RTE_FLOW_ACTION_TYPE_DROP:
1653                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1654                                                actions_set);
1655                         if ((actions_set & fate_actions_mask) != 0)
1656                                 goto fail_fate_actions;
1657
1658                         spec_filter->template.efs_dmaq_id =
1659                                 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1660                         break;
1661
1662                 case RTE_FLOW_ACTION_TYPE_FLAG:
1663                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1664                                                actions_set);
1665                         if ((actions_set & mark_actions_mask) != 0)
1666                                 goto fail_actions_overlap;
1667
1668                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1669                                 rte_flow_error_set(error, ENOTSUP,
1670                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1671                                         "FLAG action is not supported on the current Rx datapath");
1672                                 return -rte_errno;
1673                         }
1674
1675                         spec_filter->template.efs_flags |=
1676                                 EFX_FILTER_FLAG_ACTION_FLAG;
1677                         break;
1678
1679                 case RTE_FLOW_ACTION_TYPE_MARK:
1680                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1681                                                actions_set);
1682                         if ((actions_set & mark_actions_mask) != 0)
1683                                 goto fail_actions_overlap;
1684
1685                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1686                                 rte_flow_error_set(error, ENOTSUP,
1687                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1688                                         "MARK action is not supported on the current Rx datapath");
1689                                 return -rte_errno;
1690                         }
1691
1692                         rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1693                         if (rc != 0) {
1694                                 rte_flow_error_set(error, rc,
1695                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1696                                         "Bad MARK action");
1697                                 return -rte_errno;
1698                         }
1699                         break;
1700
1701                 default:
1702                         rte_flow_error_set(error, ENOTSUP,
1703                                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1704                                            "Action is not supported");
1705                         return -rte_errno;
1706                 }
1707
1708                 actions_set |= (1UL << actions->type);
1709         }
1710 #undef SFC_BUILD_SET_OVERFLOW
1711
1712         /* When fate is unknown, drop traffic. */
1713         if ((actions_set & fate_actions_mask) == 0) {
1714                 spec_filter->template.efs_dmaq_id =
1715                         EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1716         }
1717
1718         return 0;
1719
1720 fail_fate_actions:
1721         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1722                            "Cannot combine several fate-deciding actions, "
1723                            "choose between QUEUE, RSS or DROP");
1724         return -rte_errno;
1725
1726 fail_actions_overlap:
1727         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1728                            "Overlapping actions are not supported");
1729         return -rte_errno;
1730 }
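
/*
 * Note on the bookkeeping above: actions_set records each parsed action
 * as (1UL << type). For example, once a QUEUE action has been parsed,
 * actions_set & fate_actions_mask is non-zero, so a subsequent RSS or
 * DROP action in the same rule is rejected via fail_fate_actions.
 */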
1731
1732 /**
1733  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1734  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1735  * specifications after copying.
1736  *
1737  * @param spec[in, out]
1738  *   SFC flow specification to update.
1739  * @param filters_count_for_one_val[in]
1740  *   How many specifications should have the same match flag, what is the
1741  *   number of specifications before copying.
1742  * @param error[out]
1743  *   Perform verbose error reporting if not NULL.
1744  */
1745 static int
1746 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1747                                unsigned int filters_count_for_one_val,
1748                                struct rte_flow_error *error)
1749 {
1750         unsigned int i;
1751         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1752         static const efx_filter_match_flags_t vals[] = {
1753                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1754                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1755         };
1756
1757         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1758                 rte_flow_error_set(error, EINVAL,
1759                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1760                         "Number of specifications is incorrect while copying "
1761                         "by unknown destination flags");
1762                 return -rte_errno;
1763         }
1764
1765         for (i = 0; i < spec_filter->count; i++) {
1766                 /* The check above ensures that divisor can't be zero here */
1767                 spec_filter->filters[i].efs_match_flags |=
1768                         vals[i / filters_count_for_one_val];
1769         }
1770
1771         return 0;
1772 }
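
/*
 * Worked example: with filters_count_for_one_val = 2 and the two values
 * above, spec_filter->count must be 4; filters[0] and filters[1] get
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, while filters[2] and filters[3]
 * get EFX_FILTER_MATCH_UNKNOWN_MCAST_DST (vals[i / 2]).
 */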
1773
1774 /**
1775  * Check that the list of supported filters has a filter that differs
1776  * from the given match in that it has the
1777  * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1778  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since such a filter
1779  * will also be inserted.
1780  *
1781  * @param match[in]
1782  *   The match flags of filter.
1783  * @param spec[in]
1784  *   Specification to be supplemented.
1785  * @param filter[in]
1786  *   SFC filter with list of supported filters.
1787  */
1788 static boolean_t
1789 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1790                                  __rte_unused efx_filter_spec_t *spec,
1791                                  struct sfc_filter *filter)
1792 {
1793         unsigned int i;
1794         efx_filter_match_flags_t match_mcast_dst;
1795
1796         match_mcast_dst =
1797                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1798                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1799         for (i = 0; i < filter->supported_match_num; i++) {
1800                 if (match_mcast_dst == filter->supported_match[i])
1801                         return B_TRUE;
1802         }
1803
1804         return B_FALSE;
1805 }
1806
1807 /**
1808  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1809  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1810  * specifications after copying.
1811  *
1812  * @param spec[in, out]
1813  *   SFC flow specification to update.
1814  * @param filters_count_for_one_val[in]
1815  *   How many specifications should have the same EtherType value; this is
1816  *   the number of specifications before copying.
1817  * @param error[out]
1818  *   Perform verbose error reporting if not NULL.
1819  */
1820 static int
1821 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1822                         unsigned int filters_count_for_one_val,
1823                         struct rte_flow_error *error)
1824 {
1825         unsigned int i;
1826         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1827         static const uint16_t vals[] = {
1828                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1829         };
1830
1831         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1832                 rte_flow_error_set(error, EINVAL,
1833                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1834                         "Number of specifications is incorrect "
1835                         "while copying by Ethertype");
1836                 return -rte_errno;
1837         }
1838
1839         for (i = 0; i < spec_filter->count; i++) {
1840                 spec_filter->filters[i].efs_match_flags |=
1841                         EFX_FILTER_MATCH_ETHER_TYPE;
1842
1843                 /*
1844                  * The check above ensures that
1845                  * filters_count_for_one_val is not 0
1846                  */
1847                 spec_filter->filters[i].efs_ether_type =
1848                         vals[i / filters_count_for_one_val];
1849         }
1850
1851         return 0;
1852 }
1853
1854 /**
1855  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1856  * in the same specifications after copying.
1857  *
1858  * @param spec[in, out]
1859  *   SFC flow specification to update.
1860  * @param filters_count_for_one_val[in]
1861  *   How many specifications should have the same match flag; this is the
1862  *   number of specifications before copying.
1863  * @param error[out]
1864  *   Perform verbose error reporting if not NULL.
1865  */
1866 static int
1867 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1868                             unsigned int filters_count_for_one_val,
1869                             struct rte_flow_error *error)
1870 {
1871         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1872         unsigned int i;
1873
1874         if (filters_count_for_one_val != spec_filter->count) {
1875                 rte_flow_error_set(error, EINVAL,
1876                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1877                         "Number of specifications is incorrect "
1878                         "while copying by outer VLAN ID");
1879                 return -rte_errno;
1880         }
1881
1882         for (i = 0; i < spec_filter->count; i++) {
1883                 spec_filter->filters[i].efs_match_flags |=
1884                         EFX_FILTER_MATCH_OUTER_VID;
1885
1886                 spec_filter->filters[i].efs_outer_vid = 0;
1887         }
1888
1889         return 0;
1890 }
1891
1892 /**
1893  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1894  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1895  * specifications after copying.
1896  *
1897  * @param spec[in, out]
1898  *   SFC flow specification to update.
1899  * @param filters_count_for_one_val[in]
1900  *   How many specifications should have the same match flag; this is the
1901  *   number of specifications before copying.
1902  * @param error[out]
1903  *   Perform verbose error reporting if not NULL.
1904  */
1905 static int
1906 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1907                                     unsigned int filters_count_for_one_val,
1908                                     struct rte_flow_error *error)
1909 {
1910         unsigned int i;
1911         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1912         static const efx_filter_match_flags_t vals[] = {
1913                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1914                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1915         };
1916
1917         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1918                 rte_flow_error_set(error, EINVAL,
1919                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1920                         "Number of specifications is incorrect while copying "
1921                         "by inner frame unknown destination flags");
1922                 return -rte_errno;
1923         }
1924
1925         for (i = 0; i < spec_filter->count; i++) {
1926                 /* The check above ensures that divisor can't be zero here */
1927                 spec_filter->filters[i].efs_match_flags |=
1928                         vals[i / filters_count_for_one_val];
1929         }
1930
1931         return 0;
1932 }
1933
1934 /**
1935  * Check that the following conditions are met:
1936  * - the specification corresponds to a filter for encapsulated traffic
1937  * - the list of supported filters has a filter
1938  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1939  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1940  *   be inserted.
1941  *
1942  * @param match[in]
1943  *   The match flags of filter.
1944  * @param spec[in]
1945  *   Specification to be supplemented.
1946  * @param filter[in]
1947  *   SFC filter with list of supported filters.
1948  */
1949 static boolean_t
1950 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1951                                       efx_filter_spec_t *spec,
1952                                       struct sfc_filter *filter)
1953 {
1954         unsigned int i;
1955         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1956         efx_filter_match_flags_t match_mcast_dst;
1957
1958         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1959                 return B_FALSE;
1960
1961         match_mcast_dst =
1962                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1963                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1964         for (i = 0; i < filter->supported_match_num; i++) {
1965                 if (match_mcast_dst == filter->supported_match[i])
1966                         return B_TRUE;
1967         }
1968
1969         return B_FALSE;
1970 }
1971
1972 /**
1973  * Check that the list of supported filters has a filter that differs
1974  * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID
1975  * in this case that filter will be used and the flag
1976  * EFX_FILTER_MATCH_OUTER_VID is not needed.
1977  *
1978  * @param match[in]
1979  *   The match flags of filter.
1980  * @param spec[in]
1981  *   Specification to be supplemented.
1982  * @param filter[in]
1983  *   SFC filter with list of supported filters.
1984  */
1985 static boolean_t
1986 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
1987                               __rte_unused efx_filter_spec_t *spec,
1988                               struct sfc_filter *filter)
1989 {
1990         unsigned int i;
1991         efx_filter_match_flags_t match_without_vid =
1992                 match & ~EFX_FILTER_MATCH_OUTER_VID;
1993
1994         for (i = 0; i < filter->supported_match_num; i++) {
1995                 if (match_without_vid == filter->supported_match[i])
1996                         return B_FALSE;
1997         }
1998
1999         return B_TRUE;
2000 }
2001
2002 /*
2003  * Match flags that can be automatically added to filters.
2004  * When searching for the copy flag, the last of equally small multipliers
2005  * is selected, which gives the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag a
2006  * higher priority than EFX_FILTER_MATCH_ETHER_TYPE. This is because filters
2007  * with EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are at the end of the list of supported
2008  * filters.
2009  */
2010 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2011         {
2012                 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2013                 .vals_count = 2,
2014                 .set_vals = sfc_flow_set_unknown_dst_flags,
2015                 .spec_check = sfc_flow_check_unknown_dst_flags,
2016         },
2017         {
2018                 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
2019                 .vals_count = 2,
2020                 .set_vals = sfc_flow_set_ethertypes,
2021                 .spec_check = NULL,
2022         },
2023         {
2024                 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2025                 .vals_count = 2,
2026                 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2027                 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2028         },
2029         {
2030                 .flag = EFX_FILTER_MATCH_OUTER_VID,
2031                 .vals_count = 1,
2032                 .set_vals = sfc_flow_set_outer_vid_flag,
2033                 .spec_check = sfc_flow_check_outer_vid_flag,
2034         },
2035 };
2036
2037 /* Get an item from the sfc_flow_copy_flags array */
2038 static const struct sfc_flow_copy_flag *
2039 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2040 {
2041         unsigned int i;
2042
2043         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2044                 if (sfc_flow_copy_flags[i].flag == flag)
2045                         return &sfc_flow_copy_flags[i];
2046         }
2047
2048         return NULL;
2049 }
2050
2051 /**
2052  * Make copies of the specifications and set the match flag and the
2053  * values of the corresponding field in them.
2054  *
2055  * @param spec[in, out]
2056  *   SFC flow specification to update.
2057  * @param flag[in]
2058  *   The match flag to add.
2059  * @param error[out]
2060  *   Perform verbose error reporting if not NULL.
2061  */
2062 static int
2063 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2064                              efx_filter_match_flags_t flag,
2065                              struct rte_flow_error *error)
2066 {
2067         unsigned int i;
2068         unsigned int new_filters_count;
2069         unsigned int filters_count_for_one_val;
2070         const struct sfc_flow_copy_flag *copy_flag;
2071         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2072         int rc;
2073
2074         copy_flag = sfc_flow_get_copy_flag(flag);
2075         if (copy_flag == NULL) {
2076                 rte_flow_error_set(error, ENOTSUP,
2077                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2078                                    "Unsupported spec field for copying");
2079                 return -rte_errno;
2080         }
2081
2082         new_filters_count = spec_filter->count * copy_flag->vals_count;
2083         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2084                 rte_flow_error_set(error, EINVAL,
2085                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2086                         "Too many EFX specifications in the flow rule");
2087                 return -rte_errno;
2088         }
2089
2090         /* Copy filters specifications */
2091         for (i = spec_filter->count; i < new_filters_count; i++) {
2092                 spec_filter->filters[i] =
2093                         spec_filter->filters[i - spec_filter->count];
2094         }
2095
2096         filters_count_for_one_val = spec_filter->count;
2097         spec_filter->count = new_filters_count;
2098
2099         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2100         if (rc != 0)
2101                 return rc;
2102
2103         return 0;
2104 }
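
/*
 * Worked example: starting from a single specification and a flag with
 * vals_count = 2 (e.g. EFX_FILTER_MATCH_ETHER_TYPE), the copy loop above
 * duplicates filters[0] into filters[1] and count becomes 2; set_vals()
 * (sfc_flow_set_ethertypes in this case) then assigns EFX_ETHER_TYPE_IPV4
 * to filters[0] and EFX_ETHER_TYPE_IPV6 to filters[1].
 */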
2105
2106 /**
2107  * Check that the given set of match flags missing in the original filter spec
2108  * could be covered by adding spec copies which specify the corresponding
2109  * flags and packet field values to match.
2110  *
2111  * @param miss_flags[in]
2112  *   Flags that are present in the supported filter but missing from the spec.
2113  * @param spec[in]
2114  *   Specification to be supplemented.
2115  * @param filter[in]
2116  *   SFC filter.
2117  *
2118  * @return
2119  *   Number of specifications after copying, or 0 if the flags cannot be added.
2120  */
2121 static unsigned int
2122 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2123                              efx_filter_spec_t *spec,
2124                              struct sfc_filter *filter)
2125 {
2126         unsigned int i;
2127         efx_filter_match_flags_t copy_flags = 0;
2128         efx_filter_match_flags_t flag;
2129         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2130         sfc_flow_spec_check *check;
2131         unsigned int multiplier = 1;
2132
2133         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2134                 flag = sfc_flow_copy_flags[i].flag;
2135                 check = sfc_flow_copy_flags[i].spec_check;
2136                 if ((flag & miss_flags) == flag) {
2137                         if (check != NULL && (!check(match, spec, filter)))
2138                                 continue;
2139
2140                         copy_flags |= flag;
2141                         multiplier *= sfc_flow_copy_flags[i].vals_count;
2142                 }
2143         }
2144
2145         if (copy_flags == miss_flags)
2146                 return multiplier;
2147
2148         return 0;
2149 }
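
/*
 * Worked example: if miss_flags is EFX_FILTER_MATCH_ETHER_TYPE |
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and both flags pass their checks,
 * copy_flags equals miss_flags and the returned multiplier is
 * 2 * 2 = 4, i.e. four specifications would be needed after copying.
 */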
2150
2151 /**
2152  * Attempt to supplement the specification template with the minimal
2153  * supported set of match flags. To do this, the specifications are
2154  * copied and filled in with the values of the fields that correspond
2155  * to the missing flags.
2156  * The necessary and sufficient set of filters is built from the
2157  * fewest copies that could be made to cover the minimal required
2158  * set of flags.
2159  *
2160  * @param sa[in]
2161  *   SFC adapter.
2162  * @param spec[in, out]
2163  *   SFC flow specification to update.
2164  * @param error[out]
2165  *   Perform verbose error reporting if not NULL.
2166  */
2167 static int
2168 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2169                                struct sfc_flow_spec *spec,
2170                                struct rte_flow_error *error)
2171 {
2172         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2173         struct sfc_filter *filter = &sa->filter;
2174         efx_filter_match_flags_t miss_flags;
2175         efx_filter_match_flags_t min_miss_flags = 0;
2176         efx_filter_match_flags_t match;
2177         unsigned int min_multiplier = UINT_MAX;
2178         unsigned int multiplier;
2179         unsigned int i;
2180         int rc;
2181
2182         match = spec_filter->template.efs_match_flags;
2183         for (i = 0; i < filter->supported_match_num; i++) {
2184                 if ((match & filter->supported_match[i]) == match) {
2185                         miss_flags = filter->supported_match[i] & (~match);
2186                         multiplier = sfc_flow_check_missing_flags(miss_flags,
2187                                 &spec_filter->template, filter);
2188                         if (multiplier > 0) {
2189                                 if (multiplier <= min_multiplier) {
2190                                         min_multiplier = multiplier;
2191                                         min_miss_flags = miss_flags;
2192                                 }
2193                         }
2194                 }
2195         }
2196
2197         if (min_multiplier == UINT_MAX) {
2198                 rte_flow_error_set(error, ENOTSUP,
2199                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2200                                    "The flow rule pattern is unsupported");
2201                 return -rte_errno;
2202         }
2203
2204         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2205                 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2206
2207                 if ((flag & min_miss_flags) == flag) {
2208                         rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2209                         if (rc != 0)
2210                                 return rc;
2211                 }
2212         }
2213
2214         return 0;
2215 }
2216
2217 /**
2218  * Check that the set of match flags corresponds to the given pattern
2219  * of filter match flags, optionally extended with OUTER_VID, or with
2220  * both OUTER_VID and INNER_VID.
2221  *
2222  * @param match_flags[in]
2223  *   Set of match flags.
2224  * @param flags_pattern[in]
2225  *   Pattern of filter match flags.
2226  */
2227 static boolean_t
2228 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2229                             efx_filter_match_flags_t flags_pattern)
2230 {
2231         if ((match_flags & flags_pattern) != flags_pattern)
2232                 return B_FALSE;
2233
2234         switch (match_flags & ~flags_pattern) {
2235         case 0:
2236         case EFX_FILTER_MATCH_OUTER_VID:
2237         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2238                 return B_TRUE;
2239         default:
2240                 return B_FALSE;
2241         }
2242 }
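
/*
 * For example, a pattern of EFX_FILTER_MATCH_ETHER_TYPE matches flag sets
 * ETHER_TYPE, ETHER_TYPE | OUTER_VID and
 * ETHER_TYPE | OUTER_VID | INNER_VID, but not ETHER_TYPE | INNER_VID
 * (INNER_VID without OUTER_VID falls into the default case above).
 */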
2243
2244 /**
2245  * Check whether the spec maps to a hardware filter which is known to be
2246  * ineffective despite being valid.
2247  *
2248  * @param filter[in]
2249  *   SFC filter with list of supported filters.
2250  * @param spec[in]
2251  *   SFC flow specification.
2252  */
2253 static boolean_t
2254 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2255                                   struct sfc_flow_spec *spec)
2256 {
2257         unsigned int i;
2258         uint16_t ether_type;
2259         uint8_t ip_proto;
2260         efx_filter_match_flags_t match_flags;
2261         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2262
2263         for (i = 0; i < spec_filter->count; i++) {
2264                 match_flags = spec_filter->filters[i].efs_match_flags;
2265
2266                 if (sfc_flow_is_match_with_vids(match_flags,
2267                                                 EFX_FILTER_MATCH_ETHER_TYPE) ||
2268                     sfc_flow_is_match_with_vids(match_flags,
2269                                                 EFX_FILTER_MATCH_ETHER_TYPE |
2270                                                 EFX_FILTER_MATCH_LOC_MAC)) {
2271                         ether_type = spec_filter->filters[i].efs_ether_type;
2272                         if (filter->supports_ip_proto_or_addr_filter &&
2273                             (ether_type == EFX_ETHER_TYPE_IPV4 ||
2274                              ether_type == EFX_ETHER_TYPE_IPV6))
2275                                 return B_TRUE;
2276                 } else if (sfc_flow_is_match_with_vids(match_flags,
2277                                 EFX_FILTER_MATCH_ETHER_TYPE |
2278                                 EFX_FILTER_MATCH_IP_PROTO) ||
2279                            sfc_flow_is_match_with_vids(match_flags,
2280                                 EFX_FILTER_MATCH_ETHER_TYPE |
2281                                 EFX_FILTER_MATCH_IP_PROTO |
2282                                 EFX_FILTER_MATCH_LOC_MAC)) {
2283                         ip_proto = spec_filter->filters[i].efs_ip_proto;
2284                         if (filter->supports_rem_or_local_port_filter &&
2285                             (ip_proto == EFX_IPPROTO_TCP ||
2286                              ip_proto == EFX_IPPROTO_UDP))
2287                                 return B_TRUE;
2288                 }
2289         }
2290
2291         return B_FALSE;
2292 }
2293
2294 static int
2295 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2296                               struct rte_flow *flow,
2297                               struct rte_flow_error *error)
2298 {
2299         struct sfc_flow_spec *spec = &flow->spec;
2300         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2301         efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2302         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2303         int rc;
2304
2305         /* Initialize the first filter spec with template */
2306         spec_filter->filters[0] = *spec_tmpl;
2307         spec_filter->count = 1;
2308
2309         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2310                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2311                 if (rc != 0)
2312                         return rc;
2313         }
2314
2315         if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2316                 rte_flow_error_set(error, ENOTSUP,
2317                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2318                         "The flow rule pattern is unsupported");
2319                 return -rte_errno;
2320         }
2321
2322         return 0;
2323 }
2324
2325 static int
2326 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2327                              const struct rte_flow_item pattern[],
2328                              const struct rte_flow_action actions[],
2329                              struct rte_flow *flow,
2330                              struct rte_flow_error *error)
2331 {
2332         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2333         struct sfc_flow_spec *spec = &flow->spec;
2334         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2335         struct sfc_flow_parse_ctx ctx;
2336         int rc;
2337
2338         ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2339         ctx.filter = &spec_filter->template;
2340
2341         rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
2342                                     pattern, &ctx, error);
2343         if (rc != 0)
2344                 goto fail_bad_value;
2345
2346         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2347         if (rc != 0)
2348                 goto fail_bad_value;
2349
2350         rc = sfc_flow_validate_match_flags(sa, flow, error);
2351         if (rc != 0)
2352                 goto fail_bad_value;
2353
2354         return 0;
2355
2356 fail_bad_value:
2357         return rc;
2358 }
2359
2360 static int
2361 sfc_flow_parse(struct rte_eth_dev *dev,
2362                const struct rte_flow_attr *attr,
2363                const struct rte_flow_item pattern[],
2364                const struct rte_flow_action actions[],
2365                struct rte_flow *flow,
2366                struct rte_flow_error *error)
2367 {
2368         const struct sfc_flow_ops_by_spec *ops;
2369         int rc;
2370
2371         rc = sfc_flow_parse_attr(attr, flow, error);
2372         if (rc != 0)
2373                 return rc;
2374
2375         ops = sfc_flow_get_ops_by_spec(flow);
2376         if (ops == NULL || ops->parse == NULL) {
2377                 rte_flow_error_set(error, ENOTSUP,
2378                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2379                                    "No backend to handle this flow");
2380                 return -rte_errno;
2381         }
2382
2383         return ops->parse(dev, pattern, actions, flow, error);
2384 }
2385
2386 static struct rte_flow *
2387 sfc_flow_zmalloc(struct rte_flow_error *error)
2388 {
2389         struct rte_flow *flow;
2390
2391         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2392         if (flow == NULL) {
2393                 rte_flow_error_set(error, ENOMEM,
2394                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2395                                    "Failed to allocate memory");
2396         }
2397
2398         return flow;
2399 }
2400
2401 static void
2402 sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow)
2403 {
2404         rte_free(flow);
2405 }
2406
2407 static int
2408 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2409                 struct rte_flow_error *error)
2410 {
2411         const struct sfc_flow_ops_by_spec *ops;
2412         int rc;
2413
2414         ops = sfc_flow_get_ops_by_spec(flow);
2415         if (ops == NULL || ops->insert == NULL) {
2416                 rte_flow_error_set(error, ENOTSUP,
2417                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2418                                    "No backend to handle this flow");
2419                 return rte_errno;
2420         }
2421
2422         rc = ops->insert(sa, flow);
2423         if (rc != 0) {
2424                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2425                                    NULL, "Failed to insert the flow rule");
2426         }
2427
2428         return rc;
2429 }
2430
2431 static int
2432 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2433                 struct rte_flow_error *error)
2434 {
2435         const struct sfc_flow_ops_by_spec *ops;
2436         int rc;
2437
2438         ops = sfc_flow_get_ops_by_spec(flow);
2439         if (ops == NULL || ops->remove == NULL) {
2440                 rte_flow_error_set(error, ENOTSUP,
2441                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2442                                    "No backend to handle this flow");
2443                 return rte_errno;
2444         }
2445
2446         rc = ops->remove(sa, flow);
2447         if (rc != 0) {
2448                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2449                                    NULL, "Failed to remove the flow rule");
2450         }
2451
2452         return rc;
2453 }
2454
2455 static int
2456 sfc_flow_validate(struct rte_eth_dev *dev,
2457                   const struct rte_flow_attr *attr,
2458                   const struct rte_flow_item pattern[],
2459                   const struct rte_flow_action actions[],
2460                   struct rte_flow_error *error)
2461 {
2462         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2463         struct rte_flow *flow;
2464         int rc;
2465
2466         flow = sfc_flow_zmalloc(error);
2467         if (flow == NULL)
2468                 return -rte_errno;
2469
2470         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2471
2472         sfc_flow_free(sa, flow);
2473
2474         return rc;
2475 }
2476
2477 static struct rte_flow *
2478 sfc_flow_create(struct rte_eth_dev *dev,
2479                 const struct rte_flow_attr *attr,
2480                 const struct rte_flow_item pattern[],
2481                 const struct rte_flow_action actions[],
2482                 struct rte_flow_error *error)
2483 {
2484         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2485         struct rte_flow *flow = NULL;
2486         int rc;
2487
2488         flow = sfc_flow_zmalloc(error);
2489         if (flow == NULL)
2490                 goto fail_no_mem;
2491
2492         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2493         if (rc != 0)
2494                 goto fail_bad_value;
2495
2496         sfc_adapter_lock(sa);
2497
2498         TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2499
2500         if (sa->state == SFC_ADAPTER_STARTED) {
2501                 rc = sfc_flow_insert(sa, flow, error);
2502                 if (rc != 0)
2503                         goto fail_flow_insert;
2504         }
2505
2506         sfc_adapter_unlock(sa);
2507
2508         return flow;
2509
2510 fail_flow_insert:
2511         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2512
2513 fail_bad_value:
2514         sfc_flow_free(sa, flow);
2515         sfc_adapter_unlock(sa);
2516
2517 fail_no_mem:
2518         return NULL;
2519 }
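
/*
 * For illustration only (not part of the driver): how an application
 * reaches this entry point through the generic API. port_id, attr,
 * pattern and actions are hypothetical; on this PMD the call lands in
 * sfc_flow_create() via sfc_flow_ops below.
 *
 *     struct rte_flow_error error;
 *     struct rte_flow *handle;
 *
 *     handle = rte_flow_create(port_id, &attr, pattern, actions, &error);
 *     if (handle == NULL)
 *             printf("flow create failed: %s\n", error.message);
 */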
2520
2521 static int
2522 sfc_flow_destroy(struct rte_eth_dev *dev,
2523                  struct rte_flow *flow,
2524                  struct rte_flow_error *error)
2525 {
2526         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2527         struct rte_flow *flow_ptr;
2528         int rc = EINVAL;
2529
2530         sfc_adapter_lock(sa);
2531
2532         TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2533                 if (flow_ptr == flow)
2534                         rc = 0;
2535         }
2536         if (rc != 0) {
2537                 rte_flow_error_set(error, rc,
2538                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2539                                    "Failed to find flow rule to destroy");
2540                 goto fail_bad_value;
2541         }
2542
2543         if (sa->state == SFC_ADAPTER_STARTED)
2544                 rc = sfc_flow_remove(sa, flow, error);
2545
2546         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2547         sfc_flow_free(sa, flow);
2548
2549 fail_bad_value:
2550         sfc_adapter_unlock(sa);
2551
2552         return -rc;
2553 }
2554
2555 static int
2556 sfc_flow_flush(struct rte_eth_dev *dev,
2557                struct rte_flow_error *error)
2558 {
2559         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2560         struct rte_flow *flow;
2561         int ret = 0;
2562
2563         sfc_adapter_lock(sa);
2564
2565         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2566                 if (sa->state == SFC_ADAPTER_STARTED) {
2567                         int rc;
2568
2569                         rc = sfc_flow_remove(sa, flow, error);
2570                         if (rc != 0)
2571                                 ret = rc;
2572                 }
2573
2574                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2575                 sfc_flow_free(sa, flow);
2576         }
2577
2578         sfc_adapter_unlock(sa);
2579
2580         return -ret;
2581 }
2582
2583 static int
2584 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2585                  struct rte_flow_error *error)
2586 {
2587         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2588         int ret = 0;
2589
2590         sfc_adapter_lock(sa);
2591         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2592                 rte_flow_error_set(error, EBUSY,
2593                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2594                                    NULL, "Please close the port first");
2595                 ret = -rte_errno;
2596         } else {
2597                 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2598         }
2599         sfc_adapter_unlock(sa);
2600
2601         return ret;
2602 }
2603
2604 const struct rte_flow_ops sfc_flow_ops = {
2605         .validate = sfc_flow_validate,
2606         .create = sfc_flow_create,
2607         .destroy = sfc_flow_destroy,
2608         .flush = sfc_flow_flush,
2609         .query = NULL,
2610         .isolate = sfc_flow_isolate,
2611 };
2612
2613 void
2614 sfc_flow_init(struct sfc_adapter *sa)
2615 {
2616         SFC_ASSERT(sfc_adapter_is_locked(sa));
2617
2618         TAILQ_INIT(&sa->flow_list);
2619 }
2620
2621 void
2622 sfc_flow_fini(struct sfc_adapter *sa)
2623 {
2624         struct rte_flow *flow;
2625
2626         SFC_ASSERT(sfc_adapter_is_locked(sa));
2627
2628         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2629                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2630                 sfc_flow_free(sa, flow);
2631         }
2632 }
2633
2634 void
2635 sfc_flow_stop(struct sfc_adapter *sa)
2636 {
2637         struct rte_flow *flow;
2638
2639         SFC_ASSERT(sfc_adapter_is_locked(sa));
2640
2641         TAILQ_FOREACH(flow, &sa->flow_list, entries)
2642                 sfc_flow_remove(sa, flow, NULL);
2643 }
2644
2645 int
2646 sfc_flow_start(struct sfc_adapter *sa)
2647 {
2648         struct rte_flow *flow;
2649         int rc = 0;
2650
2651         sfc_log_init(sa, "entry");
2652
2653         SFC_ASSERT(sfc_adapter_is_locked(sa));
2654
2655         TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2656                 rc = sfc_flow_insert(sa, flow, NULL);
2657                 if (rc != 0)
2658                         goto fail_bad_flow;
2659         }
2660
2661         sfc_log_init(sa, "done");
2662
2663 fail_bad_flow:
2664         return rc;
2665 }