drivers/net/sfc/sfc_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

struct sfc_flow_ops_by_spec {
        sfc_flow_parse_cb_t     *parse;
        sfc_flow_insert_cb_t    *insert;
        sfc_flow_remove_cb_t    *remove;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
        .parse = sfc_flow_parse_rte_to_filter,
        .insert = sfc_flow_filter_insert,
        .remove = sfc_flow_filter_remove,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        const struct sfc_flow_ops_by_spec *ops = NULL;

        switch (spec->type) {
        case SFC_FLOW_SPEC_FILTER:
                ops = &sfc_flow_ops_filter;
                break;
        default:
                SFC_ASSERT(false);
                break;
        }

        return ops;
}

/*
 * Currently, the filter-based (VNIC) flow API is implemented in such a
 * manner that each flow rule is converted to one or more hardware filters.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is unset in the flow rule, multiple filter
 * copies are created to cover all possible values of such a field.
 */
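/*
 * For example (illustrative): if a rule leaves the destination MAC
 * address unspecified, filter copies with the
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags may be created to
 * cover both destination address classes (see
 * sfc_flow_set_unknown_dst_flags() declared below).
 */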

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask pointers for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If the item mask is not set, the default mask is used;
         * if there is no default mask, the item mask must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec" then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask does not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                supp = ((const uint8_t *)supp_mask)[i];

                if (~supp & mask[i]) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}
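/*
 * Typical usage from a protocol parser below (illustrative sketch that
 * mirrors the call in sfc_flow_parse_eth()):
 *
 *   rc = sfc_flow_parse_init(item,
 *                            (const void **)&spec,
 *                            (const void **)&mask,
 *                            supp_mask_p, def_mask_p,
 *                            sizeof(struct rte_flow_item_eth),
 *                            error);
 *
 * On success, *spec_ptr may still be NULL, meaning "match any".
 */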

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed), and set only for the supported fields
 * listed in supp_mask.
 */
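/*
 * For example (illustrative): in the ETH item handled below, a
 * destination MAC mask of ff:ff:ff:ff:ff:ff requests an exact match,
 * an all-zero mask leaves the field unmatched, and any other partial
 * mask (except the individual/group bit mask) is rejected.
 */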

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. An outer frame specification may comprise only
 *   source/destination addresses and the EtherType field.
 *   An inner frame specification may contain the destination address only.
 *   Individual/group masks are supported, as well as empty and full masks.
 *   If the mask is NULL, the default mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (rte_is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!rte_is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * EtherType masks are zero in the inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!rte_is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
                .inner_type = RTE_BE16(0xffff),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
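        /*
         * E.g. (illustrative, testpmd-style syntax): in the pattern
         * "eth / vlan vid is 10 / vlan vid is 20", VID 10 is matched
         * as the outer tag and VID 20 as the inner tag.
         */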
        if (mask->tci == supp_mask.tci) {
                /* Apply mask to keep VID only */
                vid = rte_bswap16(spec->tci & mask->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN TPID matching is not supported");
                return -rte_errno;
        }
        if (mask->inner_type == supp_mask.inner_type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
        } else if (mask->inner_type) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Bad mask for VLAN inner_type");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
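/*
 * For instance (illustrative): in an "eth / ipv4 / udp / vxlan" pattern,
 * the outer IPv4 item must either leave the IP protocol unset or set it
 * to UDP; sfc_flow_set_match_flags_for_encap_pkts() below rejects any
 * other combination.
 */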
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      struct sfc_flow_parse_ctx *parse_ctx,
                      struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_nvgre,
        },
};
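/*
 * The prev_layer/layer fields above chain pattern items into valid
 * sequences, e.g. (illustrative) ETH (L2) -> VLAN (L2) -> IPV4 (L3) ->
 * UDP (L4) -> VXLAN; tunnel items reset the layer to the start layer so
 * that an inner-frame ETH item may follow.
 */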

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;

        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Ingress is compulsory");
                return -rte_errno;
        }
        if (attr->transfer == 0) {
                if (attr->priority != 0) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Priorities are unsupported");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_FILTER;
                spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
                spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
                spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
        } else {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
                                   "Transfer is not supported");
                return -rte_errno;
        }

        return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
                  unsigned int nb_items,
                  enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < nb_items; i++)
                if (items[i].type == type)
                        return &items[i];

        return NULL;
}

int
sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
                       unsigned int nb_flow_items,
                       const struct rte_flow_item pattern[],
                       struct sfc_flow_parse_ctx *parse_ctx,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(flow_items, nb_flow_items,
                                         pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }
                /*
                 * Omitting one or several protocol layers at the beginning
                 * of a pattern is supported
                 */
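                /*
                 * prev_layer starts as SFC_FLOW_ITEM_ANY_LAYER, so the
                 * first item of a pattern (e.g. an IPV4 item with no
                 * preceding ETH item) always passes this check.
                 */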
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                if (parse_ctx->type != item->ctx_type) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                        "Parse context type mismatch");
                        return -rte_errno;
                }

                rc = item->parse(pattern, parse_ctx, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_rxq *rxq;

        if (queue->index >= sfc_sa2shared(sa)->rxq_count)
                return -EINVAL;

        rxq = &sa->rxq_ctrl[queue->index];
        spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
                   const struct rte_flow_action_rss *action_rss,
                   struct rte_flow *flow)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_rss *rss = &sas->rss;
        unsigned int rxq_sw_index;
        struct sfc_rxq *rxq;
        unsigned int rxq_hw_index_min;
        unsigned int rxq_hw_index_max;
        efx_rx_hash_type_t efx_hash_types;
        const uint8_t *rss_key;
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
        unsigned int i;

        if (action_rss->queue_num == 0)
                return -EINVAL;

        rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
        rxq = &sa->rxq_ctrl[rxq_sw_index];
        rxq_hw_index_min = rxq->hw_index;
        rxq_hw_index_max = 0;

        for (i = 0; i < action_rss->queue_num; ++i) {
                rxq_sw_index = action_rss->queue[i];

                if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
                        return -EINVAL;

                rxq = &sa->rxq_ctrl[rxq_sw_index];

                if (rxq->hw_index < rxq_hw_index_min)
                        rxq_hw_index_min = rxq->hw_index;

                if (rxq->hw_index > rxq_hw_index_max)
                        rxq_hw_index_max = rxq->hw_index;
        }

        switch (action_rss->func) {
        case RTE_ETH_HASH_FUNCTION_DEFAULT:
        case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
                break;
        default:
                return -EINVAL;
        }

        if (action_rss->level)
                return -EINVAL;

        /*
         * A dummy RSS action with only one queue and no specific settings
         * for hash types and key does not require a dedicated RSS context
         * and may be simplified to a single-queue action.
         */
1362         if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1363             action_rss->key_len == 0) {
1364                 spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
1365                 return 0;
1366         }
1367
1368         if (action_rss->types) {
1369                 int rc;
1370
1371                 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1372                                           &efx_hash_types);
1373                 if (rc != 0)
1374                         return -rc;
1375         } else {
1376                 unsigned int i;
1377
1378                 efx_hash_types = 0;
1379                 for (i = 0; i < rss->hf_map_nb_entries; ++i)
1380                         efx_hash_types |= rss->hf_map[i].efx;
1381         }
1382
1383         if (action_rss->key_len) {
1384                 if (action_rss->key_len != sizeof(rss->key))
1385                         return -EINVAL;
1386
1387                 rss_key = action_rss->key;
1388         } else {
1389                 rss_key = rss->key;
1390         }
1391
1392         spec_filter->rss = B_TRUE;
1393
1394         sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1395         sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1396         sfc_rss_conf->rss_hash_types = efx_hash_types;
1397         rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1398
1399         for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1400                 unsigned int nb_queues = action_rss->queue_num;
1401                 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
1402                 struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
1403
1404                 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1405         }
1406
1407         return 0;
1408 }
1409
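/*
 * Remove up to filters_count hardware filters that back the flow
 * specification. All removals are attempted even if some of them fail;
 * the first error encountered is reported.
 */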
static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
                    unsigned int filters_count)
{
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < filters_count; i++) {
                int rc;

                rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
                if (ret == 0 && rc != 0) {
                        sfc_err(sa, "failed to remove filter specification "
                                "(rc = %d)", rc);
                        ret = rc;
                }
        }

        return ret;
}

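/*
 * Insert all hardware filters produced from the flow specification.
 * On failure, the filters inserted so far are flushed so that the
 * operation is all-or-nothing.
 */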
static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        unsigned int i;
        int rc = 0;

        for (i = 0; i < spec_filter->count; i++) {
                rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
                if (rc != 0) {
                        sfc_flow_spec_flush(sa, spec, i);
                        break;
                }
        }

        return rc;
}

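/* Remove all hardware filters produced from the flow specification */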
static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;

        return sfc_flow_spec_flush(sa, spec, spec_filter->count);
}

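/*
 * Insert the flow rule into the hardware. If the rule uses RSS, an
 * exclusive RSS context is allocated and its hash mode and key are
 * programmed first; the context handle is then stamped into every
 * filter specification so that all of them share the same RSS
 * behaviour. The indirection table can only be programmed after the
 * filters have been inserted (see the comment inside the function).
 */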
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
                       struct rte_flow *flow)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_rss *rss = &sas->rss;
        struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
        struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
        uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
        unsigned int i;
        int rc = 0;

        if (spec_filter->rss) {
                unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
                                              flow_rss->rxq_hw_index_min + 1,
                                              EFX_MAXRSS);

                rc = efx_rx_scale_context_alloc(sa->nic,
                                                EFX_RX_SCALE_EXCLUSIVE,
                                                rss_spread,
                                                &efs_rss_context);
                if (rc != 0)
                        goto fail_scale_context_alloc;

                rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
                                           rss->hash_alg,
                                           flow_rss->rss_hash_types, B_TRUE);
                if (rc != 0)
                        goto fail_scale_mode_set;

                rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
                                          flow_rss->rss_key,
                                          sizeof(rss->key));
                if (rc != 0)
                        goto fail_scale_key_set;

                /*
                 * At this point, fully elaborated filter specifications
                 * have been produced from the template. To make sure that
                 * RSS behaviour is consistent between them, set the same
                 * RSS context value everywhere.
                 */
                for (i = 0; i < spec_filter->count; i++) {
                        efx_filter_spec_t *spec = &spec_filter->filters[i];

                        spec->efs_rss_context = efs_rss_context;
                        spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
                        spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
                }
        }

        rc = sfc_flow_spec_insert(sa, &flow->spec);
        if (rc != 0)
                goto fail_filter_insert;

        if (spec_filter->rss) {
                /*
                 * The scale table is set after filter insertion because
                 * the table entries are relative to the base RxQ ID
                 * and the latter is submitted to the HW by means of
                 * inserting a filter, so by the time of the request
                 * the HW knows all the information needed to verify
                 * the table entries, and the operation will succeed.
                 */
                rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
                                          flow_rss->rss_tbl,
                                          RTE_DIM(flow_rss->rss_tbl));
                if (rc != 0)
                        goto fail_scale_tbl_set;
        }

        return 0;

fail_scale_tbl_set:
        sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
        if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
                efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
        return rc;
}

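/*
 * Remove the flow rule from the hardware and release its RSS context,
 * if any.
 */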
static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
                       struct rte_flow *flow)
{
        struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
        int rc = 0;

        rc = sfc_flow_spec_remove(sa, &flow->spec);
        if (rc != 0)
                return rc;

        if (spec_filter->rss) {
                /*
                 * All specifications for a given flow rule have the same RSS
                 * context, so the RSS context value is taken from the first
                 * filter specification.
                 */
                efx_filter_spec_t *spec = &spec_filter->filters[0];

                rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
        }

        return rc;
}

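/*
 * Handle the MARK action: validate the mark ID against the maximum
 * value supported by the adapter and record it in the filter template.
 */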
static int
sfc_flow_parse_mark(struct sfc_adapter *sa,
                    const struct rte_flow_action_mark *mark,
                    struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

        if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
                return EINVAL;

        spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
        spec_filter->template.efs_mark = mark->id;

        return 0;
}

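/*
 * Walk the action list and fill in the filter specification template.
 * At most one fate-deciding action (QUEUE, RSS or DROP) and at most one
 * marking action (MARK or FLAG) are accepted; if no fate action is
 * present, matched traffic is dropped.
 */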
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
                       const struct rte_flow_action actions[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        const unsigned int dp_rx_features = sa->priv.dp_rx->features;
        uint32_t actions_set = 0;
        const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
                                           (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
                                           (1UL << RTE_FLOW_ACTION_TYPE_DROP);
        const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
                                           (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

        if (actions == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
                                   "NULL actions");
                return -rte_errno;
        }

#define SFC_BUILD_SET_OVERFLOW(_action, _set) \
        RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
                                               actions_set);
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
                                               actions_set);
                        if ((actions_set & fate_actions_mask) != 0)
                                goto fail_fate_actions;

                        rc = sfc_flow_parse_queue(sa, actions->conf, flow);
                        if (rc != 0) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Bad QUEUE action");
                                return -rte_errno;
                        }
                        break;

                case RTE_FLOW_ACTION_TYPE_RSS:
                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
                                               actions_set);
                        if ((actions_set & fate_actions_mask) != 0)
                                goto fail_fate_actions;

                        rc = sfc_flow_parse_rss(sa, actions->conf, flow);
                        if (rc != 0) {
                                rte_flow_error_set(error, -rc,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Bad RSS action");
                                return -rte_errno;
                        }
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
                                               actions_set);
                        if ((actions_set & fate_actions_mask) != 0)
                                goto fail_fate_actions;

                        spec_filter->template.efs_dmaq_id =
                                EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
                        break;

                case RTE_FLOW_ACTION_TYPE_FLAG:
                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
                                               actions_set);
                        if ((actions_set & mark_actions_mask) != 0)
                                goto fail_actions_overlap;

                        if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
                                rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "FLAG action is not supported on the current Rx datapath");
                                return -rte_errno;
                        }

                        spec_filter->template.efs_flags |=
                                EFX_FILTER_FLAG_ACTION_FLAG;
                        break;

                case RTE_FLOW_ACTION_TYPE_MARK:
                        SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
                                               actions_set);
                        if ((actions_set & mark_actions_mask) != 0)
                                goto fail_actions_overlap;

                        if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
                                rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "MARK action is not supported on the current Rx datapath");
                                return -rte_errno;
                        }

                        rc = sfc_flow_parse_mark(sa, actions->conf, flow);
                        if (rc != 0) {
                                rte_flow_error_set(error, rc,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Bad MARK action");
                                return -rte_errno;
                        }
                        break;

                default:
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                           "Action is not supported");
                        return -rte_errno;
                }

                actions_set |= (1UL << actions->type);
        }
#undef SFC_BUILD_SET_OVERFLOW

        /* When fate is unknown, drop traffic. */
        if ((actions_set & fate_actions_mask) == 0) {
                spec_filter->template.efs_dmaq_id =
                        EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
        }

        return 0;

fail_fate_actions:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Cannot combine several fate-deciding actions, "
                           "choose between QUEUE, RSS or DROP");
        return -rte_errno;

fail_actions_overlap:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Overlapping actions are not supported");
        return -rte_errno;
}

/**
 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
                               unsigned int filters_count_for_one_val,
                               struct rte_flow_error *error)
{
        unsigned int i;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        static const efx_filter_match_flags_t vals[] = {
                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
        };

        if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Number of specifications is incorrect while copying "
                        "by unknown destination flags");
                return -rte_errno;
        }

        for (i = 0; i < spec_filter->count; i++) {
                /* The check above ensures that divisor can't be zero here */
                spec_filter->filters[i].efs_match_flags |=
                        vals[i / filters_count_for_one_val];
        }

        return 0;
}

/**
 * Check that the list of supported filters has a filter
 * with the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
 * be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
                                 __rte_unused efx_filter_spec_t *spec,
                                 struct sfc_filter *filter)
{
        unsigned int i;
        efx_filter_match_flags_t match_mcast_dst;

        match_mcast_dst =
                (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        for (i = 0; i < filter->supported_match_num; i++) {
                if (match_mcast_dst == filter->supported_match[i])
                        return B_TRUE;
        }

        return B_FALSE;
}

/**
 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same EtherType value, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
                        unsigned int filters_count_for_one_val,
                        struct rte_flow_error *error)
{
        unsigned int i;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        static const uint16_t vals[] = {
                EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
        };

        if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Number of specifications is incorrect "
                        "while copying by Ethertype");
                return -rte_errno;
        }

        for (i = 0; i < spec_filter->count; i++) {
                spec_filter->filters[i].efs_match_flags |=
                        EFX_FILTER_MATCH_ETHER_TYPE;

                /*
                 * The check above ensures that
                 * filters_count_for_one_val is not 0
                 */
                spec_filter->filters[i].efs_ether_type =
                        vals[i / filters_count_for_one_val];
        }

        return 0;
}

/**
 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
 * in the same specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
                            unsigned int filters_count_for_one_val,
                            struct rte_flow_error *error)
{
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        unsigned int i;

        if (filters_count_for_one_val != spec_filter->count) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Number of specifications is incorrect "
                        "while copying by outer VLAN ID");
                return -rte_errno;
        }

        for (i = 0; i < spec_filter->count; i++) {
                spec_filter->filters[i].efs_match_flags |=
                        EFX_FILTER_MATCH_OUTER_VID;

                spec_filter->filters[i].efs_outer_vid = 0;
        }

        return 0;
}

/**
 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
                                    unsigned int filters_count_for_one_val,
                                    struct rte_flow_error *error)
{
        unsigned int i;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        static const efx_filter_match_flags_t vals[] = {
                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
        };

        if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Number of specifications is incorrect while copying "
                        "by inner frame unknown destination flags");
                return -rte_errno;
        }

        for (i = 0; i < spec_filter->count; i++) {
                /* The check above ensures that divisor can't be zero here */
                spec_filter->filters[i].efs_match_flags |=
                        vals[i / filters_count_for_one_val];
        }

        return 0;
}

/**
 * Check that the following conditions are met:
 * - the specification corresponds to a filter for encapsulated traffic
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
                                      efx_filter_spec_t *spec,
                                      struct sfc_filter *filter)
{
        unsigned int i;
        efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
        efx_filter_match_flags_t match_mcast_dst;

        if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
                return B_FALSE;

        match_mcast_dst =
                (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
        for (i = 0; i < filter->supported_match_num; i++) {
                if (match_mcast_dst == filter->supported_match[i])
                        return B_TRUE;
        }

        return B_FALSE;
}

/**
 * Check that the list of supported filters has a filter that differs
 * from @p match in that it has no EFX_FILTER_MATCH_OUTER_VID flag;
 * in this case that filter will be used and the
 * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
                              __rte_unused efx_filter_spec_t *spec,
                              struct sfc_filter *filter)
{
        unsigned int i;
        efx_filter_match_flags_t match_without_vid =
                match & ~EFX_FILTER_MATCH_OUTER_VID;

        for (i = 0; i < filter->supported_match_num; i++) {
                if (match_without_vid == filter->supported_match[i])
                        return B_FALSE;
        }

        return B_TRUE;
}

/*
 * Match flags that can be automatically added to filters.
 * Selecting the last minimum when searching for the copy flag ensures that the
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
 * filters.
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
        {
                .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
                .vals_count = 2,
                .set_vals = sfc_flow_set_unknown_dst_flags,
                .spec_check = sfc_flow_check_unknown_dst_flags,
        },
        {
                .flag = EFX_FILTER_MATCH_ETHER_TYPE,
                .vals_count = 2,
                .set_vals = sfc_flow_set_ethertypes,
                .spec_check = NULL,
        },
        {
                .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
                .vals_count = 2,
                .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
                .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
        },
        {
                .flag = EFX_FILTER_MATCH_OUTER_VID,
                .vals_count = 1,
                .set_vals = sfc_flow_set_outer_vid_flag,
                .spec_check = sfc_flow_check_outer_vid_flag,
        },
};

/* Get item from array sfc_flow_copy_flags */
static const struct sfc_flow_copy_flag *
sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
                if (sfc_flow_copy_flags[i].flag == flag)
                        return &sfc_flow_copy_flags[i];
        }

        return NULL;
}

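/*
 * Illustrative example (not part of the driver logic): if a template
 * needs EFX_FILTER_MATCH_ETHER_TYPE added (vals_count == 2), the number
 * of specifications is doubled by sfc_flow_spec_add_match_flag() below
 * and sfc_flow_set_ethertypes() then assigns EFX_ETHER_TYPE_IPV4 to the
 * first half of the copies and EFX_ETHER_TYPE_IPV6 to the second half.
 */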
/**
 * Make copies of the specifications, set match flag and values
 * of the field that corresponds to it.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param flag[in]
 *   The match flag to add.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
                             efx_filter_match_flags_t flag,
                             struct rte_flow_error *error)
{
        unsigned int i;
        unsigned int new_filters_count;
        unsigned int filters_count_for_one_val;
        const struct sfc_flow_copy_flag *copy_flag;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        int rc;

        copy_flag = sfc_flow_get_copy_flag(flag);
        if (copy_flag == NULL) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Unsupported spec field for copying");
                return -rte_errno;
        }

        new_filters_count = spec_filter->count * copy_flag->vals_count;
        if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Too many EFX specifications in the flow rule");
                return -rte_errno;
        }

        /* Copy filters specifications */
        for (i = spec_filter->count; i < new_filters_count; i++) {
                spec_filter->filters[i] =
                        spec_filter->filters[i - spec_filter->count];
        }

        filters_count_for_one_val = spec_filter->count;
        spec_filter->count = new_filters_count;

        rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
        if (rc != 0)
                return rc;

        return 0;
}

/**
 * Check that the given set of match flags missing in the original filter spec
 * could be covered by adding spec copies which specify the corresponding
 * flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Flags that are missing from the original specification but required
 *   by the supported filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   Number of specifications after copy or 0, if the flags cannot be added.
 */
static unsigned int
sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
                             efx_filter_spec_t *spec,
                             struct sfc_filter *filter)
{
        unsigned int i;
        efx_filter_match_flags_t copy_flags = 0;
        efx_filter_match_flags_t flag;
        efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
        sfc_flow_spec_check *check;
        unsigned int multiplier = 1;

        for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
                flag = sfc_flow_copy_flags[i].flag;
                check = sfc_flow_copy_flags[i].spec_check;
                if ((flag & miss_flags) == flag) {
                        if (check != NULL && (!check(match, spec, filter)))
                                continue;

                        copy_flags |= flag;
                        multiplier *= sfc_flow_copy_flags[i].vals_count;
                }
        }

        if (copy_flags == miss_flags)
                return multiplier;

        return 0;
}

/**
 * Attempt to supplement the specification template with a minimal set
 * of match flags that makes it correspond to a supported filter. To do
 * this, it is necessary to copy the specifications, filling them with
 * the values of fields that correspond to the missing flags.
 * The necessary and sufficient filter is built from the fewest number
 * of copies which could be made to cover the minimally required set
 * of flags.
 *
 * @param sa[in]
 *   SFC adapter.
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
                               struct sfc_flow_spec *spec,
                               struct rte_flow_error *error)
{
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_filter *filter = &sa->filter;
        efx_filter_match_flags_t miss_flags;
        efx_filter_match_flags_t min_miss_flags = 0;
        efx_filter_match_flags_t match;
        unsigned int min_multiplier = UINT_MAX;
        unsigned int multiplier;
        unsigned int i;
        int rc;

        match = spec_filter->template.efs_match_flags;
        for (i = 0; i < filter->supported_match_num; i++) {
                if ((match & filter->supported_match[i]) == match) {
                        miss_flags = filter->supported_match[i] & (~match);
                        multiplier = sfc_flow_check_missing_flags(miss_flags,
                                &spec_filter->template, filter);
                        if (multiplier > 0) {
                                if (multiplier <= min_multiplier) {
                                        min_multiplier = multiplier;
                                        min_miss_flags = miss_flags;
                                }
                        }
                }
        }

        if (min_multiplier == UINT_MAX) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "The flow rule pattern is unsupported");
                return -rte_errno;
        }

        for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
                efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;

                if ((flag & min_miss_flags) == flag) {
                        rc = sfc_flow_spec_add_match_flag(spec, flag, error);
                        if (rc != 0)
                                return rc;
                }
        }

        return 0;
}

/**
 * Check that the set of match flags corresponds to the given filter
 * pattern. The filter is described by its match flags, optionally
 * extended with the OUTER_VID and INNER_VID flags.
 *
 * @param match_flags[in]
 *   Set of match flags.
 * @param flags_pattern[in]
 *   Pattern of filter match flags.
 */
static boolean_t
sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
                            efx_filter_match_flags_t flags_pattern)
{
        if ((match_flags & flags_pattern) != flags_pattern)
                return B_FALSE;

        switch (match_flags & ~flags_pattern) {
        case 0:
        case EFX_FILTER_MATCH_OUTER_VID:
        case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
                return B_TRUE;
        default:
                return B_FALSE;
        }
}

/**
 * Check whether the spec maps to a hardware filter which is known to be
 * ineffective despite being valid.
 *
 * @param filter[in]
 *   SFC filter with list of supported filters.
 * @param spec[in]
 *   SFC flow specification.
 */
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
                                  struct sfc_flow_spec *spec)
{
        unsigned int i;
        uint16_t ether_type;
        uint8_t ip_proto;
        efx_filter_match_flags_t match_flags;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;

        for (i = 0; i < spec_filter->count; i++) {
                match_flags = spec_filter->filters[i].efs_match_flags;

                if (sfc_flow_is_match_with_vids(match_flags,
                                                EFX_FILTER_MATCH_ETHER_TYPE) ||
                    sfc_flow_is_match_with_vids(match_flags,
                                                EFX_FILTER_MATCH_ETHER_TYPE |
                                                EFX_FILTER_MATCH_LOC_MAC)) {
                        ether_type = spec_filter->filters[i].efs_ether_type;
                        if (filter->supports_ip_proto_or_addr_filter &&
                            (ether_type == EFX_ETHER_TYPE_IPV4 ||
                             ether_type == EFX_ETHER_TYPE_IPV6))
                                return B_TRUE;
                } else if (sfc_flow_is_match_with_vids(match_flags,
                                EFX_FILTER_MATCH_ETHER_TYPE |
                                EFX_FILTER_MATCH_IP_PROTO) ||
                           sfc_flow_is_match_with_vids(match_flags,
                                EFX_FILTER_MATCH_ETHER_TYPE |
                                EFX_FILTER_MATCH_IP_PROTO |
                                EFX_FILTER_MATCH_LOC_MAC)) {
                        ip_proto = spec_filter->filters[i].efs_ip_proto;
                        if (filter->supports_rem_or_local_port_filter &&
                            (ip_proto == EFX_IPPROTO_TCP ||
                             ip_proto == EFX_IPPROTO_UDP))
                                return B_TRUE;
                }
        }

        return B_FALSE;
}

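/*
 * Check that the match flags collected in the template are supported by
 * the hardware, completing the specification with copies where
 * necessary, and reject combinations known to be valid but ineffective.
 */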
static int
sfc_flow_validate_match_flags(struct sfc_adapter *sa,
                              struct rte_flow *flow,
                              struct rte_flow_error *error)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        efx_filter_spec_t *spec_tmpl = &spec_filter->template;
        efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
        int rc;

        /* Initialize the first filter spec with template */
        spec_filter->filters[0] = *spec_tmpl;
        spec_filter->count = 1;

        if (!sfc_filter_is_match_supported(sa, match_flags)) {
                rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
                if (rc != 0)
                        return rc;
        }

        if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
                rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "The flow rule pattern is unsupported");
                return -rte_errno;
        }

        return 0;
}

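/*
 * Parse a flow rule into the filter-based (VNIC) specification: pattern
 * items fill in the filter template, actions select the fate and flags,
 * and the resulting match flags are validated against the set supported
 * by the adapter.
 */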
static int
sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             struct rte_flow *flow,
                             struct rte_flow_error *error)
{
        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_flow_parse_ctx ctx;
        int rc;

        ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
        ctx.filter = &spec_filter->template;

        rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
                                    pattern, &ctx, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_actions(sa, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_validate_match_flags(sa, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        return 0;

fail_bad_value:
        return rc;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
               const struct rte_flow_attr *attr,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               struct rte_flow *flow,
               struct rte_flow_error *error)
{
        const struct sfc_flow_ops_by_spec *ops;
        int rc;

        rc = sfc_flow_parse_attr(attr, flow, error);
        if (rc != 0)
                return rc;

        ops = sfc_flow_get_ops_by_spec(flow);
        if (ops == NULL || ops->parse == NULL) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "No backend to handle this flow");
                return -rte_errno;
        }

        return ops->parse(dev, pattern, actions, flow, error);
}

static struct rte_flow *
sfc_flow_zmalloc(struct rte_flow_error *error)
{
        struct rte_flow *flow;

        flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
        if (flow == NULL) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Failed to allocate memory");
        }

        return flow;
}

static void
sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow)
{
        rte_free(flow);
}

static int
sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
                struct rte_flow_error *error)
{
        const struct sfc_flow_ops_by_spec *ops;
        int rc;

        ops = sfc_flow_get_ops_by_spec(flow);
        if (ops == NULL || ops->insert == NULL) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "No backend to handle this flow");
                return rte_errno;
        }

        rc = ops->insert(sa, flow);
        if (rc != 0) {
                rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "Failed to insert the flow rule");
        }

        return rc;
}

static int
sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
                struct rte_flow_error *error)
{
        const struct sfc_flow_ops_by_spec *ops;
        int rc;

        ops = sfc_flow_get_ops_by_spec(flow);
        if (ops == NULL || ops->remove == NULL) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "No backend to handle this flow");
                return rte_errno;
        }

        rc = ops->remove(sa, flow);
        if (rc != 0) {
                rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "Failed to remove the flow rule");
        }

        return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
        struct rte_flow *flow;
        int rc;

        flow = sfc_flow_zmalloc(error);
        if (flow == NULL)
                return -rte_errno;

        rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);

        sfc_flow_free(sa, flow);

        return rc;
}

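/*
 * Create a flow rule: parse it into a specification, link it into the
 * adapter's flow list and, if the adapter is already started, insert it
 * into the hardware immediately.
 */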
static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
        struct rte_flow *flow = NULL;
        int rc;

        flow = sfc_flow_zmalloc(error);
        if (flow == NULL)
                goto fail_no_mem;

        /*
         * Take the adapter lock before parsing so that the error path
         * below may unconditionally unlock; previously a parse failure
         * reached the unlock without the lock being held.
         */
        sfc_adapter_lock(sa);

        rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = sfc_flow_insert(sa, flow, error);
                if (rc != 0)
                        goto fail_flow_insert;
        }

        sfc_adapter_unlock(sa);

        return flow;

fail_flow_insert:
        TAILQ_REMOVE(&sa->flow_list, flow, entries);

fail_bad_value:
        sfc_flow_free(sa, flow);
        sfc_adapter_unlock(sa);

fail_no_mem:
        return NULL;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
        struct rte_flow *flow_ptr;
        int rc = EINVAL;

        sfc_adapter_lock(sa);

        TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
                if (flow_ptr == flow)
                        rc = 0;
        }
        if (rc != 0) {
                rte_flow_error_set(error, rc,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to find flow rule to destroy");
                goto fail_bad_value;
        }

        if (sa->state == SFC_ADAPTER_STARTED)
                rc = sfc_flow_remove(sa, flow, error);

        TAILQ_REMOVE(&sa->flow_list, flow, entries);
        sfc_flow_free(sa, flow);

fail_bad_value:
        sfc_adapter_unlock(sa);

        return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
        struct rte_flow *flow;
        int ret = 0;

        sfc_adapter_lock(sa);

        while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
                if (sa->state == SFC_ADAPTER_STARTED) {
                        int rc;

                        rc = sfc_flow_remove(sa, flow, error);
                        if (rc != 0)
                                ret = rc;
                }

                TAILQ_REMOVE(&sa->flow_list, flow, entries);
                sfc_flow_free(sa, flow);
        }

        sfc_adapter_unlock(sa);

        return -ret;
}

static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
                 struct rte_flow_error *error)
{
        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
        int ret = 0;

        sfc_adapter_lock(sa);
        if (sa->state != SFC_ADAPTER_INITIALIZED) {
                rte_flow_error_set(error, EBUSY,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "please close the port first");
                ret = -rte_errno;
        } else {
                sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
        }
        sfc_adapter_unlock(sa);

        return ret;
}

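/* rte_flow operations provided by the driver */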
const struct rte_flow_ops sfc_flow_ops = {
        .validate = sfc_flow_validate,
        .create = sfc_flow_create,
        .destroy = sfc_flow_destroy,
        .flush = sfc_flow_flush,
        .query = NULL,
        .isolate = sfc_flow_isolate,
};

void
sfc_flow_init(struct sfc_adapter *sa)
{
        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_INIT(&sa->flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
                TAILQ_REMOVE(&sa->flow_list, flow, entries);
                sfc_flow_free(sa, flow);
        }
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->flow_list, entries)
                sfc_flow_remove(sa, flow, NULL);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
        struct rte_flow *flow;
        int rc = 0;

        sfc_log_init(sa, "entry");

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->flow_list, entries) {
                rc = sfc_flow_insert(sa, flow, NULL);
                if (rc != 0)
                        goto fail_bad_flow;
        }

        sfc_log_init(sa, "done");

fail_bad_flow:
        return rc;
}