net/sfc: generalise flow start and stop path
[dpdk.git] / drivers / net / sfc / sfc_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

struct sfc_flow_ops_by_spec {
        sfc_flow_parse_cb_t     *parse;
        sfc_flow_insert_cb_t    *insert;
        sfc_flow_remove_cb_t    *remove;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
        .parse = sfc_flow_parse_rte_to_filter,
        .insert = sfc_flow_filter_insert,
        .remove = sfc_flow_filter_remove,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        const struct sfc_flow_ops_by_spec *ops = NULL;

        switch (spec->type) {
        case SFC_FLOW_SPEC_FILTER:
                ops = &sfc_flow_ops_filter;
                break;
        default:
                SFC_ASSERT(false);
                break;
        }

        return ops;
}
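
/*
 * Note: the dispatch above is by specification type so that further
 * specification types can be added later without touching the generic
 * flow handling paths; reaching the default case means an unknown
 * specification type and is treated as a programming error.
 */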

/*
 * Currently, the filter-based (VNIC) flow API is implemented in such
 * a manner that each flow rule is converted to one or more hardware
 * filters. All elements of a flow rule (attributes, pattern items,
 * actions) correspond to one or more fields in the efx_filter_spec_s
 * structure that describes the hardware filter. If a required field
 * is left unset in the flow rule, multiple filter copies are created
 * to cover all possible values of that field.
 */
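
/*
 * For example, if a rule does not specify the destination MAC address,
 * copies of the filter specification are created with the unknown
 * unicast and unknown multicast destination match flags (see
 * sfc_flow_set_unknown_dst_flags() declared below) so that together
 * the copies cover every possible destination address.
 */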

enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
        SFC_FLOW_ITEM_L3,
        SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of item */
        enum sfc_flow_item_layers layer;        /* Layer of item */
        enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask pointers for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used; if the default
         * mask is NULL, the item must provide a mask
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * Field values in "last" are ignored if they are either 0 or equal
         * to the corresponding values in "spec"; anything else would
         * require ranging, which is not supported
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask does not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                supp = ((const uint8_t *)supp_mask)[i];

                if (~supp & mask[i]) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for the supported fields
 * specified in supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. An outer frame specification may comprise only
 *   source/destination addresses and the EtherType field.
 *   An inner frame specification may contain the destination address only.
 *   Individual/group masks are supported, as well as empty and full masks.
 *   If the mask is NULL, the default mask is used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, the rule matches any Ethernet frame */
        if (spec == NULL)
                return 0;

        if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (rte_is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!rte_is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * EtherType masks are zero in the inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!rte_is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * EtherType is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is needed
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}
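
/*
 * Example: an ETH item whose destination mask is ff:ff:ff:ff:ff:ff
 * selects a single station address via EFX_FILTER_MATCH_LOC_MAC,
 * whereas a destination mask of 01:00:00:00:00:00 (the individual/
 * group bit only) selects all unknown unicast or all unknown multicast
 * traffic, depending on the I/G bit of the destination in "spec".
 */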

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
                .inner_type = RTE_BE16(0xffff),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is needed.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                /* Apply mask to keep VID only */
                vid = rte_bswap16(spec->tci & mask->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN TPID matching is not supported");
                return -rte_errno;
        }
        if (mask->inner_type == supp_mask.inner_type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
        } else if (mask->inner_type) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Bad mask for VLAN inner_type");
                return -rte_errno;
        }

        return 0;
}
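
/*
 * Example: in the pattern "eth / vlan vid is 100 / vlan vid is 200",
 * the first VLAN item sets efs_outer_vid to 100 and the second sets
 * efs_inner_vid to 200; a third VLAN item is rejected above since
 * at most two tags can be matched.
 */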

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   the protocol field are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order both in the item
         * and in efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   the next header field are supported. If the mask is NULL, the
 *   default mask is used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order both in the item
         * and in efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so a byte swap is needed
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so a byte swap is needed
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}
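
/*
 * Example: in the pattern "eth / ipv4 / udp / vxlan", the UDP item has
 * already set IP_PROTO to UDP, so the check above passes for the VxLAN
 * item; with TCP in place of UDP, the VxLAN item would be rejected here.
 */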

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported. The protocol type, if matched, must be
 *   Ethernet (0x6558). If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      efx_filter_spec_t *efx_spec,
                      struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_nvgre,
        },
};
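
/*
 * The layer/prev_layer values above encode the permitted item order:
 * an item may only follow an item whose layer equals its prev_layer.
 * Tunnel items reset the layer to START_LAYER so that the inner frame
 * is parsed as a new Ethernet frame.
 */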

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;

        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Ingress is compulsory");
                return -rte_errno;
        }
        if (attr->transfer == 0) {
                if (attr->priority != 0) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Priorities are unsupported");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_FILTER;
                spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
                spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
        } else {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
                                   "Transfer is not supported");
                return -rte_errno;
        }

        return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                rc = item->parse(pattern, &spec_filter->template, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}
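
/*
 * Example: the pattern "eth / ipv4 / udp / vxlan / eth" is accepted:
 * the VxLAN item resets the layer, so the second ETH item describes
 * the inner frame; an inner IPV4 item, by contrast, would be rejected
 * as unsupported in the inner frame.
 */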

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_rxq *rxq;

        if (queue->index >= sfc_sa2shared(sa)->rxq_count)
                return -EINVAL;

        rxq = &sa->rxq_ctrl[queue->index];
        spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}
1293
1294 static int
1295 sfc_flow_parse_rss(struct sfc_adapter *sa,
1296                    const struct rte_flow_action_rss *action_rss,
1297                    struct rte_flow *flow)
1298 {
1299         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1300         struct sfc_rss *rss = &sas->rss;
1301         unsigned int rxq_sw_index;
1302         struct sfc_rxq *rxq;
1303         unsigned int rxq_hw_index_min;
1304         unsigned int rxq_hw_index_max;
1305         efx_rx_hash_type_t efx_hash_types;
1306         const uint8_t *rss_key;
1307         struct sfc_flow_spec *spec = &flow->spec;
1308         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1309         struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
1310         unsigned int i;
1311
1312         if (action_rss->queue_num == 0)
1313                 return -EINVAL;
1314
1315         rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
1316         rxq = &sa->rxq_ctrl[rxq_sw_index];
1317         rxq_hw_index_min = rxq->hw_index;
1318         rxq_hw_index_max = 0;
1319
1320         for (i = 0; i < action_rss->queue_num; ++i) {
1321                 rxq_sw_index = action_rss->queue[i];
1322
1323                 if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
1324                         return -EINVAL;
1325
1326                 rxq = &sa->rxq_ctrl[rxq_sw_index];
1327
1328                 if (rxq->hw_index < rxq_hw_index_min)
1329                         rxq_hw_index_min = rxq->hw_index;
1330
1331                 if (rxq->hw_index > rxq_hw_index_max)
1332                         rxq_hw_index_max = rxq->hw_index;
1333         }
1334
1335         switch (action_rss->func) {
1336         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1337         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1338                 break;
1339         default:
1340                 return -EINVAL;
1341         }
1342
1343         if (action_rss->level)
1344                 return -EINVAL;
1345
1346         /*
1347          * Dummy RSS action with only one queue and no specific settings
1348          * for hash types and key does not require dedicated RSS context
1349          * and may be simplified to single queue action.
1350          */
1351         if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1352             action_rss->key_len == 0) {
1353                 spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
1354                 return 0;
1355         }
1356
1357         if (action_rss->types) {
1358                 int rc;
1359
1360                 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1361                                           &efx_hash_types);
1362                 if (rc != 0)
1363                         return -rc;
1364         } else {
1365                 unsigned int i;
1366
1367                 efx_hash_types = 0;
1368                 for (i = 0; i < rss->hf_map_nb_entries; ++i)
1369                         efx_hash_types |= rss->hf_map[i].efx;
1370         }
1371
1372         if (action_rss->key_len) {
1373                 if (action_rss->key_len != sizeof(rss->key))
1374                         return -EINVAL;
1375
1376                 rss_key = action_rss->key;
1377         } else {
1378                 rss_key = rss->key;
1379         }
1380
1381         spec_filter->rss = B_TRUE;
1382
1383         sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1384         sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1385         sfc_rss_conf->rss_hash_types = efx_hash_types;
1386         rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1387
1388         for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1389                 unsigned int nb_queues = action_rss->queue_num;
1390                 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
1391                 struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
1392
1393                 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1394         }
1395
1396         return 0;
1397 }
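
/*
 * Example (illustrative sketch, excluded from the build): two RSS action
 * configurations as handled by sfc_flow_parse_rss() above. The queue
 * indices and hash types are hypothetical.
 */
#if 0
/* One queue, no hash types, no key: simplified to a plain queue action */
static const uint16_t example_single_queue[] = { 3 };
static const struct rte_flow_action_rss example_rss_simple = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
        .level = 0,                     /* non-zero level is rejected */
        .types = 0,                     /* no specific hash types */
        .key_len = 0,                   /* no specific key */
        .queue_num = 1,
        .queue = example_single_queue,
};

/* Several queues: requires a dedicated RSS context on insertion */
static const uint16_t example_rss_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss example_rss_spread = {
        .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
        .level = 0,
        .types = ETH_RSS_IP,            /* mapped by sfc_rx_hf_rte_to_efx() */
        .queue_num = RTE_DIM(example_rss_queues),
        .queue = example_rss_queues,
};
#endif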
1398
1399 static int
1400 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1401                     unsigned int filters_count)
1402 {
1403         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1404         unsigned int i;
1405         int ret = 0;
1406
1407         for (i = 0; i < filters_count; i++) {
1408                 int rc;
1409
1410                 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1411                 if (ret == 0 && rc != 0) {
1412                         sfc_err(sa, "failed to remove filter specification "
1413                                 "(rc = %d)", rc);
1414                         ret = rc;
1415                 }
1416         }
1417
1418         return ret;
1419 }
1420
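/*
 * Insert all fully elaborated filter specifications of the flow spec.
 * On the first failure, flush the specifications inserted so far, so
 * that insertion is all-or-nothing.
 */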
1421 static int
1422 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1423 {
1424         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1425         unsigned int i;
1426         int rc = 0;
1427
1428         for (i = 0; i < spec_filter->count; i++) {
1429                 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1430                 if (rc != 0) {
1431                         sfc_flow_spec_flush(sa, spec, i);
1432                         break;
1433                 }
1434         }
1435
1436         return rc;
1437 }
1438
1439 static int
1440 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1441 {
1442         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1443
1444         return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1445 }
1446
1447 static int
1448 sfc_flow_filter_insert(struct sfc_adapter *sa,
1449                        struct rte_flow *flow)
1450 {
1451         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1452         struct sfc_rss *rss = &sas->rss;
1453         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1454         struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
1455         uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1456         unsigned int i;
1457         int rc = 0;
1458
1459         if (spec_filter->rss) {
1460                 unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
1461                                               flow_rss->rxq_hw_index_min + 1,
1462                                               EFX_MAXRSS);
1463
1464                 rc = efx_rx_scale_context_alloc(sa->nic,
1465                                                 EFX_RX_SCALE_EXCLUSIVE,
1466                                                 rss_spread,
1467                                                 &efs_rss_context);
1468                 if (rc != 0)
1469                         goto fail_scale_context_alloc;
1470
1471                 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1472                                            rss->hash_alg,
1473                                            flow_rss->rss_hash_types, B_TRUE);
1474                 if (rc != 0)
1475                         goto fail_scale_mode_set;
1476
1477                 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1478                                           flow_rss->rss_key,
1479                                           sizeof(rss->key));
1480                 if (rc != 0)
1481                         goto fail_scale_key_set;
1482
1483                 /*
1484                  * At this point, fully elaborated filter specifications
1485                  * have been produced from the template. To make sure that
1486                  * RSS behaviour is consistent between them, set the same
1487                  * RSS context value everywhere.
1488                  */
1489                 for (i = 0; i < spec_filter->count; i++) {
1490                         efx_filter_spec_t *spec = &spec_filter->filters[i];
1491
1492                         spec->efs_rss_context = efs_rss_context;
1493                         spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1494                         spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1495                 }
1496         }
1497
1498         rc = sfc_flow_spec_insert(sa, &flow->spec);
1499         if (rc != 0)
1500                 goto fail_filter_insert;
1501
1502         if (spec_filter->rss) {
1503                 /*
1504                  * The scale table is set after filter insertion because
1505                  * the table entries are relative to the base RxQ ID,
1506                  * and the latter is submitted to the HW by means of
1507                  * inserting a filter. Hence, by the time of this request,
1508                  * the HW knows all the information needed to verify
1509                  * the table entries, and the operation will succeed.
1510                  */
1511                 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1512                                           flow_rss->rss_tbl,
1513                                           RTE_DIM(flow_rss->rss_tbl));
1514                 if (rc != 0)
1515                         goto fail_scale_tbl_set;
1516         }
1517
1518         return 0;
1519
1520 fail_scale_tbl_set:
1521         sfc_flow_spec_remove(sa, &flow->spec);
1522
1523 fail_filter_insert:
1524 fail_scale_key_set:
1525 fail_scale_mode_set:
1526         if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1527                 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1528
1529 fail_scale_context_alloc:
1530         return rc;
1531 }
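
/*
 * Worked example for sfc_flow_filter_insert() with hypothetical numbers:
 * for RSS over RxQs with hardware indices 2..5, rxq_hw_index_min is 2,
 * rxq_hw_index_max is 5 and, assuming EFX_MAXRSS is at least 4, the
 * spread is MIN(5 - 2 + 1, EFX_MAXRSS) = 4. Each rss_tbl entry holds
 * hw_index - rxq_hw_index_min, i.e. a value in 0..3 relative to the
 * base RxQ submitted via efs_dmaq_id.
 */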
1532
1533 static int
1534 sfc_flow_filter_remove(struct sfc_adapter *sa,
1535                        struct rte_flow *flow)
1536 {
1537         struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1538         int rc = 0;
1539
1540         rc = sfc_flow_spec_remove(sa, &flow->spec);
1541         if (rc != 0)
1542                 return rc;
1543
1544         if (spec_filter->rss) {
1545                 /*
1546                  * All specifications for a given flow rule have the same RSS
1547                  * context, so the RSS context value is taken from the first
1548                  * filter specification.
1549                  */
1550                 efx_filter_spec_t *spec = &spec_filter->filters[0];
1551
1552                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1553         }
1554
1555         return rc;
1556 }
1557
1558 static int
1559 sfc_flow_parse_mark(struct sfc_adapter *sa,
1560                     const struct rte_flow_action_mark *mark,
1561                     struct rte_flow *flow)
1562 {
1563         struct sfc_flow_spec *spec = &flow->spec;
1564         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1565         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1566
1567         if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1568                 return EINVAL;
1569
1570         spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1571         spec_filter->template.efs_mark = mark->id;
1572
1573         return 0;
1574 }
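
/*
 * Example (illustrative sketch, excluded from the build): a MARK action
 * accepted by sfc_flow_parse_mark() above. The ID value is hypothetical
 * and must not exceed enc_filter_action_mark_max.
 */
#if 0
static const struct rte_flow_action_mark example_mark = {
        .id = 42,
};
#endif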
1575
1576 static int
1577 sfc_flow_parse_actions(struct sfc_adapter *sa,
1578                        const struct rte_flow_action actions[],
1579                        struct rte_flow *flow,
1580                        struct rte_flow_error *error)
1581 {
1582         int rc;
1583         struct sfc_flow_spec *spec = &flow->spec;
1584         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1585         const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1586         uint32_t actions_set = 0;
1587         const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1588                                            (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1589                                            (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1590         const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1591                                            (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1592
1593         if (actions == NULL) {
1594                 rte_flow_error_set(error, EINVAL,
1595                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1596                                    "NULL actions");
1597                 return -rte_errno;
1598         }
1599
1600 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
1601         RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
1602
1603         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1604                 switch (actions->type) {
1605                 case RTE_FLOW_ACTION_TYPE_VOID:
1606                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1607                                                actions_set);
1608                         break;
1609
1610                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1611                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1612                                                actions_set);
1613                         if ((actions_set & fate_actions_mask) != 0)
1614                                 goto fail_fate_actions;
1615
1616                         rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1617                         if (rc != 0) {
1618                                 rte_flow_error_set(error, EINVAL,
1619                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1620                                         "Bad QUEUE action");
1621                                 return -rte_errno;
1622                         }
1623                         break;
1624
1625                 case RTE_FLOW_ACTION_TYPE_RSS:
1626                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1627                                                actions_set);
1628                         if ((actions_set & fate_actions_mask) != 0)
1629                                 goto fail_fate_actions;
1630
1631                         rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1632                         if (rc != 0) {
1633                                 rte_flow_error_set(error, -rc,
1634                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1635                                         "Bad RSS action");
1636                                 return -rte_errno;
1637                         }
1638                         break;
1639
1640                 case RTE_FLOW_ACTION_TYPE_DROP:
1641                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1642                                                actions_set);
1643                         if ((actions_set & fate_actions_mask) != 0)
1644                                 goto fail_fate_actions;
1645
1646                         spec_filter->template.efs_dmaq_id =
1647                                 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1648                         break;
1649
1650                 case RTE_FLOW_ACTION_TYPE_FLAG:
1651                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1652                                                actions_set);
1653                         if ((actions_set & mark_actions_mask) != 0)
1654                                 goto fail_actions_overlap;
1655
1656                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1657                                 rte_flow_error_set(error, ENOTSUP,
1658                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1659                                         "FLAG action is not supported on the current Rx datapath");
1660                                 return -rte_errno;
1661                         }
1662
1663                         spec_filter->template.efs_flags |=
1664                                 EFX_FILTER_FLAG_ACTION_FLAG;
1665                         break;
1666
1667                 case RTE_FLOW_ACTION_TYPE_MARK:
1668                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1669                                                actions_set);
1670                         if ((actions_set & mark_actions_mask) != 0)
1671                                 goto fail_actions_overlap;
1672
1673                         if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1674                                 rte_flow_error_set(error, ENOTSUP,
1675                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1676                                         "MARK action is not supported on the current Rx datapath");
1677                                 return -rte_errno;
1678                         }
1679
1680                         rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1681                         if (rc != 0) {
1682                                 rte_flow_error_set(error, rc,
1683                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1684                                         "Bad MARK action");
1685                                 return -rte_errno;
1686                         }
1687                         break;
1688
1689                 default:
1690                         rte_flow_error_set(error, ENOTSUP,
1691                                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1692                                            "Action is not supported");
1693                         return -rte_errno;
1694                 }
1695
1696                 actions_set |= (1UL << actions->type);
1697         }
1698 #undef SFC_BUILD_SET_OVERFLOW
1699
1700         /* When fate is unknown, drop traffic. */
1701         if ((actions_set & fate_actions_mask) == 0) {
1702                 spec_filter->template.efs_dmaq_id =
1703                         EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1704         }
1705
1706         return 0;
1707
1708 fail_fate_actions:
1709         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1710                            "Cannot combine several fate-deciding actions, "
1711                            "choose between QUEUE, RSS or DROP");
1712         return -rte_errno;
1713
1714 fail_actions_overlap:
1715         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1716                            "Overlapping actions are not supported");
1717         return -rte_errno;
1718 }
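
/*
 * Example (illustrative sketch, excluded from the build): an action list
 * accepted by sfc_flow_parse_actions() above, combining one mark-class
 * action with one fate-deciding action, assuming the Rx datapath
 * supports SFC_DP_RX_FEAT_FLOW_MARK. The values are hypothetical.
 */
#if 0
static const struct rte_flow_action_mark example_mark_conf = { .id = 1 };
static const struct rte_flow_action_queue example_queue_conf = { .index = 0 };
static const struct rte_flow_action example_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_mark_conf },
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue_conf },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif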
1719
1720 /**
1721  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1722  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1723  * specifications after copying.
1724  *
1725  * @param spec[in, out]
1726  *   SFC flow specification to update.
1727  * @param filters_count_for_one_val[in]
1728  *   How many specifications should have the same match flag; this equals
1729  *   the number of specifications before copying.
1730  * @param error[out]
1731  *   Perform verbose error reporting if not NULL.
1732  */
1733 static int
1734 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1735                                unsigned int filters_count_for_one_val,
1736                                struct rte_flow_error *error)
1737 {
1738         unsigned int i;
1739         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1740         static const efx_filter_match_flags_t vals[] = {
1741                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1742                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1743         };
1744
1745         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1746                 rte_flow_error_set(error, EINVAL,
1747                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1748                         "Number of specifications is incorrect while copying "
1749                         "by unknown destination flags");
1750                 return -rte_errno;
1751         }
1752
1753         for (i = 0; i < spec_filter->count; i++) {
1754                 /* The check above ensures that the divisor can't be zero here */
1755                 spec_filter->filters[i].efs_match_flags |=
1756                         vals[i / filters_count_for_one_val];
1757         }
1758
1759         return 0;
1760 }
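
/*
 * Worked example for sfc_flow_set_unknown_dst_flags(): with two original
 * specifications (filters_count_for_one_val == 2) expanded to a count of
 * four, vals[i / 2] assigns EFX_FILTER_MATCH_UNKNOWN_UCAST_DST to
 * filters 0..1 and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST to filters 2..3.
 */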
1761
1762 /**
1763  * Check that the following condition is met:
1764  * - the list of supported filters has a filter
1765  *   with the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1766  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1767  *   be inserted.
1768  *
1769  * @param match[in]
1770  *   The match flags of the filter.
1771  * @param spec[in]
1772  *   Specification to be supplemented.
1773  * @param filter[in]
1774  *   SFC filter with list of supported filters.
1775  */
1776 static boolean_t
1777 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1778                                  __rte_unused efx_filter_spec_t *spec,
1779                                  struct sfc_filter *filter)
1780 {
1781         unsigned int i;
1782         efx_filter_match_flags_t match_mcast_dst;
1783
1784         match_mcast_dst =
1785                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1786                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1787         for (i = 0; i < filter->supported_match_num; i++) {
1788                 if (match_mcast_dst == filter->supported_match[i])
1789                         return B_TRUE;
1790         }
1791
1792         return B_FALSE;
1793 }
1794
1795 /**
1796  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1797  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1798  * specifications after copying.
1799  *
1800  * @param spec[in, out]
1801  *   SFC flow specification to update.
1802  * @param filters_count_for_one_val[in]
1803  *   How many specifications should have the same EtherType value; this
1804  *   equals the number of specifications before copying.
1805  * @param error[out]
1806  *   Perform verbose error reporting if not NULL.
1807  */
1808 static int
1809 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1810                         unsigned int filters_count_for_one_val,
1811                         struct rte_flow_error *error)
1812 {
1813         unsigned int i;
1814         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1815         static const uint16_t vals[] = {
1816                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1817         };
1818
1819         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1820                 rte_flow_error_set(error, EINVAL,
1821                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1822                         "Number of specifications is incorrect "
1823                         "while copying by Ethertype");
1824                 return -rte_errno;
1825         }
1826
1827         for (i = 0; i < spec_filter->count; i++) {
1828                 spec_filter->filters[i].efs_match_flags |=
1829                         EFX_FILTER_MATCH_ETHER_TYPE;
1830
1831                 /*
1832                  * The check above ensures that
1833                  * filters_count_for_one_val is not 0
1834                  */
1835                 spec_filter->filters[i].efs_ether_type =
1836                         vals[i / filters_count_for_one_val];
1837         }
1838
1839         return 0;
1840 }
1841
1842 /**
1843  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1844  * in the same specifications after copying.
1845  *
1846  * @param spec[in, out]
1847  *   SFC flow specification to update.
1848  * @param filters_count_for_one_val[in]
1849  *   How many specifications should have the same match flag; this equals
1850  *   the number of specifications before copying.
1851  * @param error[out]
1852  *   Perform verbose error reporting if not NULL.
1853  */
1854 static int
1855 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1856                             unsigned int filters_count_for_one_val,
1857                             struct rte_flow_error *error)
1858 {
1859         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1860         unsigned int i;
1861
1862         if (filters_count_for_one_val != spec_filter->count) {
1863                 rte_flow_error_set(error, EINVAL,
1864                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1865                         "Number of specifications is incorrect "
1866                         "while copying by outer VLAN ID");
1867                 return -rte_errno;
1868         }
1869
1870         for (i = 0; i < spec_filter->count; i++) {
1871                 spec_filter->filters[i].efs_match_flags |=
1872                         EFX_FILTER_MATCH_OUTER_VID;
1873
1874                 spec_filter->filters[i].efs_outer_vid = 0;
1875         }
1876
1877         return 0;
1878 }
1879
1880 /**
1881  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1882  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1883  * specifications after copying.
1884  *
1885  * @param spec[in, out]
1886  *   SFC flow specification to update.
1887  * @param filters_count_for_one_val[in]
1888  *   How many specifications should have the same match flag; this equals
1889  *   the number of specifications before copying.
1890  * @param error[out]
1891  *   Perform verbose error reporting if not NULL.
1892  */
1893 static int
1894 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1895                                     unsigned int filters_count_for_one_val,
1896                                     struct rte_flow_error *error)
1897 {
1898         unsigned int i;
1899         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1900         static const efx_filter_match_flags_t vals[] = {
1901                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1902                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1903         };
1904
1905         if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1906                 rte_flow_error_set(error, EINVAL,
1907                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1908                         "Number of specifications is incorrect while copying "
1909                         "by inner frame unknown destination flags");
1910                 return -rte_errno;
1911         }
1912
1913         for (i = 0; i < spec_filter->count; i++) {
1914                 /* The check above ensures that the divisor can't be zero here */
1915                 spec_filter->filters[i].efs_match_flags |=
1916                         vals[i / filters_count_for_one_val];
1917         }
1918
1919         return 0;
1920 }
1921
1922 /**
1923  * Check that the following conditions are met:
1924  * - the specification corresponds to a filter for encapsulated traffic
1925  * - the list of supported filters has a filter
1926  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1927  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1928  *   be inserted.
1929  *
1930  * @param match[in]
1931  *   The match flags of the filter.
1932  * @param spec[in]
1933  *   Specification to be supplemented.
1934  * @param filter[in]
1935  *   SFC filter with list of supported filters.
1936  */
1937 static boolean_t
1938 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1939                                       efx_filter_spec_t *spec,
1940                                       struct sfc_filter *filter)
1941 {
1942         unsigned int i;
1943         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1944         efx_filter_match_flags_t match_mcast_dst;
1945
1946         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1947                 return B_FALSE;
1948
1949         match_mcast_dst =
1950                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1951                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1952         for (i = 0; i < filter->supported_match_num; i++) {
1953                 if (match_mcast_dst == filter->supported_match[i])
1954                         return B_TRUE;
1955         }
1956
1957         return B_FALSE;
1958 }
1959
1960 /**
1961  * Check that the list of supported filters has a filter that differs
1962  * from @p match in that it has no EFX_FILTER_MATCH_OUTER_VID flag;
1963  * in this case, that filter will be used and the
1964  * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
1965  *
1966  * @param match[in]
1967  *   The match flags of the filter.
1968  * @param spec[in]
1969  *   Specification to be supplemented.
1970  * @param filter[in]
1971  *   SFC filter with list of supported filters.
1972  */
1973 static boolean_t
1974 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
1975                               __rte_unused efx_filter_spec_t *spec,
1976                               struct sfc_filter *filter)
1977 {
1978         unsigned int i;
1979         efx_filter_match_flags_t match_without_vid =
1980                 match & ~EFX_FILTER_MATCH_OUTER_VID;
1981
1982         for (i = 0; i < filter->supported_match_num; i++) {
1983                 if (match_without_vid == filter->supported_match[i])
1984                         return B_FALSE;
1985         }
1986
1987         return B_TRUE;
1988 }
1989
1990 /*
1991  * Match flags that can be automatically added to filters.
1992  * Selecting the last minimum when searching for the copy flag ensures that the
1993  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
1994  * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
1995  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
1996  * filters.
1997  */
1998 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
1999         {
2000                 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2001                 .vals_count = 2,
2002                 .set_vals = sfc_flow_set_unknown_dst_flags,
2003                 .spec_check = sfc_flow_check_unknown_dst_flags,
2004         },
2005         {
2006                 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
2007                 .vals_count = 2,
2008                 .set_vals = sfc_flow_set_ethertypes,
2009                 .spec_check = NULL,
2010         },
2011         {
2012                 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2013                 .vals_count = 2,
2014                 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2015                 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2016         },
2017         {
2018                 .flag = EFX_FILTER_MATCH_OUTER_VID,
2019                 .vals_count = 1,
2020                 .set_vals = sfc_flow_set_outer_vid_flag,
2021                 .spec_check = sfc_flow_check_outer_vid_flag,
2022         },
2023 };
2024
2025 /* Get an entry of the sfc_flow_copy_flags array by match flag */
2026 static const struct sfc_flow_copy_flag *
2027 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2028 {
2029         unsigned int i;
2030
2031         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2032                 if (sfc_flow_copy_flags[i].flag == flag)
2033                         return &sfc_flow_copy_flags[i];
2034         }
2035
2036         return NULL;
2037 }
2038
2039 /**
2040  * Make copies of the specifications, set the match flag and the values
2041  * of the corresponding field.
2042  *
2043  * @param spec[in, out]
2044  *   SFC flow specification to update.
2045  * @param flag[in]
2046  *   The match flag to add.
2047  * @param error[out]
2048  *   Perform verbose error reporting if not NULL.
2049  */
2050 static int
2051 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2052                              efx_filter_match_flags_t flag,
2053                              struct rte_flow_error *error)
2054 {
2055         unsigned int i;
2056         unsigned int new_filters_count;
2057         unsigned int filters_count_for_one_val;
2058         const struct sfc_flow_copy_flag *copy_flag;
2059         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2060         int rc;
2061
2062         copy_flag = sfc_flow_get_copy_flag(flag);
2063         if (copy_flag == NULL) {
2064                 rte_flow_error_set(error, ENOTSUP,
2065                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2066                                    "Unsupported spec field for copying");
2067                 return -rte_errno;
2068         }
2069
2070         new_filters_count = spec_filter->count * copy_flag->vals_count;
2071         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2072                 rte_flow_error_set(error, EINVAL,
2073                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2074                         "Too many EFX specifications in the flow rule");
2075                 return -rte_errno;
2076         }
2077
2078         /* Copy the filter specifications */
2079         for (i = spec_filter->count; i < new_filters_count; i++) {
2080                 spec_filter->filters[i] =
2081                         spec_filter->filters[i - spec_filter->count];
2082         }
2083
2084         filters_count_for_one_val = spec_filter->count;
2085         spec_filter->count = new_filters_count;
2086
2087         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2088         if (rc != 0)
2089                 return rc;
2090
2091         return 0;
2092 }
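
/*
 * Worked example for sfc_flow_spec_add_match_flag() with hypothetical
 * counts: with spec_filter->count == 2 and vals_count == 2, filters 2..3
 * become copies of filters 0..1, the count grows to four, and set_vals()
 * then distributes the per-copy flag values across the specifications.
 */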
2093
2094 /**
2095  * Check that the given set of match flags missing in the original filter spec
2096  * could be covered by adding spec copies which specify the corresponding
2097  * flags and packet field values to match.
2098  *
2099  * @param miss_flags[in]
2100  *   Match flags that the specification lacks compared to a supported filter.
2101  * @param spec[in]
2102  *   Specification to be supplemented.
2103  * @param filter[in]
2104  *   SFC filter.
2105  *
2106  * @return
2107  *   Number of specifications after copying, or 0 if the flags cannot be added.
2108  */
2109 static unsigned int
2110 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2111                              efx_filter_spec_t *spec,
2112                              struct sfc_filter *filter)
2113 {
2114         unsigned int i;
2115         efx_filter_match_flags_t copy_flags = 0;
2116         efx_filter_match_flags_t flag;
2117         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2118         sfc_flow_spec_check *check;
2119         unsigned int multiplier = 1;
2120
2121         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2122                 flag = sfc_flow_copy_flags[i].flag;
2123                 check = sfc_flow_copy_flags[i].spec_check;
2124                 if ((flag & miss_flags) == flag) {
2125                         if (check != NULL && (!check(match, spec, filter)))
2126                                 continue;
2127
2128                         copy_flags |= flag;
2129                         multiplier *= sfc_flow_copy_flags[i].vals_count;
2130                 }
2131         }
2132
2133         if (copy_flags == miss_flags)
2134                 return multiplier;
2135
2136         return 0;
2137 }
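
/*
 * Worked example for sfc_flow_check_missing_flags(): if miss_flags is
 * EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and
 * both flags pass their spec_check, the returned multiplier is
 * 2 * 2 == 4, i.e. the number of specifications grows fourfold after
 * copying.
 */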
2138
2139 /**
2140  * Attempt to supplement the specification template up to a minimal
2141  * supported set of match flags. To do this, the specifications are
2142  * copied and filled in with the values of the fields that correspond
2143  * to the missing flags.
2144  * The necessary and sufficient filter set is built from the fewest
2145  * number of copies that covers the minimally required set
2146  * of flags.
2147  *
2148  * @param sa[in]
2149  *   SFC adapter.
2150  * @param spec[in, out]
2151  *   SFC flow specification to update.
2152  * @param error[out]
2153  *   Perform verbose error reporting if not NULL.
2154  */
2155 static int
2156 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2157                                struct sfc_flow_spec *spec,
2158                                struct rte_flow_error *error)
2159 {
2160         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2161         struct sfc_filter *filter = &sa->filter;
2162         efx_filter_match_flags_t miss_flags;
2163         efx_filter_match_flags_t min_miss_flags = 0;
2164         efx_filter_match_flags_t match;
2165         unsigned int min_multiplier = UINT_MAX;
2166         unsigned int multiplier;
2167         unsigned int i;
2168         int rc;
2169
2170         match = spec_filter->template.efs_match_flags;
2171         for (i = 0; i < filter->supported_match_num; i++) {
2172                 if ((match & filter->supported_match[i]) == match) {
2173                         miss_flags = filter->supported_match[i] & (~match);
2174                         multiplier = sfc_flow_check_missing_flags(miss_flags,
2175                                 &spec_filter->template, filter);
2176                         if (multiplier > 0) {
2177                                 if (multiplier <= min_multiplier) {
2178                                         min_multiplier = multiplier;
2179                                         min_miss_flags = miss_flags;
2180                                 }
2181                         }
2182                 }
2183         }
2184
2185         if (min_multiplier == UINT_MAX) {
2186                 rte_flow_error_set(error, ENOTSUP,
2187                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2188                                    "The flow rule pattern is unsupported");
2189                 return -rte_errno;
2190         }
2191
2192         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2193                 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2194
2195                 if ((flag & min_miss_flags) == flag) {
2196                         rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2197                         if (rc != 0)
2198                                 return rc;
2199                 }
2200         }
2201
2202         return 0;
2203 }
2204
2205 /**
2206  * Check that the set of match flags corresponds to the given pattern of
2207  * filter match flags, optionally extended with the OUTER_VID and
2208  * INNER_VID flags.
2209  *
2210  * @param match_flags[in]
2211  *   Set of match flags.
2212  * @param flags_pattern[in]
2213  *   Pattern of filter match flags.
2214  */
2215 static boolean_t
2216 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2217                             efx_filter_match_flags_t flags_pattern)
2218 {
2219         if ((match_flags & flags_pattern) != flags_pattern)
2220                 return B_FALSE;
2221
2222         switch (match_flags & ~flags_pattern) {
2223         case 0:
2224         case EFX_FILTER_MATCH_OUTER_VID:
2225         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2226                 return B_TRUE;
2227         default:
2228                 return B_FALSE;
2229         }
2230 }
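
/*
 * Worked example for sfc_flow_is_match_with_vids(): match flags
 * ETHER_TYPE | OUTER_VID fit the pattern ETHER_TYPE, while
 * ETHER_TYPE | LOC_MAC do not, since LOC_MAC is neither part of the
 * pattern nor one of the permitted VID flags.
 */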
2231
2232 /**
2233  * Check whether the spec maps to a hardware filter which is known to be
2234  * ineffective despite being valid.
2235  *
2236  * @param filter[in]
2237  *   SFC filter with list of supported filters.
2238  * @param spec[in]
2239  *   SFC flow specification.
2240  */
2241 static boolean_t
2242 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2243                                   struct sfc_flow_spec *spec)
2244 {
2245         unsigned int i;
2246         uint16_t ether_type;
2247         uint8_t ip_proto;
2248         efx_filter_match_flags_t match_flags;
2249         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2250
2251         for (i = 0; i < spec_filter->count; i++) {
2252                 match_flags = spec_filter->filters[i].efs_match_flags;
2253
2254                 if (sfc_flow_is_match_with_vids(match_flags,
2255                                                 EFX_FILTER_MATCH_ETHER_TYPE) ||
2256                     sfc_flow_is_match_with_vids(match_flags,
2257                                                 EFX_FILTER_MATCH_ETHER_TYPE |
2258                                                 EFX_FILTER_MATCH_LOC_MAC)) {
2259                         ether_type = spec_filter->filters[i].efs_ether_type;
2260                         if (filter->supports_ip_proto_or_addr_filter &&
2261                             (ether_type == EFX_ETHER_TYPE_IPV4 ||
2262                              ether_type == EFX_ETHER_TYPE_IPV6))
2263                                 return B_TRUE;
2264                 } else if (sfc_flow_is_match_with_vids(match_flags,
2265                                 EFX_FILTER_MATCH_ETHER_TYPE |
2266                                 EFX_FILTER_MATCH_IP_PROTO) ||
2267                            sfc_flow_is_match_with_vids(match_flags,
2268                                 EFX_FILTER_MATCH_ETHER_TYPE |
2269                                 EFX_FILTER_MATCH_IP_PROTO |
2270                                 EFX_FILTER_MATCH_LOC_MAC)) {
2271                         ip_proto = spec_filter->filters[i].efs_ip_proto;
2272                         if (filter->supports_rem_or_local_port_filter &&
2273                             (ip_proto == EFX_IPPROTO_TCP ||
2274                              ip_proto == EFX_IPPROTO_UDP))
2275                                 return B_TRUE;
2276                 }
2277         }
2278
2279         return B_FALSE;
2280 }
2281
2282 static int
2283 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2284                               struct rte_flow *flow,
2285                               struct rte_flow_error *error)
2286 {
2287         struct sfc_flow_spec *spec = &flow->spec;
2288         struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2289         efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2290         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2291         int rc;
2292
2293         /* Initialize the first filter spec with the template */
2294         spec_filter->filters[0] = *spec_tmpl;
2295         spec_filter->count = 1;
2296
2297         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2298                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2299                 if (rc != 0)
2300                         return rc;
2301         }
2302
2303         if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2304                 rte_flow_error_set(error, ENOTSUP,
2305                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2306                         "The flow rule pattern is unsupported");
2307                 return -rte_errno;
2308         }
2309
2310         return 0;
2311 }
2312
2313 static int
2314 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2315                              const struct rte_flow_item pattern[],
2316                              const struct rte_flow_action actions[],
2317                              struct rte_flow *flow,
2318                              struct rte_flow_error *error)
2319 {
2320         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2321         int rc;
2322
2323         rc = sfc_flow_parse_pattern(pattern, flow, error);
2324         if (rc != 0)
2325                 goto fail_bad_value;
2326
2327         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2328         if (rc != 0)
2329                 goto fail_bad_value;
2330
2331         rc = sfc_flow_validate_match_flags(sa, flow, error);
2332         if (rc != 0)
2333                 goto fail_bad_value;
2334
2335         return 0;
2336
2337 fail_bad_value:
2338         return rc;
2339 }
2340
2341 static int
2342 sfc_flow_parse(struct rte_eth_dev *dev,
2343                const struct rte_flow_attr *attr,
2344                const struct rte_flow_item pattern[],
2345                const struct rte_flow_action actions[],
2346                struct rte_flow *flow,
2347                struct rte_flow_error *error)
2348 {
2349         const struct sfc_flow_ops_by_spec *ops;
2350         int rc;
2351
2352         rc = sfc_flow_parse_attr(attr, flow, error);
2353         if (rc != 0)
2354                 return rc;
2355
2356         ops = sfc_flow_get_ops_by_spec(flow);
2357         if (ops == NULL || ops->parse == NULL) {
2358                 rte_flow_error_set(error, ENOTSUP,
2359                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2360                                    "No backend to handle this flow");
2361                 return -rte_errno;
2362         }
2363
2364         return ops->parse(dev, pattern, actions, flow, error);
2365 }
2366
2367 static struct rte_flow *
2368 sfc_flow_zmalloc(struct rte_flow_error *error)
2369 {
2370         struct rte_flow *flow;
2371
2372         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2373         if (flow == NULL) {
2374                 rte_flow_error_set(error, ENOMEM,
2375                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2376                                    "Failed to allocate memory");
2377         }
2378
2379         return flow;
2380 }
2381
2382 static void
2383 sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow)
2384 {
2385         rte_free(flow);
2386 }
2387
2388 static int
2389 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2390                 struct rte_flow_error *error)
2391 {
2392         const struct sfc_flow_ops_by_spec *ops;
2393         int rc;
2394
2395         ops = sfc_flow_get_ops_by_spec(flow);
2396         if (ops == NULL || ops->insert == NULL) {
2397                 rte_flow_error_set(error, ENOTSUP,
2398                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2399                                    "No backend to handle this flow");
2400                 return rte_errno;
2401         }
2402
2403         rc = ops->insert(sa, flow);
2404         if (rc != 0) {
2405                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2406                                    NULL, "Failed to insert the flow rule");
2407         }
2408
2409         return rc;
2410 }
2411
2412 static int
2413 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2414                 struct rte_flow_error *error)
2415 {
2416         const struct sfc_flow_ops_by_spec *ops;
2417         int rc;
2418
2419         ops = sfc_flow_get_ops_by_spec(flow);
2420         if (ops == NULL || ops->remove == NULL) {
2421                 rte_flow_error_set(error, ENOTSUP,
2422                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2423                                    "No backend to handle this flow");
2424                 return rte_errno;
2425         }
2426
2427         rc = ops->remove(sa, flow);
2428         if (rc != 0) {
2429                 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2430                                    NULL, "Failed to remove the flow rule");
2431         }
2432
2433         return rc;
2434 }
2435
2436 static int
2437 sfc_flow_validate(struct rte_eth_dev *dev,
2438                   const struct rte_flow_attr *attr,
2439                   const struct rte_flow_item pattern[],
2440                   const struct rte_flow_action actions[],
2441                   struct rte_flow_error *error)
2442 {
2443         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2444         struct rte_flow *flow;
2445         int rc;
2446
2447         flow = sfc_flow_zmalloc(error);
2448         if (flow == NULL)
2449                 return -rte_errno;
2450
2451         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2452
2453         sfc_flow_free(sa, flow);
2454
2455         return rc;
2456 }
2457
2458 static struct rte_flow *
2459 sfc_flow_create(struct rte_eth_dev *dev,
2460                 const struct rte_flow_attr *attr,
2461                 const struct rte_flow_item pattern[],
2462                 const struct rte_flow_action actions[],
2463                 struct rte_flow_error *error)
2464 {
2465         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2466         struct rte_flow *flow = NULL;
2467         int rc;
2468
2469         flow = sfc_flow_zmalloc(error);
2470         if (flow == NULL)
2471                 goto fail_no_mem;
2472
2473         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2474         if (rc != 0)
2475                 goto fail_bad_value;
2476
2477         sfc_adapter_lock(sa);
2478
2479         TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2480
2481         if (sa->state == SFC_ADAPTER_STARTED) {
2482                 rc = sfc_flow_insert(sa, flow, error);
2483                 if (rc != 0)
2484                         goto fail_flow_insert;
2485         }
2486
2487         sfc_adapter_unlock(sa);
2488
2489         return flow;
2490
2491 fail_flow_insert:
2492         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2493
2494 fail_bad_value:
2495         sfc_flow_free(sa, flow);
2496         sfc_adapter_unlock(sa);
2497
2498 fail_no_mem:
2499         return NULL;
2500 }
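
/*
 * Example (illustrative sketch, excluded from the build): application-side
 * usage that reaches sfc_flow_create() through the generic rte_flow API.
 * It is assumed to run inside a function where port_id, attr, pattern and
 * actions are already defined.
 */
#if 0
struct rte_flow_error flow_err;
struct rte_flow *handle;

handle = rte_flow_create(port_id, &attr, pattern, actions, &flow_err);
if (handle == NULL)
        printf("flow creation failed: %s\n",
               flow_err.message != NULL ? flow_err.message : "(no message)");
#endif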
2501
2502 static int
2503 sfc_flow_destroy(struct rte_eth_dev *dev,
2504                  struct rte_flow *flow,
2505                  struct rte_flow_error *error)
2506 {
2507         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2508         struct rte_flow *flow_ptr;
2509         int rc = EINVAL;
2510
2511         sfc_adapter_lock(sa);
2512
2513         TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2514                 if (flow_ptr == flow)
2515                         rc = 0;
2516         }
2517         if (rc != 0) {
2518                 rte_flow_error_set(error, rc,
2519                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2520                                    "Failed to find flow rule to destroy");
2521                 goto fail_bad_value;
2522         }
2523
2524         if (sa->state == SFC_ADAPTER_STARTED)
2525                 rc = sfc_flow_remove(sa, flow, error);
2526
2527         TAILQ_REMOVE(&sa->flow_list, flow, entries);
2528         sfc_flow_free(sa, flow);
2529
2530 fail_bad_value:
2531         sfc_adapter_unlock(sa);
2532
2533         return -rc;
2534 }
2535
2536 static int
2537 sfc_flow_flush(struct rte_eth_dev *dev,
2538                struct rte_flow_error *error)
2539 {
2540         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2541         struct rte_flow *flow;
2542         int ret = 0;
2543
2544         sfc_adapter_lock(sa);
2545
2546         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2547                 if (sa->state == SFC_ADAPTER_STARTED) {
2548                         int rc;
2549
2550                         rc = sfc_flow_remove(sa, flow, error);
2551                         if (rc != 0)
2552                                 ret = rc;
2553                 }
2554
2555                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2556                 sfc_flow_free(sa, flow);
2557         }
2558
2559         sfc_adapter_unlock(sa);
2560
2561         return -ret;
2562 }
2563
2564 static int
2565 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2566                  struct rte_flow_error *error)
2567 {
2568         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2569         int ret = 0;
2570
2571         sfc_adapter_lock(sa);
2572         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2573                 rte_flow_error_set(error, EBUSY,
2574                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2575                                    NULL, "please close the port first");
2576                 ret = -rte_errno;
2577         } else {
2578                 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2579         }
2580         sfc_adapter_unlock(sa);
2581
2582         return ret;
2583 }
2584
2585 const struct rte_flow_ops sfc_flow_ops = {
2586         .validate = sfc_flow_validate,
2587         .create = sfc_flow_create,
2588         .destroy = sfc_flow_destroy,
2589         .flush = sfc_flow_flush,
2590         .query = NULL,
2591         .isolate = sfc_flow_isolate,
2592 };
2593
2594 void
2595 sfc_flow_init(struct sfc_adapter *sa)
2596 {
2597         SFC_ASSERT(sfc_adapter_is_locked(sa));
2598
2599         TAILQ_INIT(&sa->flow_list);
2600 }
2601
2602 void
2603 sfc_flow_fini(struct sfc_adapter *sa)
2604 {
2605         struct rte_flow *flow;
2606
2607         SFC_ASSERT(sfc_adapter_is_locked(sa));
2608
2609         while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2610                 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2611                 sfc_flow_free(sa, flow);
2612         }
2613 }
2614
2615 void
2616 sfc_flow_stop(struct sfc_adapter *sa)
2617 {
2618         struct rte_flow *flow;
2619
2620         SFC_ASSERT(sfc_adapter_is_locked(sa));
2621
2622         TAILQ_FOREACH(flow, &sa->flow_list, entries)
2623                 sfc_flow_remove(sa, flow, NULL);
2624 }
2625
2626 int
2627 sfc_flow_start(struct sfc_adapter *sa)
2628 {
2629         struct rte_flow *flow;
2630         int rc = 0;
2631
2632         sfc_log_init(sa, "entry");
2633
2634         SFC_ASSERT(sfc_adapter_is_locked(sa));
2635
2636         TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2637                 rc = sfc_flow_insert(sa, flow, NULL);
2638                 if (rc != 0)
2639                         goto fail_bad_flow;
2640         }
2641
2642         sfc_log_init(sa, "done");
2643
2644 fail_bad_flow:
2645         return rc;
2646 }