net/sfc: multiply of specs w/o inner frame destination MAC
[dpdk.git] / drivers / net / sfc / sfc_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * At the moment, the flow API is implemented in such a manner that each
 * flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is unset in the flow rule, a number of
 * filter copies will be created to cover all possible values
 * of such a field.
 */
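
/*
 * Illustrative sketch (not driver code): if, for instance, the EtherType
 * is left unset but the hardware match requires it, the template is
 * multiplied into copies along the lines of
 *
 *   copy[0].efs_ether_type = EFX_ETHER_TYPE_IPV4;
 *   copy[1].efs_ether_type = EFX_ETHER_TYPE_IPV6;
 *
 * "copy" is a hypothetical name; see sfc_flow_set_ethertypes() and
 * sfc_flow_set_ifrm_unknown_dst_flags() declared below for the actual
 * value setters.
 */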

enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
        SFC_FLOW_ITEM_L3,
        SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of item */
        enum sfc_flow_item_layers layer;        /* Layer of item */
        enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask structures for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t match;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec" then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask and spec do not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                match = spec[i] | mask[i];
                supp = ((const uint8_t *)supp_mask)[i];

                if ((match | supp) != supp) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}
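
/*
 * Usage sketch (illustrative only): for an item with item->mask == NULL,
 * the caller-provided def_mask (e.g. rte_flow_item_eth_mask) is picked up
 * above, and the per-byte loop rejects any bit that is set in spec or
 * mask but absent from supp_mask, so the parsers below may assume that
 * only supported fields are requested.
 */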

/*
 * Protocol parsers.
 * Partial masking is not supported, so masks in items must be either
 * full or empty (zeroed) and set only for supported fields, which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param[in,out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * EtherType masks are equal to zero in the inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}
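
/*
 * Example (illustrative only): matching all multicast destinations takes
 * just the individual/group bit in both spec and mask:
 *
 *   struct rte_flow_item_eth spec = {
 *           .dst.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
 *   };
 *   struct rte_flow_item_eth mask = spec;
 *
 * which sfc_flow_parse_eth() maps to EFX_FILTER_MATCH_UNKNOWN_MCAST_DST.
 */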

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in,out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                vid = rte_bswap16(spec->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        return 0;
}
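
/*
 * Example (illustrative only): a double-tagged (QinQ) match takes two
 * VLAN items, each with the VID mask set to 0xfff, e.g.
 *
 *   outer.tci = rte_cpu_to_be_16(10);  (fills efs_outer_vid)
 *   inner.tci = rte_cpu_to_be_16(20);  (fills efs_inner_vid)
 *
 * in pattern order, as implemented above.
 */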

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param[in,out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}
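
/*
 * Example (illustrative, testpmd-like syntax): a pattern of
 * "eth / ipv4 dst is 192.0.2.1" yields EFX_FILTER_MATCH_ETHER_TYPE with
 * the IPv4 EtherType plus EFX_FILTER_MATCH_LOC_HOST carrying the address,
 * even though the ETH item itself sets no fields.
 */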

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param[in,out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in,out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}
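
/*
 * Example (illustrative only): an item with hdr.dst_port ==
 * rte_cpu_to_be_16(80) and a full port mask sets
 * EFX_FILTER_MATCH_LOC_PORT, and on a little-endian host efs_loc_port
 * holds 80 after the byte swap above.
 */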

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in,out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}
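
/*
 * Note: if the pattern omits the outer L3/L4 items (e.g. it starts
 * directly with the tunnel item), EFX_FILTER_MATCH_IP_PROTO is still set
 * here from the ip_proto value passed in by the tunnel parser, so the
 * hardware filter always carries the outer IP protocol.
 */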

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only VXLAN network identifier field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in,out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}
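
/*
 * Example (illustrative, testpmd-like syntax):
 *
 *   pattern eth / ipv4 / udp / vxlan vni is 42 / eth / end
 *
 * The items before VXLAN describe the outer frame; the trailing ETH item
 * is parsed by sfc_flow_parse_eth() with efs_encap_type already set, so
 * it may match the inner destination MAC only.
 */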

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only Virtual Network Identifier and protocol type
 *   fields are supported, and the protocol type may only be Ethernet
 *   (0x6558). If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in,out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      efx_filter_spec_t *efx_spec,
                      struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only virtual subnet ID field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in,out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_nvgre,
        },
};
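
/*
 * The layer/prev_layer pairs above drive the ordering check in
 * sfc_flow_parse_pattern(): e.g. a TCP item (prev_layer = L3) may only
 * open the pattern or follow an item that ends at L3 (IPv4/IPv6), while
 * the tunnel items reset the layer to START_LAYER so that an inner ETH
 * item may follow them.
 */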

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
        flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

        return 0;
}
/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                rc = item->parse(pattern, &flow->spec.template, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_rxq *rxq;

        if (queue->index >= sa->rxq_count)
                return -EINVAL;

        rxq = sa->rxq_info[queue->index].rxq;
        flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

#if EFSYS_OPT_RX_SCALE
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
                   const struct rte_flow_action_rss *rss,
                   struct rte_flow *flow)
{
        unsigned int rxq_sw_index;
        struct sfc_rxq *rxq;
        unsigned int rxq_hw_index_min;
        unsigned int rxq_hw_index_max;
        const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
        uint64_t rss_hf;
        uint8_t *rss_key = NULL;
        struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
        unsigned int i;

        if (rss->num == 0)
                return -EINVAL;

        rxq_sw_index = sa->rxq_count - 1;
        rxq = sa->rxq_info[rxq_sw_index].rxq;
        rxq_hw_index_min = rxq->hw_index;
        rxq_hw_index_max = 0;

        for (i = 0; i < rss->num; ++i) {
                rxq_sw_index = rss->queue[i];

                if (rxq_sw_index >= sa->rxq_count)
                        return -EINVAL;

                rxq = sa->rxq_info[rxq_sw_index].rxq;

                if (rxq->hw_index < rxq_hw_index_min)
                        rxq_hw_index_min = rxq->hw_index;

                if (rxq->hw_index > rxq_hw_index_max)
                        rxq_hw_index_max = rxq->hw_index;
        }

        rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
        if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
                return -EINVAL;

        if (rss_conf != NULL) {
                if (rss_conf->rss_key_len != sizeof(sa->rss_key))
                        return -EINVAL;

                rss_key = rss_conf->rss_key;
        } else {
                rss_key = sa->rss_key;
        }

        flow->rss = B_TRUE;

        sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
        sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
        sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
        rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));

        for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
                unsigned int rxq_sw_index = rss->queue[i % rss->num];
                struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;

                sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
        }

        return 0;
}
#endif /* EFSYS_OPT_RX_SCALE */
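
/*
 * Example (illustrative only): if rss->queue refers to RxQs with hardware
 * indices 4 and 5, rxq_hw_index_min is 4 and rss_tbl is filled with the
 * offsets { 0, 1, 0, 1, ... }; the base queue itself is supplied later
 * through efs_dmaq_id in sfc_flow_filter_insert().
 */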

static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
                    unsigned int filters_count)
{
        unsigned int i;
        int ret = 0;

        for (i = 0; i < filters_count; i++) {
                int rc;

                rc = efx_filter_remove(sa->nic, &spec->filters[i]);
                if (ret == 0 && rc != 0) {
                        sfc_err(sa, "failed to remove filter specification "
                                "(rc = %d)", rc);
                        ret = rc;
                }
        }

        return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < spec->count; i++) {
                rc = efx_filter_insert(sa->nic, &spec->filters[i]);
                if (rc != 0) {
                        sfc_flow_spec_flush(sa, spec, i);
                        break;
                }
        }

        return rc;
}
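
/*
 * Note: on a mid-sequence insertion failure, the filters already inserted
 * (indices 0 .. i - 1) are flushed above, so a flow rule is either fully
 * programmed into the hardware or not programmed at all.
 */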
1332
1333 static int
1334 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1335 {
1336         return sfc_flow_spec_flush(sa, spec, spec->count);
1337 }
1338
1339 static int
1340 sfc_flow_filter_insert(struct sfc_adapter *sa,
1341                        struct rte_flow *flow)
1342 {
1343 #if EFSYS_OPT_RX_SCALE
1344         struct sfc_flow_rss *rss = &flow->rss_conf;
1345         uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1346         unsigned int i;
1347         int rc = 0;
1348
1349         if (flow->rss) {
1350                 unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
1351                                               rss->rxq_hw_index_min + 1,
1352                                               EFX_MAXRSS);
1353
1354                 rc = efx_rx_scale_context_alloc(sa->nic,
1355                                                 EFX_RX_SCALE_EXCLUSIVE,
1356                                                 rss_spread,
1357                                                 &efs_rss_context);
1358                 if (rc != 0)
1359                         goto fail_scale_context_alloc;
1360
1361                 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1362                                            EFX_RX_HASHALG_TOEPLITZ,
1363                                            rss->rss_hash_types, B_TRUE);
1364                 if (rc != 0)
1365                         goto fail_scale_mode_set;
1366
1367                 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1368                                           rss->rss_key,
1369                                           sizeof(sa->rss_key));
1370                 if (rc != 0)
1371                         goto fail_scale_key_set;
1372
1373                 /*
1374                  * At this point, fully elaborated filter specifications
1375                  * have been produced from the template. To make sure that
1376                  * RSS behaviour is consistent between them, set the same
1377                  * RSS context value everywhere.
1378                  */
1379                 for (i = 0; i < flow->spec.count; i++) {
1380                         efx_filter_spec_t *spec = &flow->spec.filters[i];
1381
1382                         spec->efs_rss_context = efs_rss_context;
1383                         spec->efs_dmaq_id = rss->rxq_hw_index_min;
1384                         spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1385                 }
1386         }
1387
1388         rc = sfc_flow_spec_insert(sa, &flow->spec);
1389         if (rc != 0)
1390                 goto fail_filter_insert;
1391
1392         if (flow->rss) {
1393                 /*
1394                  * The scale table is set after filter insertion because
1395                  * the table entries are relative to the base RxQ ID,
1396                  * and the latter is submitted to the HW by means of
1397                  * inserting a filter. So by the time of this request
1398                  * the HW knows all the information needed to verify
1399                  * the table entries, and the operation will succeed.
1400                  */
1401                 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1402                                           rss->rss_tbl, RTE_DIM(rss->rss_tbl));
1403                 if (rc != 0)
1404                         goto fail_scale_tbl_set;
1405         }
1406
1407         return 0;
1408
1409 fail_scale_tbl_set:
1410         sfc_flow_spec_remove(sa, &flow->spec);
1411
1412 fail_filter_insert:
1413 fail_scale_key_set:
1414 fail_scale_mode_set:
1415         if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1416                 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1417
1418 fail_scale_context_alloc:
1419         return rc;
1420 #else /* !EFSYS_OPT_RX_SCALE */
1421         return sfc_flow_spec_insert(sa, &flow->spec);
1422 #endif /* EFSYS_OPT_RX_SCALE */
1423 }
1424
1425 static int
1426 sfc_flow_filter_remove(struct sfc_adapter *sa,
1427                        struct rte_flow *flow)
1428 {
1429         int rc = 0;
1430
1431         rc = sfc_flow_spec_remove(sa, &flow->spec);
1432         if (rc != 0)
1433                 return rc;
1434
1435 #if EFSYS_OPT_RX_SCALE
1436         if (flow->rss) {
1437                 /*
1438                  * All specifications for a given flow rule have the same RSS
1439                  * context, so the RSS context value is taken from the first
1440                  * filter specification.
1441                  */
1442                 efx_filter_spec_t *spec = &flow->spec.filters[0];
1443
1444                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1445         }
1446 #endif /* EFSYS_OPT_RX_SCALE */
1447
1448         return rc;
1449 }
1450
1451 static int
1452 sfc_flow_parse_actions(struct sfc_adapter *sa,
1453                        const struct rte_flow_action actions[],
1454                        struct rte_flow *flow,
1455                        struct rte_flow_error *error)
1456 {
1457         int rc;
1458         boolean_t is_specified = B_FALSE;
1459
1460         if (actions == NULL) {
1461                 rte_flow_error_set(error, EINVAL,
1462                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1463                                    "NULL actions");
1464                 return -rte_errno;
1465         }
1466
1467         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1468                 switch (actions->type) {
1469                 case RTE_FLOW_ACTION_TYPE_VOID:
1470                         break;
1471
1472                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1473                         rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1474                         if (rc != 0) {
1475                                 rte_flow_error_set(error, EINVAL,
1476                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1477                                         "Bad QUEUE action");
1478                                 return -rte_errno;
1479                         }
1480
1481                         is_specified = B_TRUE;
1482                         break;
1483
1484 #if EFSYS_OPT_RX_SCALE
1485                 case RTE_FLOW_ACTION_TYPE_RSS:
1486                         rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1487                         if (rc != 0) {
1488                                 rte_flow_error_set(error, rc,
1489                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1490                                         "Bad RSS action");
1491                                 return -rte_errno;
1492                         }
1493
1494                         is_specified = B_TRUE;
1495                         break;
1496 #endif /* EFSYS_OPT_RX_SCALE */
1497
1498                 default:
1499                         rte_flow_error_set(error, ENOTSUP,
1500                                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1501                                            "Action is not supported");
1502                         return -rte_errno;
1503                 }
1504         }
1505
1506         if (!is_specified) {
1507                 rte_flow_error_set(error, EINVAL,
1508                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
1509                                    "Action is unspecified");
1510                 return -rte_errno;
1511         }
1512
1513         return 0;
1514 }
1515
1516 /**
1517  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag in all specifications and
1518  * fill in the corresponding field: the first half of the specifications
1519  * gets EFX_ETHER_TYPE_IPV4 and the second half gets EFX_ETHER_TYPE_IPV6.
1520  *
1521  * @param spec[in, out]
1522  *   SFC flow specification to update.
1523  * @param filters_count_for_one_val[in]
1524  *   How many specifications should have the same EtherType value; this is
1525  *   equal to the number of specifications before copying.
1526  * @param error[out]
1527  *   Perform verbose error reporting if not NULL.
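 *
 * For example, with two specifications before copying, spec->count
 * becomes 4 and filters_count_for_one_val is 2, so filters[0..1] get
 * EFX_ETHER_TYPE_IPV4 and filters[2..3] get EFX_ETHER_TYPE_IPV6.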
1528  */
1529 static int
1530 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1531                         unsigned int filters_count_for_one_val,
1532                         struct rte_flow_error *error)
1533 {
1534         unsigned int i;
1535         static const uint16_t vals[] = {
1536                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1537         };
1538
1539         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1540                 rte_flow_error_set(error, EINVAL,
1541                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1542                         "Number of specifications is incorrect "
1543                         "while copying by EtherType");
1544                 return -rte_errno;
1545         }
1546
1547         for (i = 0; i < spec->count; i++) {
1548                 spec->filters[i].efs_match_flags |=
1549                         EFX_FILTER_MATCH_ETHER_TYPE;
1550
1551                 /*
1552                  * The check above ensures that
1553                  * filters_count_for_one_val is not 0
1554                  */
1555                 spec->filters[i].efs_ether_type =
1556                         vals[i / filters_count_for_one_val];
1557         }
1558
1559         return 0;
1560 }
1561
1562 /**
1563  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1564  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1565  * specifications after copying.
1566  *
1567  * @param spec[in, out]
1568  *   SFC flow specification to update.
1569  * @param filters_count_for_one_val[in]
1570  *   How many specifications should have the same match flag; this is
1571  *   equal to the number of specifications before copying.
1572  * @param error[out]
1573  *   Perform verbose error reporting if not NULL.
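 *
 * For example, starting from a single specification, spec->count
 * becomes 2: filters[0] gets EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST
 * and filters[1] gets EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST.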
1574  */
1575 static int
1576 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1577                                     unsigned int filters_count_for_one_val,
1578                                     struct rte_flow_error *error)
1579 {
1580         unsigned int i;
1581         static const efx_filter_match_flags_t vals[] = {
1582                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1583                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1584         };
1585
1586         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1587                 rte_flow_error_set(error, EINVAL,
1588                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1589                         "Number of specifications is incorrect while copying "
1590                         "by inner frame unknown destination flags");
1591                 return -rte_errno;
1592         }
1593
1594         for (i = 0; i < spec->count; i++) {
1595                 /* The check above ensures that divisor can't be zero here */
1596                 spec->filters[i].efs_match_flags |=
1597                         vals[i / filters_count_for_one_val];
1598         }
1599
1600         return 0;
1601 }
1602
1603 /**
1604  * Check that the following conditions are met:
1605  * - the specification corresponds to a filter for encapsulated traffic
1606  * - the list of supported filters has a filter
1607  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1608  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1609  *   be inserted.
1610  *
1611  * @param match[in]
1612  *   The match flags of the filter.
1613  * @param spec[in]
1614  *   Specification to be supplemented.
1615  * @param filter[in]
1616  *   SFC filter with list of supported filters.
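 *
 * In other words, for a specification describing tunnelled traffic,
 * the match with IFRM_UNKNOWN_UCAST_DST is only considered coverable
 * if its sibling with IFRM_UNKNOWN_MCAST_DST is supported as well,
 * since a copy carrying that flag is inserted alongside.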
1617  */
1618 static boolean_t
1619 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1620                                       efx_filter_spec_t *spec,
1621                                       struct sfc_filter *filter)
1622 {
1623         unsigned int i;
1624         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1625         efx_filter_match_flags_t match_mcast_dst;
1626
1627         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1628                 return B_FALSE;
1629
1630         match_mcast_dst =
1631                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1632                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1633         for (i = 0; i < filter->supported_match_num; i++) {
1634                 if (match_mcast_dst == filter->supported_match[i])
1635                         return B_TRUE;
1636         }
1637
1638         return B_FALSE;
1639 }
1640
1641 /* Match flags that can be automatically added to filters */
1642 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
1643         {
1644                 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
1645                 .vals_count = 2,
1646                 .set_vals = sfc_flow_set_ethertypes,
1647                 .spec_check = NULL,
1648         },
1649         {
1650                 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1651                 .vals_count = 2,
1652                 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
1653                 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
1654         },
1655 };
1656
1657 /* Get item from array sfc_flow_copy_flags */
1658 static const struct sfc_flow_copy_flag *
1659 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
1660 {
1661         unsigned int i;
1662
1663         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1664                 if (sfc_flow_copy_flags[i].flag == flag)
1665                         return &sfc_flow_copy_flags[i];
1666         }
1667
1668         return NULL;
1669 }
1670
1671 /**
1672  * Make copies of the specifications, set match flag and values
1673  * of the field that corresponds to it.
1674  *
1675  * @param spec[in, out]
1676  *   SFC flow specification to update.
1677  * @param flag[in]
1678  *   The match flag to add.
1679  * @param error[out]
1680  *   Perform verbose error reporting if not NULL.
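 *
 * A sketch of the copy step for a flag with vals_count == 2 and an
 * initial spec->count of 2 (indices are illustrative):
 *
 *   spec->filters[2] = spec->filters[0];
 *   spec->filters[3] = spec->filters[1];
 *   spec->count = 4;
 *
 * after which set_vals() assigns the first field value to
 * filters[0..1] and the second one to filters[2..3].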
1681  */
1682 static int
1683 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
1684                              efx_filter_match_flags_t flag,
1685                              struct rte_flow_error *error)
1686 {
1687         unsigned int i;
1688         unsigned int new_filters_count;
1689         unsigned int filters_count_for_one_val;
1690         const struct sfc_flow_copy_flag *copy_flag;
1691         int rc;
1692
1693         copy_flag = sfc_flow_get_copy_flag(flag);
1694         if (copy_flag == NULL) {
1695                 rte_flow_error_set(error, ENOTSUP,
1696                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1697                                    "Unsupported spec field for copying");
1698                 return -rte_errno;
1699         }
1700
1701         new_filters_count = spec->count * copy_flag->vals_count;
1702         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
1703                 rte_flow_error_set(error, EINVAL,
1704                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1705                         "Too many EFX specifications in the flow rule");
1706                 return -rte_errno;
1707         }
1708
1709         /* Copy the existing filter specifications to the new slots */
1710         for (i = spec->count; i < new_filters_count; i++)
1711                 spec->filters[i] = spec->filters[i - spec->count];
1712
1713         filters_count_for_one_val = spec->count;
1714         spec->count = new_filters_count;
1715
1716         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
1717         if (rc != 0)
1718                 return rc;
1719
1720         return 0;
1721 }
1722
1723 /**
1724  * Check that the given set of match flags missing in the original filter spec
1725  * could be covered by adding spec copies which specify the corresponding
1726  * flags and packet field values to match.
1727  *
1728  * @param miss_flags[in]
1729  *   Match flags that the specification lacks compared to the supported filter.
1730  * @param spec[in]
1731  *   Specification to be supplemented.
1732  * @param filter[in]
1733  *   SFC filter.
1734  *
1735  * @return
1736  *   Number of specifications after copying, or 0 if the flags cannot be added.
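 *
 * For instance, if both EFX_FILTER_MATCH_ETHER_TYPE and
 * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST are missing and both can be
 * covered, the returned multiplier is 2 * 2 = 4 copies per original
 * specification.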
1737  */
1738 static unsigned int
1739 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
1740                              efx_filter_spec_t *spec,
1741                              struct sfc_filter *filter)
1742 {
1743         unsigned int i;
1744         efx_filter_match_flags_t copy_flags = 0;
1745         efx_filter_match_flags_t flag;
1746         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
1747         sfc_flow_spec_check *check;
1748         unsigned int multiplier = 1;
1749
1750         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1751                 flag = sfc_flow_copy_flags[i].flag;
1752                 check = sfc_flow_copy_flags[i].spec_check;
1753                 if ((flag & miss_flags) == flag) {
1754                 if (check != NULL && !check(match, spec, filter))
1755                                 continue;
1756
1757                         copy_flags |= flag;
1758                         multiplier *= sfc_flow_copy_flags[i].vals_count;
1759                 }
1760         }
1761
1762         if (copy_flags == miss_flags)
1763                 return multiplier;
1764
1765         return 0;
1766 }
1767
1768 /**
1769  * Attempt to extend the specification template to a minimally
1770  * supported set of match flags. To do this, the specifications are
1771  * copied and filled in with the values of the fields that
1772  * correspond to the missing flags.
1773  * The necessary and sufficient filter set is built from the fewest
1774  * number of copies that could be made to cover the minimally
1775  * required set of flags.
1776  *
1777  * @param sa[in]
1778  *   SFC adapter.
1779  * @param spec[in, out]
1780  *   SFC flow specification to update.
1781  * @param error[out]
1782  *   Perform verbose error reporting if not NULL.
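 *
 * For example, if one supported match needs only EtherType added
 * (2 copies) and another needs EtherType plus the inner frame unknown
 * destination flag (4 copies), the 2-copy variant is chosen as the
 * minimal multiplier.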
1783  */
1784 static int
1785 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
1786                                struct sfc_flow_spec *spec,
1787                                struct rte_flow_error *error)
1788 {
1789         struct sfc_filter *filter = &sa->filter;
1790         efx_filter_match_flags_t miss_flags;
1791         efx_filter_match_flags_t min_miss_flags = 0;
1792         efx_filter_match_flags_t match;
1793         unsigned int min_multiplier = UINT_MAX;
1794         unsigned int multiplier;
1795         unsigned int i;
1796         int rc;
1797
1798         match = spec->template.efs_match_flags;
1799         for (i = 0; i < filter->supported_match_num; i++) {
1800                 if ((match & filter->supported_match[i]) == match) {
1801                         miss_flags = filter->supported_match[i] & (~match);
1802                         multiplier = sfc_flow_check_missing_flags(miss_flags,
1803                                 &spec->template, filter);
1804                         if (multiplier > 0) {
1805                                 if (multiplier <= min_multiplier) {
1806                                         min_multiplier = multiplier;
1807                                         min_miss_flags = miss_flags;
1808                                 }
1809                         }
1810                 }
1811         }
1812
1813         if (min_multiplier == UINT_MAX) {
1814                 rte_flow_error_set(error, ENOTSUP,
1815                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1816                                    "Flow rule pattern is not supported");
1817                 return -rte_errno;
1818         }
1819
1820         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1821                 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
1822
1823                 if ((flag & min_miss_flags) == flag) {
1824                         rc = sfc_flow_spec_add_match_flag(spec, flag, error);
1825                         if (rc != 0)
1826                                 return rc;
1827                 }
1828         }
1829
1830         return 0;
1831 }
1832
1833 static int
1834 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
1835                               struct rte_flow *flow,
1836                               struct rte_flow_error *error)
1837 {
1838         efx_filter_spec_t *spec_tmpl = &flow->spec.template;
1839         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
1840         int rc;
1841
1842         /* Initialize the first filter spec with the template */
1843         flow->spec.filters[0] = *spec_tmpl;
1844         flow->spec.count = 1;
1845
1846         if (!sfc_filter_is_match_supported(sa, match_flags)) {
1847                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
1848                 if (rc != 0)
1849                         return rc;
1850         }
1851
1852         return 0;
1853 }
1854
1855 static int
1856 sfc_flow_parse(struct rte_eth_dev *dev,
1857                const struct rte_flow_attr *attr,
1858                const struct rte_flow_item pattern[],
1859                const struct rte_flow_action actions[],
1860                struct rte_flow *flow,
1861                struct rte_flow_error *error)
1862 {
1863         struct sfc_adapter *sa = dev->data->dev_private;
1864         int rc;
1865
1866         rc = sfc_flow_parse_attr(attr, flow, error);
1867         if (rc != 0)
1868                 goto fail_bad_value;
1869
1870         rc = sfc_flow_parse_pattern(pattern, flow, error);
1871         if (rc != 0)
1872                 goto fail_bad_value;
1873
1874         rc = sfc_flow_parse_actions(sa, actions, flow, error);
1875         if (rc != 0)
1876                 goto fail_bad_value;
1877
1878         rc = sfc_flow_validate_match_flags(sa, flow, error);
1879         if (rc != 0)
1880                 goto fail_bad_value;
1881
1882         return 0;
1883
1884 fail_bad_value:
1885         return rc;
1886 }
1887
1888 static int
1889 sfc_flow_validate(struct rte_eth_dev *dev,
1890                   const struct rte_flow_attr *attr,
1891                   const struct rte_flow_item pattern[],
1892                   const struct rte_flow_action actions[],
1893                   struct rte_flow_error *error)
1894 {
1895         struct rte_flow flow;
1896
1897         memset(&flow, 0, sizeof(flow));
1898
1899         return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
1900 }
1901
1902 static struct rte_flow *
1903 sfc_flow_create(struct rte_eth_dev *dev,
1904                 const struct rte_flow_attr *attr,
1905                 const struct rte_flow_item pattern[],
1906                 const struct rte_flow_action actions[],
1907                 struct rte_flow_error *error)
1908 {
1909         struct sfc_adapter *sa = dev->data->dev_private;
1910         struct rte_flow *flow = NULL;
1911         int rc;
1912
1913         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
1914         if (flow == NULL) {
1915                 rte_flow_error_set(error, ENOMEM,
1916                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1917                                    "Failed to allocate memory");
1918                 goto fail_no_mem;
1919         }
1920
1921         sfc_adapter_lock(sa);
1922
1923         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
1924         if (rc != 0)
1925                 goto fail_bad_value;
1926
1927         TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
1928
1929         if (sa->state == SFC_ADAPTER_STARTED) {
1930                 rc = sfc_flow_filter_insert(sa, flow);
1931                 if (rc != 0) {
1932                         rte_flow_error_set(error, rc,
1933                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1934                                 "Failed to insert filter");
1935                         goto fail_filter_insert;
1936                 }
1937         }
1938
1939         sfc_adapter_unlock(sa);
1940
1941         return flow;
1942
1943 fail_filter_insert:
1944         TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
1945
1946 fail_bad_value:
1947         rte_free(flow);
1948         sfc_adapter_unlock(sa);
1949
1950 fail_no_mem:
1951         return NULL;
1952 }
1953
1954 static int
1955 sfc_flow_remove(struct sfc_adapter *sa,
1956                 struct rte_flow *flow,
1957                 struct rte_flow_error *error)
1958 {
1959         int rc = 0;
1960
1961         SFC_ASSERT(sfc_adapter_is_locked(sa));
1962
1963         if (sa->state == SFC_ADAPTER_STARTED) {
1964                 rc = sfc_flow_filter_remove(sa, flow);
1965                 if (rc != 0)
1966                         rte_flow_error_set(error, rc,
1967                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1968                                 "Failed to destroy flow rule");
1969         }
1970
1971         TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
1972         rte_free(flow);
1973
1974         return rc;
1975 }
1976
1977 static int
1978 sfc_flow_destroy(struct rte_eth_dev *dev,
1979                  struct rte_flow *flow,
1980                  struct rte_flow_error *error)
1981 {
1982         struct sfc_adapter *sa = dev->data->dev_private;
1983         struct rte_flow *flow_ptr;
1984         int rc = EINVAL;
1985
1986         sfc_adapter_lock(sa);
1987
1988         TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
1989                 if (flow_ptr == flow)
1990                         rc = 0;
1991         }
1992         if (rc != 0) {
1993                 rte_flow_error_set(error, rc,
1994                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1995                                    "Failed to find flow rule to destroy");
1996                 goto fail_bad_value;
1997         }
1998
1999         rc = sfc_flow_remove(sa, flow, error);
2000
2001 fail_bad_value:
2002         sfc_adapter_unlock(sa);
2003
2004         return -rc;
2005 }
2006
2007 static int
2008 sfc_flow_flush(struct rte_eth_dev *dev,
2009                struct rte_flow_error *error)
2010 {
2011         struct sfc_adapter *sa = dev->data->dev_private;
2012         struct rte_flow *flow;
2013         int rc = 0;
2014         int ret = 0;
2015
2016         sfc_adapter_lock(sa);
2017
2018         while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2019                 rc = sfc_flow_remove(sa, flow, error);
2020                 if (rc != 0)
2021                         ret = rc;
2022         }
2023
2024         sfc_adapter_unlock(sa);
2025
2026         return -ret;
2027 }
2028
2029 static int
2030 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2031                  struct rte_flow_error *error)
2032 {
2033         struct sfc_adapter *sa = dev->data->dev_private;
2034         struct sfc_port *port = &sa->port;
2035         int ret = 0;
2036
2037         sfc_adapter_lock(sa);
2038         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2039                 rte_flow_error_set(error, EBUSY,
2040                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2041                                    NULL, "please close the port first");
2042                 ret = -rte_errno;
2043         } else {
2044                 port->isolated = (enable) ? B_TRUE : B_FALSE;
2045         }
2046         sfc_adapter_unlock(sa);
2047
2048         return ret;
2049 }
2050
2051 const struct rte_flow_ops sfc_flow_ops = {
2052         .validate = sfc_flow_validate,
2053         .create = sfc_flow_create,
2054         .destroy = sfc_flow_destroy,
2055         .flush = sfc_flow_flush,
2056         .query = NULL,
2057         .isolate = sfc_flow_isolate,
2058 };
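
/*
 * Usage sketch (hypothetical application code, not part of this
 * driver): the callbacks above are reached through the generic
 * rte_flow API, e.g.
 *
 *   struct rte_flow_error err;
 *   struct rte_flow *handle;
 *
 *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *           handle = rte_flow_create(port_id, &attr, pattern,
 *                                    actions, &err);
 *
 * where attr, pattern and actions describe a rule that this driver can
 * map onto one or more hardware filters.
 */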
2059
2060 void
2061 sfc_flow_init(struct sfc_adapter *sa)
2062 {
2063         SFC_ASSERT(sfc_adapter_is_locked(sa));
2064
2065         TAILQ_INIT(&sa->filter.flow_list);
2066 }
2067
2068 void
2069 sfc_flow_fini(struct sfc_adapter *sa)
2070 {
2071         struct rte_flow *flow;
2072
2073         SFC_ASSERT(sfc_adapter_is_locked(sa));
2074
2075         while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2076                 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2077                 rte_free(flow);
2078         }
2079 }
2080
2081 void
2082 sfc_flow_stop(struct sfc_adapter *sa)
2083 {
2084         struct rte_flow *flow;
2085
2086         SFC_ASSERT(sfc_adapter_is_locked(sa));
2087
2088         TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
2089                 sfc_flow_filter_remove(sa, flow);
2090 }
2091
2092 int
2093 sfc_flow_start(struct sfc_adapter *sa)
2094 {
2095         struct rte_flow *flow;
2096         int rc = 0;
2097
2098         sfc_log_init(sa, "entry");
2099
2100         SFC_ASSERT(sfc_adapter_is_locked(sa));
2101
2102         TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
2103                 rc = sfc_flow_filter_insert(sa, flow);
2104                 if (rc != 0)
2105                         goto fail_bad_flow;
2106         }
2107
2108         sfc_log_init(sa, "done");
2109
2110 fail_bad_flow:
2111         return rc;
2112 }