net/sfc: multiply of specs with an unknown destination MAC
[dpdk.git] / drivers / net / sfc / sfc_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * Currently, the flow API is implemented in such a manner that each
 * flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is unset in the flow rule, then several
 * filter copies will be created to cover all possible values
 * of such a field.
 */
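
/*
 * For example (an illustrative sketch, not additional driver logic):
 * if a rule leaves the destination MAC address unset but the hardware
 * requires some destination-MAC match flag, the single filter template
 * may be multiplied into copies that differ only in that flag, e.g. one
 * copy with EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and another with
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST, so that together they cover all
 * destinations the rule is meant to match. The helpers declared below
 * (sfc_flow_set_unknown_dst_flags() and friends) serve this purpose.
 */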

enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
        SFC_FLOW_ITEM_L3,
        SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of item */
        enum sfc_flow_item_layers layer;        /* Layer of item */
        enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" structures for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t match;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec", then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask and spec do not ask for more match bits than supp_mask allows */
        for (i = 0; i < size; i++) {
                match = spec[i] | mask[i];
                supp = ((const uint8_t *)supp_mask)[i];

                if ((match | supp) != supp) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}

/*
 * Protocol parsers.
 * Partial masking is not supported, so masks in items must be either
 * full or empty (zeroed), and may be set only for the supported fields
 * specified in supp_mask.
 */
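
/*
 * For instance (a minimal illustrative sketch, not part of the driver):
 * to match only the IPv4 destination address, the mask for that field
 * must be all-ones while the masks for all other fields remain zero:
 *
 * @code
 * static const struct rte_flow_item_ipv4 ipv4_dst_only_mask = {
 *         .hdr = {
 *                 .dst_addr = 0xffffffff,
 *         }
 * };
 * @endcode
 */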

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. The outer frame specification may only comprise
 *   the source/destination addresses and the Ethertype field.
 *   The inner frame specification may contain the destination address only.
 *   The individual/group mask is supported, as well as empty and full masks.
 *   If the mask is NULL, the default mask will be used. Ranging is not
 *   supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, the rule matches any Ethernet frame */
        if (spec == NULL)
                return 0;

        if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * Ethertype masks are equal to zero in the inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * The Ether type is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}
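
/*
 * Example (an illustrative sketch, not part of the driver): with the
 * individual/group mask from above, a rule can match all multicast
 * destinations by comparing only the IG bit of the destination address:
 *
 * @code
 * static const struct rte_flow_item_eth eth_mcast_spec = {
 *         .dst.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
 * };
 * static const struct rte_flow_item_eth eth_mcast_mask = {
 *         .dst.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
 * };
 * @endcode
 *
 * Since the spec address has the group bit set, sfc_flow_parse_eth()
 * maps this to EFX_FILTER_MATCH_UNKNOWN_MCAST_DST (or the IFRM variant
 * when parsing the inner frame).
 */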

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * The VID is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                vid = rte_bswap16(spec->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        return 0;
}
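
/*
 * Example (an illustrative sketch, not part of the driver): matching
 * outer VID 100 and inner VID 200 takes two VLAN items after the ETH
 * item; a mask must be passed explicitly, since the default mask here
 * is NULL:
 *
 * @code
 * static const struct rte_flow_item_vlan vlan_mask = {
 *         .tci = RTE_BE16(0x0fff),
 * };
 * static const struct rte_flow_item_vlan outer_vlan_spec = {
 *         .tci = RTE_BE16(100),
 * };
 * static const struct rte_flow_item_vlan inner_vlan_spec = {
 *         .tci = RTE_BE16(200),
 * };
 * @endcode
 */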

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}
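
/*
 * Example (an illustrative sketch, not part of the driver): matching
 * TCP traffic to 192.168.1.1 (0xc0a80101). The default
 * rte_flow_item_ipv4_mask covers the addresses only, so an explicit
 * mask is needed to also match on the protocol field; IPPROTO_TCP
 * comes from <netinet/in.h>:
 *
 * @code
 * static const struct rte_flow_item_ipv4 ipv4_spec = {
 *         .hdr = {
 *                 .dst_addr = RTE_BE32(0xc0a80101),
 *                 .next_proto_id = IPPROTO_TCP,
 *         }
 * };
 * static const struct rte_flow_item_ipv4 ipv4_mask = {
 *         .hdr = {
 *                 .dst_addr = 0xffffffff,
 *                 .next_proto_id = 0xff,
 *         }
 * };
 * @endcode
 */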

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so a byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}
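
/*
 * Example (an illustrative sketch, not part of the driver): matching
 * TCP destination port 80 only. The default rte_flow_item_tcp_mask
 * covers both ports, so an explicit mask is given to leave the source
 * port wild:
 *
 * @code
 * static const struct rte_flow_item_tcp tcp_spec = {
 *         .hdr = {
 *                 .dst_port = RTE_BE16(80),
 *         }
 * };
 * static const struct rte_flow_item_tcp tcp_mask = {
 *         .hdr = {
 *                 .dst_port = RTE_BE16(0xffff),
 *         }
 * };
 * @endcode
 */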

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so a byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VXLAN network identifier field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}
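
/*
 * Example (an illustrative sketch, not part of the driver): a pattern
 * matching VXLAN traffic with VNI 42. The ETH, IPV4 and UDP items carry
 * no spec, so they only pin the protocol stack leading to the tunnel:
 *
 * @code
 * static const struct rte_flow_item_vxlan vxlan_spec = {
 *         .vni = { 0x00, 0x00, 0x2a },
 * };
 * const struct rte_flow_item pattern[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *         { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *         { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *         { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode
 *
 * The default rte_flow_item_vxlan_mask covers the whole VNI, so no
 * explicit mask is required here.
 */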

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      efx_filter_spec_t *efx_spec,
                      struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_nvgre,
        },
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
        flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

        return 0;
}

/* Get an item from the sfc_flow_items array */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                rc = item->parse(pattern, &flow->spec.template, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}
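
/*
 * For example (illustrative), under the rules above:
 *
 *   ETH / IPV4 / UDP / VXLAN / ETH         is accepted (inner ETH);
 *   ETH / IPV4 / UDP / VXLAN / ETH / IPV4  is rejected, since only VOID
 *                                          and ETH items may follow the
 *                                          tunnel item in the inner frame.
 */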

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_rxq *rxq;

        if (queue->index >= sa->rxq_count)
                return -EINVAL;

        rxq = sa->rxq_info[queue->index].rxq;
        flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

#if EFSYS_OPT_RX_SCALE
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
                   const struct rte_flow_action_rss *rss,
                   struct rte_flow *flow)
{
        unsigned int rxq_sw_index;
        struct sfc_rxq *rxq;
        unsigned int rxq_hw_index_min;
        unsigned int rxq_hw_index_max;
        const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
        uint64_t rss_hf;
        uint8_t *rss_key = NULL;
        struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
        unsigned int i;

        if (rss->num == 0)
                return -EINVAL;

        rxq_sw_index = sa->rxq_count - 1;
        rxq = sa->rxq_info[rxq_sw_index].rxq;
        rxq_hw_index_min = rxq->hw_index;
        rxq_hw_index_max = 0;

        for (i = 0; i < rss->num; ++i) {
                rxq_sw_index = rss->queue[i];

                if (rxq_sw_index >= sa->rxq_count)
                        return -EINVAL;

                rxq = sa->rxq_info[rxq_sw_index].rxq;

                if (rxq->hw_index < rxq_hw_index_min)
                        rxq_hw_index_min = rxq->hw_index;

                if (rxq->hw_index > rxq_hw_index_max)
                        rxq_hw_index_max = rxq->hw_index;
        }

        rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
        if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
                return -EINVAL;

        if (rss_conf != NULL) {
                if (rss_conf->rss_key_len != sizeof(sa->rss_key))
                        return -EINVAL;

                rss_key = rss_conf->rss_key;
        } else {
                rss_key = sa->rss_key;
        }

        flow->rss = B_TRUE;

        sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
        sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
        sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
        rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));

        for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
                unsigned int rxq_sw_index = rss->queue[i % rss->num];
                struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;

                sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
        }

        return 0;
}
#endif /* EFSYS_OPT_RX_SCALE */
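
/*
 * Example (an illustrative sketch, not part of the driver): spreading
 * matched traffic across Rx queues 0-3. rte_flow_action_rss ends with
 * a flexible queue[] array, so it is allocated dynamically here with
 * calloc(3) from <stdlib.h>:
 *
 * @code
 * struct rte_flow_action_rss *rss;
 * uint16_t q;
 *
 * rss = calloc(1, sizeof(*rss) + 4 * sizeof(rss->queue[0]));
 * rss->rss_conf = NULL;
 * rss->num = 4;
 * for (q = 0; q < 4; q++)
 *         rss->queue[q] = q;
 * @endcode
 *
 * A NULL rss_conf makes sfc_flow_parse_rss() fall back to the
 * SFC_RSS_OFFLOADS hash types and the adapter's current RSS key.
 */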
1296
1297 static int
1298 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1299                     unsigned int filters_count)
1300 {
1301         unsigned int i;
1302         int ret = 0;
1303
1304         for (i = 0; i < filters_count; i++) {
1305                 int rc;
1306
1307                 rc = efx_filter_remove(sa->nic, &spec->filters[i]);
1308                 if (ret == 0 && rc != 0) {
1309                         sfc_err(sa, "failed to remove filter specification "
1310                                 "(rc = %d)", rc);
1311                         ret = rc;
1312                 }
1313         }
1314
1315         return ret;
1316 }
1317
1318 static int
1319 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1320 {
1321         unsigned int i;
1322         int rc = 0;
1323
1324         for (i = 0; i < spec->count; i++) {
1325                 rc = efx_filter_insert(sa->nic, &spec->filters[i]);
1326                 if (rc != 0) {
1327                         sfc_flow_spec_flush(sa, spec, i);
1328                         break;
1329                 }
1330         }
1331
1332         return rc;
1333 }
1334
1335 static int
1336 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1337 {
1338         return sfc_flow_spec_flush(sa, spec, spec->count);
1339 }
1340
1341 static int
1342 sfc_flow_filter_insert(struct sfc_adapter *sa,
1343                        struct rte_flow *flow)
1344 {
1345 #if EFSYS_OPT_RX_SCALE
1346         struct sfc_flow_rss *rss = &flow->rss_conf;
1347         uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1348         unsigned int i;
1349         int rc = 0;
1350
1351         if (flow->rss) {
1352                 unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
1353                                               rss->rxq_hw_index_min + 1,
1354                                               EFX_MAXRSS);
1355
		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   EFX_RX_HASHALG_TOEPLITZ,
					   rss->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  rss->rss_key,
					  sizeof(sa->rss_key));
		if (rc != 0)
			goto fail_scale_key_set;

		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < flow->spec.count; i++) {
			efx_filter_spec_t *spec = &flow->spec.filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_dmaq_id = rss->rxq_hw_index_min;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (flow->rss) {
		/*
		 * The scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID,
		 * and the latter is submitted to the HW by means of
		 * inserting a filter; so by the time of this request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed.
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  rss->rss_tbl, RTE_DIM(rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
#else /* !EFSYS_OPT_RX_SCALE */
	return sfc_flow_spec_insert(sa, &flow->spec);
#endif /* EFSYS_OPT_RX_SCALE */
}

static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

#if EFSYS_OPT_RX_SCALE
	if (flow->rss) {
		/*
		 * All specifications for a given flow rule have the same RSS
		 * context, so the RSS context value is taken from the first
		 * filter specification.
		 */
		efx_filter_spec_t *spec = &flow->spec.filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}
#endif /* EFSYS_OPT_RX_SCALE */

	return rc;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	boolean_t is_specified = B_FALSE;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

#if EFSYS_OPT_RX_SCALE
		case RTE_FLOW_ACTION_TYPE_RSS:
			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;
#endif /* EFSYS_OPT_RX_SCALE */

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	if (!is_specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
				   "Action is unspecified");
		return -rte_errno;
	}

	return 0;
}

/**
 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the specifications
 * after copying: each group of filters_count_for_one_val specifications
 * gets one of the two flags.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should share the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
			       unsigned int filters_count_for_one_val,
			       struct rte_flow_error *error)
{
	unsigned int i;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by unknown destination flags");
		return -rte_errno;
	}

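	/*
	 * For example, with spec->count == 4 and
	 * filters_count_for_one_val == 2, filters 0-1 get the unknown
	 * unicast flag and filters 2-3 get the unknown multicast flag.
	 */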
	for (i = 0; i < spec->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Check that the following condition is met:
 * - the list of supported filters has a filter
 *   with the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag in place of
 *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of the filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with the list of supported filters.
 */
static boolean_t
sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
				 __rte_unused efx_filter_spec_t *spec,
				 struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_mcast_dst;

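	/*
	 * Build the match flags of the sibling multicast filter and look it
	 * up in the list of supported filters.
	 */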
	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/**
 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag in all specifications
 * after copying and assign the EFX_ETHER_TYPE_IPV4 and EFX_ETHER_TYPE_IPV6
 * values to the EtherType field: each group of filters_count_for_one_val
 * specifications gets one of the two values.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should share the same EtherType value, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
			unsigned int filters_count_for_one_val,
			struct rte_flow_error *error)
{
	unsigned int i;
	static const uint16_t vals[] = {
		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by Ethertype");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		spec->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_ETHER_TYPE;

		/*
		 * The check above ensures that
		 * filters_count_for_one_val is not 0
		 */
		spec->filters[i].efs_ether_type =
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the specifications
 * after copying: each group of filters_count_for_one_val specifications
 * gets one of the two flags.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should share the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
				    unsigned int filters_count_for_one_val,
				    struct rte_flow_error *error)
{
	unsigned int i;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by inner frame unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Check that the following conditions are met:
 * - the specification corresponds to a filter for encapsulated traffic
 * - the list of supported filters has a filter
 *   with the EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag in place of
 *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of the filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with the list of supported filters.
 */
static boolean_t
sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
				      efx_filter_spec_t *spec,
				      struct sfc_filter *filter)
{
	unsigned int i;
	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
	efx_filter_match_flags_t match_mcast_dst;

	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return B_FALSE;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/*
 * Match flags that can be automatically added to filters.
 * Selecting the last minimum multiplier when searching for a supported
 * match (see sfc_flow_spec_filters_complete()) gives the
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag higher priority than
 * EFX_FILTER_MATCH_ETHER_TYPE, because filters with
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are at the end of the list of
 * supported filters.
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	{
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	{
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
};

/* Get an item from the sfc_flow_copy_flags array */
static const struct sfc_flow_copy_flag *
sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		if (sfc_flow_copy_flags[i].flag == flag)
			return &sfc_flow_copy_flags[i];
	}

	return NULL;
}

/**
 * Make copies of the specifications and set the match flag and the values
 * of the corresponding field in them.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param flag[in]
 *   The match flag to add.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
			     efx_filter_match_flags_t flag,
			     struct rte_flow_error *error)
{
	unsigned int i;
	unsigned int new_filters_count;
	unsigned int filters_count_for_one_val;
	const struct sfc_flow_copy_flag *copy_flag;
	int rc;

	copy_flag = sfc_flow_get_copy_flag(flag);
	if (copy_flag == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Unsupported spec field for copying");
		return -rte_errno;
	}

	new_filters_count = spec->count * copy_flag->vals_count;
	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Too many EFX specifications in the flow rule");
		return -rte_errno;
	}

	/* Copy filters specifications */
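	/*
	 * For example, with spec->count == 2 and vals_count == 2, filters[2]
	 * and filters[3] become copies of filters[0] and filters[1].
	 */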
	for (i = spec->count; i < new_filters_count; i++)
		spec->filters[i] = spec->filters[i - spec->count];

	filters_count_for_one_val = spec->count;
	spec->count = new_filters_count;

	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
	if (rc != 0)
		return rc;

	return 0;
}

/**
 * Check that the given set of match flags missing from the original filter
 * spec could be covered by adding spec copies which specify the
 * corresponding flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Match flags required by the supported filter but missing from the
 *   specification.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   Number of specifications after copying, or 0 if the flags cannot be
 *   added.
 */
static unsigned int
sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
			     efx_filter_spec_t *spec,
			     struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t copy_flags = 0;
	efx_filter_match_flags_t flag;
	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
	sfc_flow_spec_check *check;
	unsigned int multiplier = 1;

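	/*
	 * Each missing flag that has a copy handler (and passes its
	 * spec_check, if any) multiplies the number of specifications by
	 * its vals_count; the returned multiplier is the product over all
	 * such flags.
	 */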
	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		flag = sfc_flow_copy_flags[i].flag;
		check = sfc_flow_copy_flags[i].spec_check;
		if ((flag & miss_flags) == flag) {
			if (check != NULL && !check(match, spec, filter))
				continue;

			copy_flags |= flag;
			multiplier *= sfc_flow_copy_flags[i].vals_count;
		}
	}

	if (copy_flags == miss_flags)
		return multiplier;

	return 0;
}

/**
 * Attempt to supplement the specification template with the minimally
 * supported set of match flags. To do this, the specifications are copied
 * and filled in with the values of the fields that correspond to the
 * missing flags.
 * The necessary and sufficient filter is built from the fewest number
 * of copies that cover the minimally required set of flags.
 *
 * @param sa[in]
 *   SFC adapter.
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
			       struct sfc_flow_spec *spec,
			       struct rte_flow_error *error)
{
	struct sfc_filter *filter = &sa->filter;
	efx_filter_match_flags_t miss_flags;
	efx_filter_match_flags_t min_miss_flags = 0;
	efx_filter_match_flags_t match;
	unsigned int min_multiplier = UINT_MAX;
	unsigned int multiplier;
	unsigned int i;
	int rc;

	match = spec->template.efs_match_flags;
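	/*
	 * Find the supported match that is a superset of the template's
	 * match flags and needs the fewest filter copies; on a tie, the
	 * last such match in the supported list wins (see the comment
	 * above sfc_flow_copy_flags).
	 */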
	for (i = 0; i < filter->supported_match_num; i++) {
		if ((match & filter->supported_match[i]) == match) {
			miss_flags = filter->supported_match[i] & (~match);
			multiplier = sfc_flow_check_missing_flags(miss_flags,
				&spec->template, filter);
			if (multiplier > 0 && multiplier <= min_multiplier) {
				min_multiplier = multiplier;
				min_miss_flags = miss_flags;
			}
		}
	}

	if (min_multiplier == UINT_MAX) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Flow rule pattern is not supported");
		return -rte_errno;
	}

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;

		if ((flag & min_miss_flags) == flag) {
			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}

static int
sfc_flow_validate_match_flags(struct sfc_adapter *sa,
			      struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	efx_filter_spec_t *spec_tmpl = &flow->spec.template;
	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
	int rc;

	/* Initialize the first filter spec with template */
	flow->spec.filters[0] = *spec_tmpl;
	flow->spec.count = 1;

	if (!sfc_filter_is_match_supported(sa, match_flags)) {
		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
		if (rc != 0)
			return rc;
	}

	return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_validate_match_flags(sa, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	return 0;

fail_bad_value:
	return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	memset(&flow, 0, sizeof(flow));

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	/* The flow list is modified under the adapter lock */
	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	sfc_adapter_unlock(sa);

fail_bad_value:
	rte_free(flow);

fail_no_mem:
	return NULL;
}

static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_remove(sa, flow);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow) {
			rc = 0;
			break;
		}
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}

static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	int ret = 0;

	sfc_adapter_lock(sa);
	if (sa->state != SFC_ADAPTER_INITIALIZED) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Please close the port first");
		ret = -rte_errno;
	} else {
		port->isolated = (enable) ? B_TRUE : B_FALSE;
	}
	sfc_adapter_unlock(sa);

	return ret;
}

const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
	.isolate = sfc_flow_isolate,
};

void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		sfc_flow_filter_remove(sa, flow);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}