net/sfc: support DROP action in flow API
[dpdk.git] / drivers / net / sfc / sfc_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2017-2018 Solarflare Communications Inc.
4  * All rights reserved.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9
10 #include <rte_tailq.h>
11 #include <rte_common.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_eth_ctrl.h>
14 #include <rte_ether.h>
15 #include <rte_flow.h>
16 #include <rte_flow_driver.h>
17
18 #include "efx.h"
19
20 #include "sfc.h"
21 #include "sfc_rx.h"
22 #include "sfc_filter.h"
23 #include "sfc_flow.h"
24 #include "sfc_log.h"
25
26 /*
27  * At now flow API is implemented in such a manner that each
28  * flow rule is converted to one or more hardware filters.
29  * All elements of flow rule (attributes, pattern items, actions)
30  * correspond to one or more fields in the efx_filter_spec_s structure
31  * that is responsible for the hardware filter.
32  * If some required field is unset in the flow rule, then a handful
33  * of filter copies will be created to cover all possible values
34  * of such a field.
35  */
36
/*
 * Protocol layer a pattern item belongs to; used together with
 * struct sfc_flow_item's layer/prev_layer to check item ordering.
 */
enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,	/* Item is valid at any layer */
	SFC_FLOW_ITEM_START_LAYER,	/* Start of the pattern */
	SFC_FLOW_ITEM_L2,		/* Link layer */
	SFC_FLOW_ITEM_L3,		/* Network layer */
	SFC_FLOW_ITEM_L4,		/* Transport layer */
};
44
/*
 * Handler that converts one pattern item into fields of the EFX
 * filter specification; returns 0 or a negative errno value.
 */
typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

/* Description of one supported flow pattern item */
struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};
55
/* Forward declarations of the per-item parsing handlers */
static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
66
/*
 * Handler that fills in a given field across a set of filter
 * specification copies (filters_count_for_one_val copies per value).
 */
typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

/*
 * Predicate that tells whether a match flag may be added to the
 * given specification for the given filter capabilities.
 */
typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

/* How to expand a match flag that is left unset by the flow rule */
struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

/* Forward declarations of the copy-flag handlers */
static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
94
95 static boolean_t
96 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
97 {
98         uint8_t sum = 0;
99         unsigned int i;
100
101         for (i = 0; i < size; i++)
102                 sum |= buf[i];
103
104         return (sum == 0) ? B_TRUE : B_FALSE;
105 }
106
/*
 * Validate item and prepare structures spec and mask for parsing.
 *
 * On success *spec_ptr points to the item's spec (may be NULL, in which
 * case the item matches anything at its layer) and *mask_ptr points to
 * the effective mask: item->mask if present, def_mask otherwise.
 * Returns 0 on success or a negative errno value with *error filled in.
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t match;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	/* "last" and "mask" are only meaningful relative to a "spec" */
	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	/* No spec: nothing more to validate, item matches anything */
	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask and spec not asks for more match than supp_mask */
	for (i = 0; i < size; i++) {
		match = spec[i] | mask[i];
		supp = ((const uint8_t *)supp_mask)[i];

		/* Any bit set in spec or mask must also be set in supp_mask */
		if ((match | supp) != supp) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
201
202 /*
203  * Protocol parsers.
204  * Masking is not supported, so masks in items should be either
205  * full or empty (zeroed) and set only for supported fields which
206  * are specified in the supp_mask.
207  */
208
209 static int
210 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
211                     __rte_unused efx_filter_spec_t *efx_spec,
212                     __rte_unused struct rte_flow_error *error)
213 {
214         return 0;
215 }
216
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	/* Fields that may be matched in an outer frame */
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	/* Inner frames may be matched on destination MAC only */
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	/* Individual/group bit of the destination MAC address */
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	/*
	 * A tunnel item seen earlier has set efs_encap_type, so this
	 * ETH item describes the inner frame
	 */
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		/* Fully specified destination MAC address */
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		/* Only the I/G bit is matched: unknown unicast/multicast */
		if (is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
331
332 /**
333  * Convert VLAN item to EFX filter specification.
334  *
335  * @param item[in]
336  *   Item specification. Only VID field is supported.
337  *   The mask can not be NULL. Ranging is not supported.
338  * @param efx_spec[in, out]
339  *   EFX filter specification to update.
340  * @param[out] error
341  *   Perform verbose error reporting if not NULL.
342  */
343 static int
344 sfc_flow_parse_vlan(const struct rte_flow_item *item,
345                     efx_filter_spec_t *efx_spec,
346                     struct rte_flow_error *error)
347 {
348         int rc;
349         uint16_t vid;
350         const struct rte_flow_item_vlan *spec = NULL;
351         const struct rte_flow_item_vlan *mask = NULL;
352         const struct rte_flow_item_vlan supp_mask = {
353                 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
354         };
355
356         rc = sfc_flow_parse_init(item,
357                                  (const void **)&spec,
358                                  (const void **)&mask,
359                                  &supp_mask,
360                                  NULL,
361                                  sizeof(struct rte_flow_item_vlan),
362                                  error);
363         if (rc != 0)
364                 return rc;
365
366         /*
367          * VID is in big-endian byte order in item and
368          * in little-endian in efx_spec, so byte swap is used.
369          * If two VLAN items are included, the first matches
370          * the outer tag and the next matches the inner tag.
371          */
372         if (mask->tci == supp_mask.tci) {
373                 vid = rte_bswap16(spec->tci);
374
375                 if (!(efx_spec->efs_match_flags &
376                       EFX_FILTER_MATCH_OUTER_VID)) {
377                         efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
378                         efx_spec->efs_outer_vid = vid;
379                 } else if (!(efx_spec->efs_match_flags &
380                              EFX_FILTER_MATCH_INNER_VID)) {
381                         efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
382                         efx_spec->efs_inner_vid = vid;
383                 } else {
384                         rte_flow_error_set(error, EINVAL,
385                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
386                                            "More than two VLAN items");
387                         return -rte_errno;
388                 }
389         } else {
390                 rte_flow_error_set(error, EINVAL,
391                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
392                                    "VLAN ID in TCI match is required");
393                 return -rte_errno;
394         }
395
396         return 0;
397 }
398
399 /**
400  * Convert IPv4 item to EFX filter specification.
401  *
402  * @param item[in]
403  *   Item specification. Only source and destination addresses and
404  *   protocol fields are supported. If the mask is NULL, default
405  *   mask will be used. Ranging is not supported.
406  * @param efx_spec[in, out]
407  *   EFX filter specification to update.
408  * @param[out] error
409  *   Perform verbose error reporting if not NULL.
410  */
411 static int
412 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
413                     efx_filter_spec_t *efx_spec,
414                     struct rte_flow_error *error)
415 {
416         int rc;
417         const struct rte_flow_item_ipv4 *spec = NULL;
418         const struct rte_flow_item_ipv4 *mask = NULL;
419         const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
420         const struct rte_flow_item_ipv4 supp_mask = {
421                 .hdr = {
422                         .src_addr = 0xffffffff,
423                         .dst_addr = 0xffffffff,
424                         .next_proto_id = 0xff,
425                 }
426         };
427
428         rc = sfc_flow_parse_init(item,
429                                  (const void **)&spec,
430                                  (const void **)&mask,
431                                  &supp_mask,
432                                  &rte_flow_item_ipv4_mask,
433                                  sizeof(struct rte_flow_item_ipv4),
434                                  error);
435         if (rc != 0)
436                 return rc;
437
438         /*
439          * Filtering by IPv4 source and destination addresses requires
440          * the appropriate ETHER_TYPE in hardware filters
441          */
442         if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
443                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
444                 efx_spec->efs_ether_type = ether_type_ipv4;
445         } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
446                 rte_flow_error_set(error, EINVAL,
447                         RTE_FLOW_ERROR_TYPE_ITEM, item,
448                         "Ethertype in pattern with IPV4 item should be appropriate");
449                 return -rte_errno;
450         }
451
452         if (spec == NULL)
453                 return 0;
454
455         /*
456          * IPv4 addresses are in big-endian byte order in item and in
457          * efx_spec
458          */
459         if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
460                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
461                 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
462         } else if (mask->hdr.src_addr != 0) {
463                 goto fail_bad_mask;
464         }
465
466         if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
467                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
468                 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
469         } else if (mask->hdr.dst_addr != 0) {
470                 goto fail_bad_mask;
471         }
472
473         if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
474                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
475                 efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
476         } else if (mask->hdr.next_proto_id != 0) {
477                 goto fail_bad_mask;
478         }
479
480         return 0;
481
482 fail_bad_mask:
483         rte_flow_error_set(error, EINVAL,
484                            RTE_FLOW_ERROR_TYPE_ITEM, item,
485                            "Bad mask in the IPV4 pattern item");
486         return -rte_errno;
487 }
488
/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	/* EtherType is kept little-endian in efx_spec */
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	/* Only full matches on addresses and next header are supported */
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	/* Without a spec, any IPv6 packet matches */
	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		/* Destination field must be able to hold a full IPv6 address */
		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		/* Destination field must be able to hold a full IPv6 address */
		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}
596
597 /**
598  * Convert TCP item to EFX filter specification.
599  *
600  * @param item[in]
601  *   Item specification. Only source and destination ports fields
602  *   are supported. If the mask is NULL, default mask will be used.
603  *   Ranging is not supported.
604  * @param efx_spec[in, out]
605  *   EFX filter specification to update.
606  * @param[out] error
607  *   Perform verbose error reporting if not NULL.
608  */
609 static int
610 sfc_flow_parse_tcp(const struct rte_flow_item *item,
611                    efx_filter_spec_t *efx_spec,
612                    struct rte_flow_error *error)
613 {
614         int rc;
615         const struct rte_flow_item_tcp *spec = NULL;
616         const struct rte_flow_item_tcp *mask = NULL;
617         const struct rte_flow_item_tcp supp_mask = {
618                 .hdr = {
619                         .src_port = 0xffff,
620                         .dst_port = 0xffff,
621                 }
622         };
623
624         rc = sfc_flow_parse_init(item,
625                                  (const void **)&spec,
626                                  (const void **)&mask,
627                                  &supp_mask,
628                                  &rte_flow_item_tcp_mask,
629                                  sizeof(struct rte_flow_item_tcp),
630                                  error);
631         if (rc != 0)
632                 return rc;
633
634         /*
635          * Filtering by TCP source and destination ports requires
636          * the appropriate IP_PROTO in hardware filters
637          */
638         if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
639                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
640                 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
641         } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
642                 rte_flow_error_set(error, EINVAL,
643                         RTE_FLOW_ERROR_TYPE_ITEM, item,
644                         "IP proto in pattern with TCP item should be appropriate");
645                 return -rte_errno;
646         }
647
648         if (spec == NULL)
649                 return 0;
650
651         /*
652          * Source and destination ports are in big-endian byte order in item and
653          * in little-endian in efx_spec, so byte swap is used
654          */
655         if (mask->hdr.src_port == supp_mask.hdr.src_port) {
656                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
657                 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
658         } else if (mask->hdr.src_port != 0) {
659                 goto fail_bad_mask;
660         }
661
662         if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
663                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
664                 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
665         } else if (mask->hdr.dst_port != 0) {
666                 goto fail_bad_mask;
667         }
668
669         return 0;
670
671 fail_bad_mask:
672         rte_flow_error_set(error, EINVAL,
673                            RTE_FLOW_ERROR_TYPE_ITEM, item,
674                            "Bad mask in the TCP pattern item");
675         return -rte_errno;
676 }
677
678 /**
679  * Convert UDP item to EFX filter specification.
680  *
681  * @param item[in]
682  *   Item specification. Only source and destination ports fields
683  *   are supported. If the mask is NULL, default mask will be used.
684  *   Ranging is not supported.
685  * @param efx_spec[in, out]
686  *   EFX filter specification to update.
687  * @param[out] error
688  *   Perform verbose error reporting if not NULL.
689  */
690 static int
691 sfc_flow_parse_udp(const struct rte_flow_item *item,
692                    efx_filter_spec_t *efx_spec,
693                    struct rte_flow_error *error)
694 {
695         int rc;
696         const struct rte_flow_item_udp *spec = NULL;
697         const struct rte_flow_item_udp *mask = NULL;
698         const struct rte_flow_item_udp supp_mask = {
699                 .hdr = {
700                         .src_port = 0xffff,
701                         .dst_port = 0xffff,
702                 }
703         };
704
705         rc = sfc_flow_parse_init(item,
706                                  (const void **)&spec,
707                                  (const void **)&mask,
708                                  &supp_mask,
709                                  &rte_flow_item_udp_mask,
710                                  sizeof(struct rte_flow_item_udp),
711                                  error);
712         if (rc != 0)
713                 return rc;
714
715         /*
716          * Filtering by UDP source and destination ports requires
717          * the appropriate IP_PROTO in hardware filters
718          */
719         if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
720                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
721                 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
722         } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
723                 rte_flow_error_set(error, EINVAL,
724                         RTE_FLOW_ERROR_TYPE_ITEM, item,
725                         "IP proto in pattern with UDP item should be appropriate");
726                 return -rte_errno;
727         }
728
729         if (spec == NULL)
730                 return 0;
731
732         /*
733          * Source and destination ports are in big-endian byte order in item and
734          * in little-endian in efx_spec, so byte swap is used
735          */
736         if (mask->hdr.src_port == supp_mask.hdr.src_port) {
737                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
738                 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
739         } else if (mask->hdr.src_port != 0) {
740                 goto fail_bad_mask;
741         }
742
743         if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
744                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
745                 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
746         } else if (mask->hdr.dst_port != 0) {
747                 goto fail_bad_mask;
748         }
749
750         return 0;
751
752 fail_bad_mask:
753         rte_flow_error_set(error, EINVAL,
754                            RTE_FLOW_ERROR_TYPE_ITEM, item,
755                            "Bad mask in the UDP pattern item");
756         return -rte_errno;
757 }
758
759 /*
760  * Filters for encapsulated packets match based on the EtherType and IP
761  * protocol in the outer frame.
762  */
763 static int
764 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
765                                         efx_filter_spec_t *efx_spec,
766                                         uint8_t ip_proto,
767                                         struct rte_flow_error *error)
768 {
769         if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
770                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
771                 efx_spec->efs_ip_proto = ip_proto;
772         } else if (efx_spec->efs_ip_proto != ip_proto) {
773                 switch (ip_proto) {
774                 case EFX_IPPROTO_UDP:
775                         rte_flow_error_set(error, EINVAL,
776                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
777                                 "Outer IP header protocol must be UDP "
778                                 "in VxLAN/GENEVE pattern");
779                         return -rte_errno;
780
781                 case EFX_IPPROTO_GRE:
782                         rte_flow_error_set(error, EINVAL,
783                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
784                                 "Outer IP header protocol must be GRE "
785                                 "in NVGRE pattern");
786                         return -rte_errno;
787
788                 default:
789                         rte_flow_error_set(error, EINVAL,
790                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
791                                 "Only VxLAN/GENEVE/NVGRE tunneling patterns "
792                                 "are supported");
793                         return -rte_errno;
794                 }
795         }
796
797         if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
798             efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
799             efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
800                 rte_flow_error_set(error, EINVAL,
801                         RTE_FLOW_ERROR_TYPE_ITEM, item,
802                         "Outer frame EtherType in pattern with tunneling "
803                         "must be IPv4 or IPv6");
804                 return -rte_errno;
805         }
806
807         return 0;
808 }
809
810 static int
811 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
812                                   const uint8_t *vni_or_vsid_val,
813                                   const uint8_t *vni_or_vsid_mask,
814                                   const struct rte_flow_item *item,
815                                   struct rte_flow_error *error)
816 {
817         const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
818                 0xff, 0xff, 0xff
819         };
820
821         if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
822                    EFX_VNI_OR_VSID_LEN) == 0) {
823                 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
824                 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
825                            EFX_VNI_OR_VSID_LEN);
826         } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
827                 rte_flow_error_set(error, EINVAL,
828                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
829                                    "Unsupported VNI/VSID mask");
830                 return -rte_errno;
831         }
832
833         return 0;
834 }
835
836 /**
837  * Convert VXLAN item to EFX filter specification.
838  *
839  * @param item[in]
840  *   Item specification. Only VXLAN network identifier field is supported.
841  *   If the mask is NULL, default mask will be used.
842  *   Ranging is not supported.
843  * @param efx_spec[in, out]
844  *   EFX filter specification to update.
845  * @param[out] error
846  *   Perform verbose error reporting if not NULL.
847  */
848 static int
849 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
850                      efx_filter_spec_t *efx_spec,
851                      struct rte_flow_error *error)
852 {
853         int rc;
854         const struct rte_flow_item_vxlan *spec = NULL;
855         const struct rte_flow_item_vxlan *mask = NULL;
856         const struct rte_flow_item_vxlan supp_mask = {
857                 .vni = { 0xff, 0xff, 0xff }
858         };
859
860         rc = sfc_flow_parse_init(item,
861                                  (const void **)&spec,
862                                  (const void **)&mask,
863                                  &supp_mask,
864                                  &rte_flow_item_vxlan_mask,
865                                  sizeof(struct rte_flow_item_vxlan),
866                                  error);
867         if (rc != 0)
868                 return rc;
869
870         rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
871                                                      EFX_IPPROTO_UDP, error);
872         if (rc != 0)
873                 return rc;
874
875         efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
876         efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
877
878         if (spec == NULL)
879                 return 0;
880
881         rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
882                                                mask->vni, item, error);
883
884         return rc;
885 }
886
887 /**
888  * Convert GENEVE item to EFX filter specification.
889  *
890  * @param item[in]
891  *   Item specification. Only Virtual Network Identifier and protocol type
892  *   fields are supported. But protocol type can be only Ethernet (0x6558).
893  *   If the mask is NULL, default mask will be used.
894  *   Ranging is not supported.
895  * @param efx_spec[in, out]
896  *   EFX filter specification to update.
897  * @param[out] error
898  *   Perform verbose error reporting if not NULL.
899  */
900 static int
901 sfc_flow_parse_geneve(const struct rte_flow_item *item,
902                       efx_filter_spec_t *efx_spec,
903                       struct rte_flow_error *error)
904 {
905         int rc;
906         const struct rte_flow_item_geneve *spec = NULL;
907         const struct rte_flow_item_geneve *mask = NULL;
908         const struct rte_flow_item_geneve supp_mask = {
909                 .protocol = RTE_BE16(0xffff),
910                 .vni = { 0xff, 0xff, 0xff }
911         };
912
913         rc = sfc_flow_parse_init(item,
914                                  (const void **)&spec,
915                                  (const void **)&mask,
916                                  &supp_mask,
917                                  &rte_flow_item_geneve_mask,
918                                  sizeof(struct rte_flow_item_geneve),
919                                  error);
920         if (rc != 0)
921                 return rc;
922
923         rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
924                                                      EFX_IPPROTO_UDP, error);
925         if (rc != 0)
926                 return rc;
927
928         efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
929         efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
930
931         if (spec == NULL)
932                 return 0;
933
934         if (mask->protocol == supp_mask.protocol) {
935                 if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
936                         rte_flow_error_set(error, EINVAL,
937                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
938                                 "GENEVE encap. protocol must be Ethernet "
939                                 "(0x6558) in the GENEVE pattern item");
940                         return -rte_errno;
941                 }
942         } else if (mask->protocol != 0) {
943                 rte_flow_error_set(error, EINVAL,
944                         RTE_FLOW_ERROR_TYPE_ITEM, item,
945                         "Unsupported mask for GENEVE encap. protocol");
946                 return -rte_errno;
947         }
948
949         rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
950                                                mask->vni, item, error);
951
952         return rc;
953 }
954
955 /**
956  * Convert NVGRE item to EFX filter specification.
957  *
958  * @param item[in]
959  *   Item specification. Only virtual subnet ID field is supported.
960  *   If the mask is NULL, default mask will be used.
961  *   Ranging is not supported.
962  * @param efx_spec[in, out]
963  *   EFX filter specification to update.
964  * @param[out] error
965  *   Perform verbose error reporting if not NULL.
966  */
967 static int
968 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
969                      efx_filter_spec_t *efx_spec,
970                      struct rte_flow_error *error)
971 {
972         int rc;
973         const struct rte_flow_item_nvgre *spec = NULL;
974         const struct rte_flow_item_nvgre *mask = NULL;
975         const struct rte_flow_item_nvgre supp_mask = {
976                 .tni = { 0xff, 0xff, 0xff }
977         };
978
979         rc = sfc_flow_parse_init(item,
980                                  (const void **)&spec,
981                                  (const void **)&mask,
982                                  &supp_mask,
983                                  &rte_flow_item_nvgre_mask,
984                                  sizeof(struct rte_flow_item_nvgre),
985                                  error);
986         if (rc != 0)
987                 return rc;
988
989         rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
990                                                      EFX_IPPROTO_GRE, error);
991         if (rc != 0)
992                 return rc;
993
994         efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
995         efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
996
997         if (spec == NULL)
998                 return 0;
999
1000         rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
1001                                                mask->tni, item, error);
1002
1003         return rc;
1004 }
1005
/*
 * Table of supported pattern items.  sfc_flow_parse_pattern() walks the
 * pattern and uses the (prev_layer, layer) pair to validate item order:
 * an item is accepted when its prev_layer matches the layer reached so
 * far (omitting layers at the start of a pattern is also allowed).
 * Tunnel items (VXLAN/GENEVE/NVGRE) reset the layer to START_LAYER so
 * the inner frame can be matched beginning from Ethernet again.
 */
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_udp,
	},
	{
		/* VXLAN/GENEVE follow outer UDP; NVGRE follows outer L3 */
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_nvgre,
	},
};
1068
1069 /*
1070  * Protocol-independent flow API support
1071  */
1072 static int
1073 sfc_flow_parse_attr(const struct rte_flow_attr *attr,
1074                     struct rte_flow *flow,
1075                     struct rte_flow_error *error)
1076 {
1077         if (attr == NULL) {
1078                 rte_flow_error_set(error, EINVAL,
1079                                    RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1080                                    "NULL attribute");
1081                 return -rte_errno;
1082         }
1083         if (attr->group != 0) {
1084                 rte_flow_error_set(error, ENOTSUP,
1085                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1086                                    "Groups are not supported");
1087                 return -rte_errno;
1088         }
1089         if (attr->priority != 0) {
1090                 rte_flow_error_set(error, ENOTSUP,
1091                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
1092                                    "Priorities are not supported");
1093                 return -rte_errno;
1094         }
1095         if (attr->egress != 0) {
1096                 rte_flow_error_set(error, ENOTSUP,
1097                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1098                                    "Egress is not supported");
1099                 return -rte_errno;
1100         }
1101         if (attr->ingress == 0) {
1102                 rte_flow_error_set(error, ENOTSUP,
1103                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1104                                    "Only ingress is supported");
1105                 return -rte_errno;
1106         }
1107
1108         flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
1109         flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1110
1111         return 0;
1112 }
1113
1114 /* Get item from array sfc_flow_items */
1115 static const struct sfc_flow_item *
1116 sfc_flow_get_item(enum rte_flow_item_type type)
1117 {
1118         unsigned int i;
1119
1120         for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
1121                 if (sfc_flow_items[i].type == type)
1122                         return &sfc_flow_items[i];
1123
1124         return NULL;
1125 }
1126
/*
 * Parse the flow rule pattern into the filter specification template.
 *
 * Each item is validated against the sfc_flow_items table (layer
 * ordering), the inner-frame restrictions are enforced (only VOID and
 * ETH items are allowed after a tunnel item, and at most one tunnel
 * item may appear), and the item's parse callback is invoked to fill
 * in the corresponding fields of flow->spec.template.
 *
 * Returns 0 on success or a negative errno value on failure, with
 * verbose reporting through @error.
 */
static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;	/* parsing the inner (tunneled) frame */
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"There is an unsupported pattern item "
					"in the inner frame");
				return -rte_errno;
			}
			break;
		}

		/* Fill in the template fields this item matches on */
		rc = item->parse(pattern, &flow->spec.template, error);
		if (rc != 0)
			return rc;

		/* VOID keeps the current layer; everything else advances it */
		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}
1210
1211 static int
1212 sfc_flow_parse_queue(struct sfc_adapter *sa,
1213                      const struct rte_flow_action_queue *queue,
1214                      struct rte_flow *flow)
1215 {
1216         struct sfc_rxq *rxq;
1217
1218         if (queue->index >= sa->rxq_count)
1219                 return -EINVAL;
1220
1221         rxq = sa->rxq_info[queue->index].rxq;
1222         flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1223
1224         return 0;
1225 }
1226
#if EFSYS_OPT_RX_SCALE
/*
 * Resolve an RSS action: validate the queue list and hash configuration
 * and fill in the flow's RSS configuration (hw index span, hash types,
 * key and indirection table).
 *
 * Returns 0 on success, -EINVAL on a bad queue list, unsupported hash
 * fields or a key of unexpected length.
 */
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *rss,
		   struct rte_flow *flow)
{
	unsigned int rxq_sw_index;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
	uint64_t rss_hf;
	uint8_t *rss_key = NULL;
	struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
	unsigned int i;

	/*
	 * Reject an empty queue list and an adapter without Rx queues.
	 * The rxq_count check also prevents an out-of-bounds read of
	 * rxq_info[] when seeding the hw index range below.
	 */
	if (rss->num == 0 || sa->rxq_count == 0)
		return -EINVAL;

	/* Seed min/max so the loop below can only shrink/grow them */
	rxq_sw_index = sa->rxq_count - 1;
	rxq = sa->rxq_info[rxq_sw_index].rxq;
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	/* Validate every queue and find the hw index span of the set */
	for (i = 0; i < rss->num; ++i) {
		rxq_sw_index = rss->queue[i];

		if (rxq_sw_index >= sa->rxq_count)
			return -EINVAL;

		rxq = sa->rxq_info[rxq_sw_index].rxq;

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	/* No RSS config means all supported hash fields */
	rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
	if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
		return -EINVAL;

	if (rss_conf != NULL) {
		if (rss_conf->rss_key_len != sizeof(sa->rss_key))
			return -EINVAL;

		rss_key = rss_conf->rss_key;
	} else {
		rss_key = sa->rss_key;
	}

	flow->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));

	/*
	 * Fill the indirection table by cycling over the queue list;
	 * entries are relative to the minimum hw index.
	 */
	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int rxq_sw_index = rss->queue[i % rss->num];
		struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;

		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}
#endif /* EFSYS_OPT_RX_SCALE */
1296
1297 static int
1298 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1299                     unsigned int filters_count)
1300 {
1301         unsigned int i;
1302         int ret = 0;
1303
1304         for (i = 0; i < filters_count; i++) {
1305                 int rc;
1306
1307                 rc = efx_filter_remove(sa->nic, &spec->filters[i]);
1308                 if (ret == 0 && rc != 0) {
1309                         sfc_err(sa, "failed to remove filter specification "
1310                                 "(rc = %d)", rc);
1311                         ret = rc;
1312                 }
1313         }
1314
1315         return ret;
1316 }
1317
1318 static int
1319 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1320 {
1321         unsigned int i;
1322         int rc = 0;
1323
1324         for (i = 0; i < spec->count; i++) {
1325                 rc = efx_filter_insert(sa->nic, &spec->filters[i]);
1326                 if (rc != 0) {
1327                         sfc_flow_spec_flush(sa, spec, i);
1328                         break;
1329                 }
1330         }
1331
1332         return rc;
1333 }
1334
/* Remove all hardware filters created for the flow specification */
static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	return sfc_flow_spec_flush(sa, spec, spec->count);
}
1340
/*
 * Insert the flow's hardware filters.  When the flow uses RSS, an
 * exclusive RSS context is allocated and configured (mode, key) first,
 * stamped into every filter specification, and the indirection table is
 * programmed after filter insertion.  On any failure all partially
 * acquired resources are released in reverse order via the goto chain.
 */
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
#if EFSYS_OPT_RX_SCALE
	struct sfc_flow_rss *rss = &flow->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	unsigned int i;
	int rc = 0;

	if (flow->rss) {
		/* Context size is bounded by the queue span and EFX_MAXRSS */
		unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
					      rss->rxq_hw_index_min + 1,
					      EFX_MAXRSS);

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   EFX_RX_HASHALG_TOEPLITZ,
					   rss->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  rss->rss_key,
					  sizeof(sa->rss_key));
		if (rc != 0)
			goto fail_scale_key_set;

		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < flow->spec.count; i++) {
			efx_filter_spec_t *spec = &flow->spec.filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_dmaq_id = rss->rxq_hw_index_min;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (flow->rss) {
		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  rss->rss_tbl, RTE_DIM(rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	/* A default context value means no context was allocated */
	if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
#else /* !EFSYS_OPT_RX_SCALE */
	return sfc_flow_spec_insert(sa, &flow->spec);
#endif /* EFSYS_OPT_RX_SCALE */
}
1426
1427 static int
1428 sfc_flow_filter_remove(struct sfc_adapter *sa,
1429                        struct rte_flow *flow)
1430 {
1431         int rc = 0;
1432
1433         rc = sfc_flow_spec_remove(sa, &flow->spec);
1434         if (rc != 0)
1435                 return rc;
1436
1437 #if EFSYS_OPT_RX_SCALE
1438         if (flow->rss) {
1439                 /*
1440                  * All specifications for a given flow rule have the same RSS
1441                  * context, so that RSS context value is taken from the first
1442                  * filter specification
1443                  */
1444                 efx_filter_spec_t *spec = &flow->spec.filters[0];
1445
1446                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1447         }
1448 #endif /* EFSYS_OPT_RX_SCALE */
1449
1450         return rc;
1451 }
1452
/*
 * Parse the flow rule actions into the filter specification template.
 *
 * Supported actions are VOID, QUEUE, RSS (when Rx scaling is compiled
 * in) and DROP.  At least one fate action (QUEUE/RSS/DROP) must be
 * present; unsupported actions are rejected with ENOTSUP.
 */
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	boolean_t is_specified = B_FALSE;	/* a fate action was seen */

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

#if EFSYS_OPT_RX_SCALE
		case RTE_FLOW_ACTION_TYPE_RSS:
			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;
#endif /* EFSYS_OPT_RX_SCALE */

		case RTE_FLOW_ACTION_TYPE_DROP:
			/* Direct matched packets to the drop DMA queue */
			flow->spec.template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;

			is_specified = B_TRUE;
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	/* A rule must say what happens to matched packets */
	if (!is_specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
				   "Action is unspecified");
		return -rte_errno;
	}

	return 0;
}
1524
1525 /**
1526  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1527  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1528  * specifications after copying.
1529  *
1530  * @param spec[in, out]
1531  *   SFC flow specification to update.
1532  * @param filters_count_for_one_val[in]
1533  *   How many specifications should have the same match flag, what is the
1534  *   number of specifications before copying.
1535  * @param error[out]
1536  *   Perform verbose error reporting if not NULL.
1537  */
1538 static int
1539 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1540                                unsigned int filters_count_for_one_val,
1541                                struct rte_flow_error *error)
1542 {
1543         unsigned int i;
1544         static const efx_filter_match_flags_t vals[] = {
1545                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1546                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1547         };
1548
1549         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1550                 rte_flow_error_set(error, EINVAL,
1551                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1552                         "Number of specifications is incorrect while copying "
1553                         "by unknown destination flags");
1554                 return -rte_errno;
1555         }
1556
1557         for (i = 0; i < spec->count; i++) {
1558                 /* The check above ensures that divisor can't be zero here */
1559                 spec->filters[i].efs_match_flags |=
1560                         vals[i / filters_count_for_one_val];
1561         }
1562
1563         return 0;
1564 }
1565
1566 /**
1567  * Check that the following conditions are met:
1568  * - the list of supported filters has a filter
1569  *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1570  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1571  *   be inserted.
1572  *
1573  * @param match[in]
1574  *   The match flags of filter.
1575  * @param spec[in]
1576  *   Specification to be supplemented.
1577  * @param filter[in]
1578  *   SFC filter with list of supported filters.
1579  */
1580 static boolean_t
1581 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1582                                  __rte_unused efx_filter_spec_t *spec,
1583                                  struct sfc_filter *filter)
1584 {
1585         unsigned int i;
1586         efx_filter_match_flags_t match_mcast_dst;
1587
1588         match_mcast_dst =
1589                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1590                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1591         for (i = 0; i < filter->supported_match_num; i++) {
1592                 if (match_mcast_dst == filter->supported_match[i])
1593                         return B_TRUE;
1594         }
1595
1596         return B_FALSE;
1597 }
1598
1599 /**
1600  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1601  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1602  * specifications after copying.
1603  *
1604  * @param spec[in, out]
1605  *   SFC flow specification to update.
1606  * @param filters_count_for_one_val[in]
1607  *   How many specifications should have the same EtherType value, what is the
1608  *   number of specifications before copying.
1609  * @param error[out]
1610  *   Perform verbose error reporting if not NULL.
1611  */
1612 static int
1613 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1614                         unsigned int filters_count_for_one_val,
1615                         struct rte_flow_error *error)
1616 {
1617         unsigned int i;
1618         static const uint16_t vals[] = {
1619                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1620         };
1621
1622         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1623                 rte_flow_error_set(error, EINVAL,
1624                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1625                         "Number of specifications is incorrect "
1626                         "while copying by Ethertype");
1627                 return -rte_errno;
1628         }
1629
1630         for (i = 0; i < spec->count; i++) {
1631                 spec->filters[i].efs_match_flags |=
1632                         EFX_FILTER_MATCH_ETHER_TYPE;
1633
1634                 /*
1635                  * The check above ensures that
1636                  * filters_count_for_one_val is not 0
1637                  */
1638                 spec->filters[i].efs_ether_type =
1639                         vals[i / filters_count_for_one_val];
1640         }
1641
1642         return 0;
1643 }
1644
1645 /**
1646  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1647  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1648  * specifications after copying.
1649  *
1650  * @param spec[in, out]
1651  *   SFC flow specification to update.
1652  * @param filters_count_for_one_val[in]
1653  *   How many specifications should have the same match flag, what is the
1654  *   number of specifications before copying.
1655  * @param error[out]
1656  *   Perform verbose error reporting if not NULL.
1657  */
1658 static int
1659 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1660                                     unsigned int filters_count_for_one_val,
1661                                     struct rte_flow_error *error)
1662 {
1663         unsigned int i;
1664         static const efx_filter_match_flags_t vals[] = {
1665                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1666                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1667         };
1668
1669         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1670                 rte_flow_error_set(error, EINVAL,
1671                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1672                         "Number of specifications is incorrect while copying "
1673                         "by inner frame unknown destination flags");
1674                 return -rte_errno;
1675         }
1676
1677         for (i = 0; i < spec->count; i++) {
1678                 /* The check above ensures that divisor can't be zero here */
1679                 spec->filters[i].efs_match_flags |=
1680                         vals[i / filters_count_for_one_val];
1681         }
1682
1683         return 0;
1684 }
1685
1686 /**
1687  * Check that the following conditions are met:
1688  * - the specification corresponds to a filter for encapsulated traffic
1689  * - the list of supported filters has a filter
1690  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1691  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1692  *   be inserted.
1693  *
1694  * @param match[in]
1695  *   The match flags of filter.
1696  * @param spec[in]
1697  *   Specification to be supplemented.
1698  * @param filter[in]
1699  *   SFC filter with list of supported filters.
1700  */
1701 static boolean_t
1702 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1703                                       efx_filter_spec_t *spec,
1704                                       struct sfc_filter *filter)
1705 {
1706         unsigned int i;
1707         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1708         efx_filter_match_flags_t match_mcast_dst;
1709
1710         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1711                 return B_FALSE;
1712
1713         match_mcast_dst =
1714                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1715                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1716         for (i = 0; i < filter->supported_match_num; i++) {
1717                 if (match_mcast_dst == filter->supported_match[i])
1718                         return B_TRUE;
1719         }
1720
1721         return B_FALSE;
1722 }
1723
/*
 * Match flags that can be automatically added to filters.
 * Selecting the last minimum when searching for the copy flag ensures that the
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
 * filters.
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	{
		/* Unknown destination: covered by 2 copies (ucast/mcast) */
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	{
		/* EtherType: covered by 2 copies (IPv4/IPv6); no extra check */
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	{
		/* Inner-frame unknown destination: 2 copies (ucast/mcast) */
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
};
1752
1753 /* Get item from array sfc_flow_copy_flags */
1754 static const struct sfc_flow_copy_flag *
1755 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
1756 {
1757         unsigned int i;
1758
1759         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1760                 if (sfc_flow_copy_flags[i].flag == flag)
1761                         return &sfc_flow_copy_flags[i];
1762         }
1763
1764         return NULL;
1765 }
1766
1767 /**
1768  * Make copies of the specifications, set match flag and values
1769  * of the field that corresponds to it.
1770  *
1771  * @param spec[in, out]
1772  *   SFC flow specification to update.
1773  * @param flag[in]
1774  *   The match flag to add.
1775  * @param error[out]
1776  *   Perform verbose error reporting if not NULL.
1777  */
1778 static int
1779 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
1780                              efx_filter_match_flags_t flag,
1781                              struct rte_flow_error *error)
1782 {
1783         unsigned int i;
1784         unsigned int new_filters_count;
1785         unsigned int filters_count_for_one_val;
1786         const struct sfc_flow_copy_flag *copy_flag;
1787         int rc;
1788
1789         copy_flag = sfc_flow_get_copy_flag(flag);
1790         if (copy_flag == NULL) {
1791                 rte_flow_error_set(error, ENOTSUP,
1792                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1793                                    "Unsupported spec field for copying");
1794                 return -rte_errno;
1795         }
1796
1797         new_filters_count = spec->count * copy_flag->vals_count;
1798         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
1799                 rte_flow_error_set(error, EINVAL,
1800                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1801                         "Too much EFX specifications in the flow rule");
1802                 return -rte_errno;
1803         }
1804
1805         /* Copy filters specifications */
1806         for (i = spec->count; i < new_filters_count; i++)
1807                 spec->filters[i] = spec->filters[i - spec->count];
1808
1809         filters_count_for_one_val = spec->count;
1810         spec->count = new_filters_count;
1811
1812         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
1813         if (rc != 0)
1814                 return rc;
1815
1816         return 0;
1817 }
1818
1819 /**
1820  * Check that the given set of match flags missing in the original filter spec
1821  * could be covered by adding spec copies which specify the corresponding
1822  * flags and packet field values to match.
1823  *
1824  * @param miss_flags[in]
1825  *   Flags that are missing until the supported filter.
1826  * @param spec[in]
1827  *   Specification to be supplemented.
1828  * @param filter[in]
1829  *   SFC filter.
1830  *
1831  * @return
1832  *   Number of specifications after copy or 0, if the flags can not be added.
1833  */
1834 static unsigned int
1835 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
1836                              efx_filter_spec_t *spec,
1837                              struct sfc_filter *filter)
1838 {
1839         unsigned int i;
1840         efx_filter_match_flags_t copy_flags = 0;
1841         efx_filter_match_flags_t flag;
1842         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
1843         sfc_flow_spec_check *check;
1844         unsigned int multiplier = 1;
1845
1846         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1847                 flag = sfc_flow_copy_flags[i].flag;
1848                 check = sfc_flow_copy_flags[i].spec_check;
1849                 if ((flag & miss_flags) == flag) {
1850                         if (check != NULL && (!check(match, spec, filter)))
1851                                 continue;
1852
1853                         copy_flags |= flag;
1854                         multiplier *= sfc_flow_copy_flags[i].vals_count;
1855                 }
1856         }
1857
1858         if (copy_flags == miss_flags)
1859                 return multiplier;
1860
1861         return 0;
1862 }
1863
1864 /**
1865  * Attempt to supplement the specification template to the minimally
1866  * supported set of match flags. To do this, it is necessary to copy
1867  * the specifications, filling them with the values of fields that
1868  * correspond to the missing flags.
1869  * The necessary and sufficient filter is built from the fewest number
1870  * of copies which could be made to cover the minimally required set
1871  * of flags.
1872  *
1873  * @param sa[in]
1874  *   SFC adapter.
1875  * @param spec[in, out]
1876  *   SFC flow specification to update.
1877  * @param error[out]
1878  *   Perform verbose error reporting if not NULL.
1879  */
1880 static int
1881 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
1882                                struct sfc_flow_spec *spec,
1883                                struct rte_flow_error *error)
1884 {
1885         struct sfc_filter *filter = &sa->filter;
1886         efx_filter_match_flags_t miss_flags;
1887         efx_filter_match_flags_t min_miss_flags = 0;
1888         efx_filter_match_flags_t match;
1889         unsigned int min_multiplier = UINT_MAX;
1890         unsigned int multiplier;
1891         unsigned int i;
1892         int rc;
1893
1894         match = spec->template.efs_match_flags;
1895         for (i = 0; i < filter->supported_match_num; i++) {
1896                 if ((match & filter->supported_match[i]) == match) {
1897                         miss_flags = filter->supported_match[i] & (~match);
1898                         multiplier = sfc_flow_check_missing_flags(miss_flags,
1899                                 &spec->template, filter);
1900                         if (multiplier > 0) {
1901                                 if (multiplier <= min_multiplier) {
1902                                         min_multiplier = multiplier;
1903                                         min_miss_flags = miss_flags;
1904                                 }
1905                         }
1906                 }
1907         }
1908
1909         if (min_multiplier == UINT_MAX) {
1910                 rte_flow_error_set(error, ENOTSUP,
1911                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1912                                    "Flow rule pattern is not supported");
1913                 return -rte_errno;
1914         }
1915
1916         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1917                 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
1918
1919                 if ((flag & min_miss_flags) == flag) {
1920                         rc = sfc_flow_spec_add_match_flag(spec, flag, error);
1921                         if (rc != 0)
1922                                 return rc;
1923                 }
1924         }
1925
1926         return 0;
1927 }
1928
1929 /**
1930  * Check that set of match flags is referred to by a filter. Filter is
1931  * described by match flags with the ability to add OUTER_VID and INNER_VID
1932  * flags.
1933  *
1934  * @param match_flags[in]
1935  *   Set of match flags.
1936  * @param flags_pattern[in]
1937  *   Pattern of filter match flags.
1938  */
1939 static boolean_t
1940 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
1941                             efx_filter_match_flags_t flags_pattern)
1942 {
1943         if ((match_flags & flags_pattern) != flags_pattern)
1944                 return B_FALSE;
1945
1946         switch (match_flags & ~flags_pattern) {
1947         case 0:
1948         case EFX_FILTER_MATCH_OUTER_VID:
1949         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
1950                 return B_TRUE;
1951         default:
1952                 return B_FALSE;
1953         }
1954 }
1955
/**
 * Check whether the spec maps to a hardware filter which is known to be
 * ineffective despite being valid.
 *
 * Two families are rejected: EtherType matches (optionally with local
 * MAC and VIDs) for IPv4/IPv6, and such matches further qualified by
 * IP protocol for TCP/UDP.
 * NOTE(review): presumably these would be shadowed by more specific
 * IP/transport filters in hardware — confirm against NIC datasheet.
 *
 * @param spec[in]
 *   SFC flow specification.
 */
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
{
	unsigned int i;
	uint16_t ether_type;
	uint8_t ip_proto;
	efx_filter_match_flags_t match_flags;

	for (i = 0; i < spec->count; i++) {
		match_flags = spec->filters[i].efs_match_flags;

		/* EtherType match, optionally with local MAC (plus VIDs) */
		if (sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE) ||
		    sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE |
						EFX_FILTER_MATCH_LOC_MAC)) {
			ether_type = spec->filters[i].efs_ether_type;
			/* ... is an exception for IPv4/IPv6 EtherTypes */
			if (ether_type == EFX_ETHER_TYPE_IPV4 ||
			    ether_type == EFX_ETHER_TYPE_IPV6)
				return B_TRUE;
		} else if (sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO) ||
			   sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO |
				EFX_FILTER_MATCH_LOC_MAC)) {
			/* EtherType + IP proto match: exception for TCP/UDP */
			ip_proto = spec->filters[i].efs_ip_proto;
			if (ip_proto == EFX_IPPROTO_TCP ||
			    ip_proto == EFX_IPPROTO_UDP)
				return B_TRUE;
		}
	}

	return B_FALSE;
}
1999
2000 static int
2001 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2002                               struct rte_flow *flow,
2003                               struct rte_flow_error *error)
2004 {
2005         efx_filter_spec_t *spec_tmpl = &flow->spec.template;
2006         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2007         int rc;
2008
2009         /* Initialize the first filter spec with template */
2010         flow->spec.filters[0] = *spec_tmpl;
2011         flow->spec.count = 1;
2012
2013         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2014                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2015                 if (rc != 0)
2016                         return rc;
2017         }
2018
2019         if (sfc_flow_is_match_flags_exception(&flow->spec)) {
2020                 rte_flow_error_set(error, ENOTSUP,
2021                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2022                         "The flow rule pattern is unsupported");
2023                 return -rte_errno;
2024         }
2025
2026         return 0;
2027 }
2028
2029 static int
2030 sfc_flow_parse(struct rte_eth_dev *dev,
2031                const struct rte_flow_attr *attr,
2032                const struct rte_flow_item pattern[],
2033                const struct rte_flow_action actions[],
2034                struct rte_flow *flow,
2035                struct rte_flow_error *error)
2036 {
2037         struct sfc_adapter *sa = dev->data->dev_private;
2038         int rc;
2039
2040         rc = sfc_flow_parse_attr(attr, flow, error);
2041         if (rc != 0)
2042                 goto fail_bad_value;
2043
2044         rc = sfc_flow_parse_pattern(pattern, flow, error);
2045         if (rc != 0)
2046                 goto fail_bad_value;
2047
2048         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2049         if (rc != 0)
2050                 goto fail_bad_value;
2051
2052         rc = sfc_flow_validate_match_flags(sa, flow, error);
2053         if (rc != 0)
2054                 goto fail_bad_value;
2055
2056         return 0;
2057
2058 fail_bad_value:
2059         return rc;
2060 }
2061
2062 static int
2063 sfc_flow_validate(struct rte_eth_dev *dev,
2064                   const struct rte_flow_attr *attr,
2065                   const struct rte_flow_item pattern[],
2066                   const struct rte_flow_action actions[],
2067                   struct rte_flow_error *error)
2068 {
2069         struct rte_flow flow;
2070
2071         memset(&flow, 0, sizeof(flow));
2072
2073         return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
2074 }
2075
2076 static struct rte_flow *
2077 sfc_flow_create(struct rte_eth_dev *dev,
2078                 const struct rte_flow_attr *attr,
2079                 const struct rte_flow_item pattern[],
2080                 const struct rte_flow_action actions[],
2081                 struct rte_flow_error *error)
2082 {
2083         struct sfc_adapter *sa = dev->data->dev_private;
2084         struct rte_flow *flow = NULL;
2085         int rc;
2086
2087         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2088         if (flow == NULL) {
2089                 rte_flow_error_set(error, ENOMEM,
2090                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2091                                    "Failed to allocate memory");
2092                 goto fail_no_mem;
2093         }
2094
2095         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2096         if (rc != 0)
2097                 goto fail_bad_value;
2098
2099         TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
2100
2101         sfc_adapter_lock(sa);
2102
2103         if (sa->state == SFC_ADAPTER_STARTED) {
2104                 rc = sfc_flow_filter_insert(sa, flow);
2105                 if (rc != 0) {
2106                         rte_flow_error_set(error, rc,
2107                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2108                                 "Failed to insert filter");
2109                         goto fail_filter_insert;
2110                 }
2111         }
2112
2113         sfc_adapter_unlock(sa);
2114
2115         return flow;
2116
2117 fail_filter_insert:
2118         TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2119
2120 fail_bad_value:
2121         rte_free(flow);
2122         sfc_adapter_unlock(sa);
2123
2124 fail_no_mem:
2125         return NULL;
2126 }
2127
2128 static int
2129 sfc_flow_remove(struct sfc_adapter *sa,
2130                 struct rte_flow *flow,
2131                 struct rte_flow_error *error)
2132 {
2133         int rc = 0;
2134
2135         SFC_ASSERT(sfc_adapter_is_locked(sa));
2136
2137         if (sa->state == SFC_ADAPTER_STARTED) {
2138                 rc = sfc_flow_filter_remove(sa, flow);
2139                 if (rc != 0)
2140                         rte_flow_error_set(error, rc,
2141                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2142                                 "Failed to destroy flow rule");
2143         }
2144
2145         TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2146         rte_free(flow);
2147
2148         return rc;
2149 }
2150
2151 static int
2152 sfc_flow_destroy(struct rte_eth_dev *dev,
2153                  struct rte_flow *flow,
2154                  struct rte_flow_error *error)
2155 {
2156         struct sfc_adapter *sa = dev->data->dev_private;
2157         struct rte_flow *flow_ptr;
2158         int rc = EINVAL;
2159
2160         sfc_adapter_lock(sa);
2161
2162         TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
2163                 if (flow_ptr == flow)
2164                         rc = 0;
2165         }
2166         if (rc != 0) {
2167                 rte_flow_error_set(error, rc,
2168                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2169                                    "Failed to find flow rule to destroy");
2170                 goto fail_bad_value;
2171         }
2172
2173         rc = sfc_flow_remove(sa, flow, error);
2174
2175 fail_bad_value:
2176         sfc_adapter_unlock(sa);
2177
2178         return -rc;
2179 }
2180
2181 static int
2182 sfc_flow_flush(struct rte_eth_dev *dev,
2183                struct rte_flow_error *error)
2184 {
2185         struct sfc_adapter *sa = dev->data->dev_private;
2186         struct rte_flow *flow;
2187         int rc = 0;
2188         int ret = 0;
2189
2190         sfc_adapter_lock(sa);
2191
2192         while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2193                 rc = sfc_flow_remove(sa, flow, error);
2194                 if (rc != 0)
2195                         ret = rc;
2196         }
2197
2198         sfc_adapter_unlock(sa);
2199
2200         return -ret;
2201 }
2202
2203 static int
2204 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2205                  struct rte_flow_error *error)
2206 {
2207         struct sfc_adapter *sa = dev->data->dev_private;
2208         struct sfc_port *port = &sa->port;
2209         int ret = 0;
2210
2211         sfc_adapter_lock(sa);
2212         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2213                 rte_flow_error_set(error, EBUSY,
2214                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2215                                    NULL, "please close the port first");
2216                 ret = -rte_errno;
2217         } else {
2218                 port->isolated = (enable) ? B_TRUE : B_FALSE;
2219         }
2220         sfc_adapter_unlock(sa);
2221
2222         return ret;
2223 }
2224
/* Flow API operations implemented by this driver */
const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,		/* query is not supported */
	.isolate = sfc_flow_isolate,
};
2233
/* Initialize the empty flow list; caller must hold the adapter lock */
void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}
2241
2242 void
2243 sfc_flow_fini(struct sfc_adapter *sa)
2244 {
2245         struct rte_flow *flow;
2246
2247         SFC_ASSERT(sfc_adapter_is_locked(sa));
2248
2249         while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2250                 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2251                 rte_free(flow);
2252         }
2253 }
2254
2255 void
2256 sfc_flow_stop(struct sfc_adapter *sa)
2257 {
2258         struct rte_flow *flow;
2259
2260         SFC_ASSERT(sfc_adapter_is_locked(sa));
2261
2262         TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
2263                 sfc_flow_filter_remove(sa, flow);
2264 }
2265
2266 int
2267 sfc_flow_start(struct sfc_adapter *sa)
2268 {
2269         struct rte_flow *flow;
2270         int rc = 0;
2271
2272         sfc_log_init(sa, "entry");
2273
2274         SFC_ASSERT(sfc_adapter_is_locked(sa));
2275
2276         TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
2277                 rc = sfc_flow_filter_insert(sa, flow);
2278                 if (rc != 0)
2279                         goto fail_bad_flow;
2280         }
2281
2282         sfc_log_init(sa, "done");
2283
2284 fail_bad_flow:
2285         return rc;
2286 }