ethdev: add encap level to RSS flow API action
[dpdk.git] / drivers / net / sfc / sfc_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * At the moment, the flow API is implemented in such a way that each
 * flow rule is converted to one or more hardware filters.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is left unset in the flow rule, multiple
 * filter copies are created to cover all possible values of such
 * a field.
 */

enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
        SFC_FLOW_ITEM_L3,
        SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of item */
        enum sfc_flow_item_layers layer;        /* Layer of item */
        enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate item and prepare structures spec and mask for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t match;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec", then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask and spec do not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                match = spec[i] | mask[i];
                supp = ((const uint8_t *)supp_mask)[i];

                if ((match | supp) != supp) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}
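
/*
 * Illustrative usage sketch (editorial note, not part of the driver):
 * a protocol parser below typically declares the fields it supports via
 * a supp_mask and delegates validation to sfc_flow_parse_init(), e.g.
 * for UDP:
 *
 *     const struct rte_flow_item_udp supp_mask = {
 *             .hdr = { .src_port = 0xffff, .dst_port = 0xffff },
 *     };
 *     rc = sfc_flow_parse_init(item,
 *                              (const void **)&spec, (const void **)&mask,
 *                              &supp_mask, &rte_flow_item_udp_mask,
 *                              sizeof(struct rte_flow_item_udp), error);
 */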

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * ethertype masks are equal to zero in inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}
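
/*
 * Illustrative example (an assumption about application usage, not part
 * of the driver): an ETH item accepted by the parser above, matching a
 * specific destination MAC only, could be built as
 *
 *     static const struct rte_flow_item_eth eth_spec = {
 *             .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *     };
 *     static const struct rte_flow_item_eth eth_mask = {
 *             .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *     };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec,
 *             .mask = &eth_mask,
 *     };
 *
 * which results in EFX_FILTER_MATCH_LOC_MAC being set.
 */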

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                vid = rte_bswap16(spec->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        return 0;
}
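
/*
 * Illustrative only: matching double-tagged (QinQ) traffic with outer
 * VID 100 and inner VID 200 would use two VLAN items in order, each with
 * the TCI mask limited to the VLAN ID bits:
 *
 *     ETH / VLAN (tci = 100, tci mask = 0x0fff)
 *         / VLAN (tci = 200, tci mask = 0x0fff) / ...
 */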

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}
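
/*
 * Illustrative only (hypothetical application snippet): matching IPv4
 * packets destined to 192.0.2.1 with any source address could use
 *
 *     const struct rte_flow_item_ipv4 ipv4_spec = {
 *             .hdr.dst_addr = RTE_BE32(0xc0000201),
 *     };
 *     const struct rte_flow_item_ipv4 ipv4_mask = {
 *             .hdr.dst_addr = RTE_BE32(0xffffffff),
 *     };
 *
 * which sets EFX_FILTER_MATCH_LOC_HOST in the resulting specification.
 */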

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be UDP "
                                "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Outer IP header protocol must be GRE "
                                "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Outer frame EtherType in pattern with tunneling "
                        "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only VXLAN network identifier field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}
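
/*
 * Illustrative only: a complete VXLAN pattern handled by this parser is
 * typically ETH / IPV4 / UDP / VXLAN / ETH, where the trailing ETH item
 * describes the inner frame and the VXLAN item carries the VNI, e.g.
 *
 *     const struct rte_flow_item_vxlan vxlan_spec = {
 *             .vni = { 0x00, 0x00, 0x2a },
 *     };
 *
 * (VNI 42) with the default rte_flow_item_vxlan_mask, whose VNI field is
 * fully masked.
 */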

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only Virtual Network Identifier and protocol type
 *   fields are supported, and the protocol type may only be Ethernet
 *   (0x6558). If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      efx_filter_spec_t *efx_spec,
                      struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "GENEVE encap. protocol must be Ethernet "
                                "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only virtual subnet ID field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     efx_filter_spec_t *efx_spec,
                     struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .parse = sfc_flow_parse_nvgre,
        },
};
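
/*
 * The prev_layer/layer pairs above encode the allowed item ordering:
 * e.g. ETH (L2) -> IPV4 (L3) -> UDP (L4) -> VXLAN (which restarts the
 * layering for the inner frame) is accepted, whereas ETH followed
 * directly by TCP is rejected because TCP expects an L3 predecessor.
 */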

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
        flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

        return 0;
}
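
/*
 * Illustrative only: the only attributes accepted above are ingress
 * rules in group 0 with priority 0, i.e.
 *
 *     const struct rte_flow_attr attr = { .ingress = 1 };
 */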

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
                                        "There is an unsupported pattern item "
                                        "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                rc = item->parse(pattern, &flow->spec.template, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_rxq *rxq;

        if (queue->index >= sa->rxq_count)
                return -EINVAL;

        rxq = sa->rxq_info[queue->index].rxq;
        flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

#if EFSYS_OPT_RX_SCALE
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
                   const struct rte_flow_action_rss *rss,
                   struct rte_flow *flow)
{
        unsigned int rxq_sw_index;
        struct sfc_rxq *rxq;
        unsigned int rxq_hw_index_min;
        unsigned int rxq_hw_index_max;
        const uint8_t *rss_key;
        struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
        unsigned int i;

        if (rss->queue_num == 0)
                return -EINVAL;

        rxq_sw_index = sa->rxq_count - 1;
        rxq = sa->rxq_info[rxq_sw_index].rxq;
        rxq_hw_index_min = rxq->hw_index;
        rxq_hw_index_max = 0;

        for (i = 0; i < rss->queue_num; ++i) {
                rxq_sw_index = rss->queue[i];

                if (rxq_sw_index >= sa->rxq_count)
                        return -EINVAL;

                rxq = sa->rxq_info[rxq_sw_index].rxq;

                if (rxq->hw_index < rxq_hw_index_min)
                        rxq_hw_index_min = rxq->hw_index;

                if (rxq->hw_index > rxq_hw_index_max)
                        rxq_hw_index_max = rxq->hw_index;
        }

        switch (rss->func) {
        case RTE_ETH_HASH_FUNCTION_DEFAULT:
        case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
                break;
        default:
                return -EINVAL;
        }

        if (rss->level)
                return -EINVAL;

        if ((rss->types & ~SFC_RSS_OFFLOADS) != 0)
                return -EINVAL;

        if (rss->key_len) {
                if (rss->key_len != sizeof(sa->rss_key))
                        return -EINVAL;

                rss_key = rss->key;
        } else {
                rss_key = sa->rss_key;
        }

        flow->rss = B_TRUE;

        sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
        sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
        sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss->types);
        rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));

        for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
                unsigned int rxq_sw_index = rss->queue[i % rss->queue_num];
                struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;

                sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
        }

        return 0;
}
#endif /* EFSYS_OPT_RX_SCALE */
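
/*
 * Illustrative only (hypothetical application snippet): an RSS action
 * accepted by the parser above, spreading traffic over four queues with
 * the default key, could look like
 *
 *     uint16_t queues[] = { 0, 1, 2, 3 };
 *     const struct rte_flow_action_rss rss = {
 *             .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *             .level = 0,
 *             .types = ETH_RSS_IP,
 *             .queue_num = RTE_DIM(queues),
 *             .queue = queues,
 *     };
 */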

static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
                    unsigned int filters_count)
{
        unsigned int i;
        int ret = 0;

        for (i = 0; i < filters_count; i++) {
                int rc;

                rc = efx_filter_remove(sa->nic, &spec->filters[i]);
                if (ret == 0 && rc != 0) {
                        sfc_err(sa, "failed to remove filter specification "
                                "(rc = %d)", rc);
                        ret = rc;
                }
        }

        return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < spec->count; i++) {
                rc = efx_filter_insert(sa->nic, &spec->filters[i]);
                if (rc != 0) {
                        sfc_flow_spec_flush(sa, spec, i);
                        break;
                }
        }

        return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        return sfc_flow_spec_flush(sa, spec, spec->count);
}

static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
                       struct rte_flow *flow)
{
#if EFSYS_OPT_RX_SCALE
        struct sfc_flow_rss *rss = &flow->rss_conf;
        uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
        unsigned int i;
        int rc = 0;

        if (flow->rss) {
                unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
                                              rss->rxq_hw_index_min + 1,
                                              EFX_MAXRSS);

                rc = efx_rx_scale_context_alloc(sa->nic,
                                                EFX_RX_SCALE_EXCLUSIVE,
                                                rss_spread,
                                                &efs_rss_context);
                if (rc != 0)
                        goto fail_scale_context_alloc;

                rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
                                           EFX_RX_HASHALG_TOEPLITZ,
                                           rss->rss_hash_types, B_TRUE);
                if (rc != 0)
                        goto fail_scale_mode_set;

                rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
                                          rss->rss_key,
                                          sizeof(sa->rss_key));
                if (rc != 0)
                        goto fail_scale_key_set;

                /*
                 * At this point, fully elaborated filter specifications
                 * have been produced from the template. To make sure that
                 * RSS behaviour is consistent between them, set the same
                 * RSS context value everywhere.
                 */
                for (i = 0; i < flow->spec.count; i++) {
                        efx_filter_spec_t *spec = &flow->spec.filters[i];

                        spec->efs_rss_context = efs_rss_context;
                        spec->efs_dmaq_id = rss->rxq_hw_index_min;
                        spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
                }
        }

        rc = sfc_flow_spec_insert(sa, &flow->spec);
        if (rc != 0)
                goto fail_filter_insert;

        if (flow->rss) {
1403                 /*
1404                  * The scale table is set after filter insertion because
1405                  * its entries are relative to the base RxQ ID, which is
1406                  * submitted to the HW by means of inserting a filter.
1407                  * Hence, by the time of this request, the HW knows all
1408                  * the information needed to verify the table entries,
1409                  * and the operation will succeed.
1410                  */
1411                 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1412                                           rss->rss_tbl, RTE_DIM(rss->rss_tbl));
1413                 if (rc != 0)
1414                         goto fail_scale_tbl_set;
1415         }
1416
1417         return 0;
1418
1419 fail_scale_tbl_set:
1420         sfc_flow_spec_remove(sa, &flow->spec);
1421
1422 fail_filter_insert:
1423 fail_scale_key_set:
1424 fail_scale_mode_set:
1425         if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1426                 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1427
1428 fail_scale_context_alloc:
1429         return rc;
1430 #else /* !EFSYS_OPT_RX_SCALE */
1431         return sfc_flow_spec_insert(sa, &flow->spec);
1432 #endif /* EFSYS_OPT_RX_SCALE */
1433 }
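
/*
 * Worked example for the RSS path above (illustrative values, not taken
 * from any particular adapter): with rxq_hw_index_min = 4 and
 * rxq_hw_index_max = 7, rss_spread = MIN(7 - 4 + 1, EFX_MAXRSS) = 4, so
 * an exclusive RSS context spanning four queues is allocated; every
 * filter copy is then pointed at base queue 4 (efs_dmaq_id) with
 * EFX_FILTER_FLAG_RX_RSS set, and the indirection table is programmed
 * only after the filters have been inserted.
 */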
1434
1435 static int
1436 sfc_flow_filter_remove(struct sfc_adapter *sa,
1437                        struct rte_flow *flow)
1438 {
1439         int rc = 0;
1440
1441         rc = sfc_flow_spec_remove(sa, &flow->spec);
1442         if (rc != 0)
1443                 return rc;
1444
1445 #if EFSYS_OPT_RX_SCALE
1446         if (flow->rss) {
1447                 /*
1448                  * All specifications for a given flow rule have the same RSS
1449                  * context, so the RSS context value is taken from the first
1450                  * filter specification.
1451                  */
1452                 efx_filter_spec_t *spec = &flow->spec.filters[0];
1453
1454                 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1455         }
1456 #endif /* EFSYS_OPT_RX_SCALE */
1457
1458         return rc;
1459 }
1460
1461 static int
1462 sfc_flow_parse_actions(struct sfc_adapter *sa,
1463                        const struct rte_flow_action actions[],
1464                        struct rte_flow *flow,
1465                        struct rte_flow_error *error)
1466 {
1467         int rc;
1468         boolean_t is_specified = B_FALSE;
1469
1470         if (actions == NULL) {
1471                 rte_flow_error_set(error, EINVAL,
1472                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1473                                    "NULL actions");
1474                 return -rte_errno;
1475         }
1476
1477         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1478                 /* This one may appear anywhere, any number of times. */
1479                 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
1480                         continue;
1481                 /* A fate-deciding action may appear at most once. */
1482                 if (is_specified) {
1483                         rte_flow_error_set
1484                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
1485                                  actions,
1486                                  "Cannot combine several fate-deciding actions, "
1487                                  "choose between QUEUE, RSS or DROP");
1488                         return -rte_errno;
1489                 }
1490                 switch (actions->type) {
1491                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1492                         rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1493                         if (rc != 0) {
1494                                 rte_flow_error_set(error, EINVAL,
1495                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1496                                         "Bad QUEUE action");
1497                                 return -rte_errno;
1498                         }
1499
1500                         is_specified = B_TRUE;
1501                         break;
1502
1503 #if EFSYS_OPT_RX_SCALE
1504                 case RTE_FLOW_ACTION_TYPE_RSS:
1505                         rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1506                         if (rc != 0) {
1507                                 rte_flow_error_set(error, rc,
1508                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
1509                                         "Bad RSS action");
1510                                 return -rte_errno;
1511                         }
1512
1513                         is_specified = B_TRUE;
1514                         break;
1515 #endif /* EFSYS_OPT_RX_SCALE */
1516
1517                 case RTE_FLOW_ACTION_TYPE_DROP:
1518                         flow->spec.template.efs_dmaq_id =
1519                                 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1520
1521                         is_specified = B_TRUE;
1522                         break;
1523
1524                 default:
1525                         rte_flow_error_set(error, ENOTSUP,
1526                                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1527                                            "Action is not supported");
1528                         return -rte_errno;
1529                 }
1530         }
1531
1532         /* When fate is unknown, drop traffic. */
1533         if (!is_specified) {
1534                 flow->spec.template.efs_dmaq_id =
1535                         EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1536         }
1537
1538         return 0;
1539 }
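
/*
 * For reference, a minimal sketch of an action list accepted by the
 * parser above (hypothetical application code, not part of this
 * driver): VOID actions may be interleaved anywhere, at most one
 * fate-deciding action is given (QUEUE here), and END terminates the
 * list.
 *
 *   static const struct rte_flow_action_queue queue = { .index = 0 };
 *   const struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_VOID },
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *       { .type = RTE_FLOW_ACTION_TYPE_VOID },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */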
1540
1541 /**
1542  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1543  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the copied
1544  * specifications: one flag value per copy of the original set.
1545  *
1546  * @param spec[in, out]
1547  *   SFC flow specification to update.
1548  * @param filters_count_for_one_val[in]
1549  *   How many specifications should share one match flag value, i.e. the
1550  *   number of specifications before copying.
1551  * @param error[out]
1552  *   Perform verbose error reporting if not NULL.
1553  */
1554 static int
1555 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1556                                unsigned int filters_count_for_one_val,
1557                                struct rte_flow_error *error)
1558 {
1559         unsigned int i;
1560         static const efx_filter_match_flags_t vals[] = {
1561                 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1562                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1563         };
1564
1565         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1566                 rte_flow_error_set(error, EINVAL,
1567                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1568                         "Number of specifications is incorrect while copying "
1569                         "by unknown destination flags");
1570                 return -rte_errno;
1571         }
1572
1573         for (i = 0; i < spec->count; i++) {
1574                 /* The check above ensures that divisor can't be zero here */
1575                 spec->filters[i].efs_match_flags |=
1576                         vals[i / filters_count_for_one_val];
1577         }
1578
1579         return 0;
1580 }
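
/*
 * Worked example (assumed values): with spec->count = 4 and
 * filters_count_for_one_val = 2, vals[i / 2] marks filters 0-1 with
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and filters 2-3 with
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST, i.e. each flag value is applied
 * to one complete copy of the original pair of specifications.
 */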
1581
1582 /**
1583  * Check that the following condition is met:
1584  * - the list of supported filters has a filter
1585  *   with the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1586  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1587  *   be inserted.
1588  *
1589  * @param match[in]
1590  *   The match flags of filter.
1591  * @param spec[in]
1592  *   Specification to be supplemented.
1593  * @param filter[in]
1594  *   SFC filter with list of supported filters.
1595  */
1596 static boolean_t
1597 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1598                                  __rte_unused efx_filter_spec_t *spec,
1599                                  struct sfc_filter *filter)
1600 {
1601         unsigned int i;
1602         efx_filter_match_flags_t match_mcast_dst;
1603
1604         match_mcast_dst =
1605                 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1606                 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1607         for (i = 0; i < filter->supported_match_num; i++) {
1608                 if (match_mcast_dst == filter->supported_match[i])
1609                         return B_TRUE;
1610         }
1611
1612         return B_FALSE;
1613 }
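
/*
 * For example (hypothetical match set): if match is
 * EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, the
 * helper above searches the supported list for
 * EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_UNKNOWN_MCAST_DST,
 * since the copy carrying the multicast variant will be inserted
 * alongside the unicast one.
 */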
1614
1615 /**
1616  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1617  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the copied
1618  * specifications: one EtherType value per copy of the original set.
1619  *
1620  * @param spec[in, out]
1621  *   SFC flow specification to update.
1622  * @param filters_count_for_one_val[in]
1623  *   How many specifications should share one EtherType value, i.e. the
1624  *   number of specifications before copying.
1625  * @param error[out]
1626  *   Perform verbose error reporting if not NULL.
1627  */
1628 static int
1629 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1630                         unsigned int filters_count_for_one_val,
1631                         struct rte_flow_error *error)
1632 {
1633         unsigned int i;
1634         static const uint16_t vals[] = {
1635                 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1636         };
1637
1638         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1639                 rte_flow_error_set(error, EINVAL,
1640                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1641                         "Number of specifications is incorrect "
1642                         "while copying by Ethertype");
1643                 return -rte_errno;
1644         }
1645
1646         for (i = 0; i < spec->count; i++) {
1647                 spec->filters[i].efs_match_flags |=
1648                         EFX_FILTER_MATCH_ETHER_TYPE;
1649
1650                 /*
1651                  * The check above ensures that
1652                  * filters_count_for_one_val is not 0
1653                  */
1654                 spec->filters[i].efs_ether_type =
1655                         vals[i / filters_count_for_one_val];
1656         }
1657
1658         return 0;
1659 }
1660
1661 /**
1662  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1663  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the copied
1664  * specifications: one flag value per copy of the original set.
1665  *
1666  * @param spec[in, out]
1667  *   SFC flow specification to update.
1668  * @param filters_count_for_one_val[in]
1669  *   How many specifications should share one match flag value, i.e. the
1670  *   number of specifications before copying.
1671  * @param error[out]
1672  *   Perform verbose error reporting if not NULL.
1673  */
1674 static int
1675 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1676                                     unsigned int filters_count_for_one_val,
1677                                     struct rte_flow_error *error)
1678 {
1679         unsigned int i;
1680         static const efx_filter_match_flags_t vals[] = {
1681                 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1682                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1683         };
1684
1685         if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1686                 rte_flow_error_set(error, EINVAL,
1687                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1688                         "Number of specifications is incorrect while copying "
1689                         "by inner frame unknown destination flags");
1690                 return -rte_errno;
1691         }
1692
1693         for (i = 0; i < spec->count; i++) {
1694                 /* The check above ensures that divisor can't be zero here */
1695                 spec->filters[i].efs_match_flags |=
1696                         vals[i / filters_count_for_one_val];
1697         }
1698
1699         return 0;
1700 }
1701
1702 /**
1703  * Check that the following conditions are met:
1704  * - the specification corresponds to a filter for encapsulated traffic
1705  * - the list of supported filters has a filter
1706  *   with the EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1707  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1708  *   be inserted.
1709  *
1710  * @param match[in]
1711  *   The match flags of filter.
1712  * @param spec[in]
1713  *   Specification to be supplemented.
1714  * @param filter[in]
1715  *   SFC filter with list of supported filters.
1716  */
1717 static boolean_t
1718 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1719                                       efx_filter_spec_t *spec,
1720                                       struct sfc_filter *filter)
1721 {
1722         unsigned int i;
1723         efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1724         efx_filter_match_flags_t match_mcast_dst;
1725
1726         if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1727                 return B_FALSE;
1728
1729         match_mcast_dst =
1730                 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1731                 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1732         for (i = 0; i < filter->supported_match_num; i++) {
1733                 if (match_mcast_dst == filter->supported_match[i])
1734                         return B_TRUE;
1735         }
1736
1737         return B_FALSE;
1738 }
1739
1740 /*
1741  * Match flags that can be automatically added to filters.
1742  * Selecting the last of equal minima when searching for the copy flag
1743  * gives the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag a higher priority
1744  * than EFX_FILTER_MATCH_ETHER_TYPE, because filters with
1745  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are at the end of the list of
1746  * supported filters.
1747  */
1748 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
1749         {
1750                 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1751                 .vals_count = 2,
1752                 .set_vals = sfc_flow_set_unknown_dst_flags,
1753                 .spec_check = sfc_flow_check_unknown_dst_flags,
1754         },
1755         {
1756                 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
1757                 .vals_count = 2,
1758                 .set_vals = sfc_flow_set_ethertypes,
1759                 .spec_check = NULL,
1760         },
1761         {
1762                 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1763                 .vals_count = 2,
1764                 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
1765                 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
1766         },
1767 };
1768
1769 /* Get item from array sfc_flow_copy_flags */
1770 static const struct sfc_flow_copy_flag *
1771 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
1772 {
1773         unsigned int i;
1774
1775         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1776                 if (sfc_flow_copy_flags[i].flag == flag)
1777                         return &sfc_flow_copy_flags[i];
1778         }
1779
1780         return NULL;
1781 }
1782
1783 /**
1784  * Make copies of the specifications, then set the match flag and the
1785  * values of the corresponding field in them.
1786  *
1787  * @param spec[in, out]
1788  *   SFC flow specification to update.
1789  * @param flag[in]
1790  *   The match flag to add.
1791  * @param error[out]
1792  *   Perform verbose error reporting if not NULL.
1793  */
1794 static int
1795 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
1796                              efx_filter_match_flags_t flag,
1797                              struct rte_flow_error *error)
1798 {
1799         unsigned int i;
1800         unsigned int new_filters_count;
1801         unsigned int filters_count_for_one_val;
1802         const struct sfc_flow_copy_flag *copy_flag;
1803         int rc;
1804
1805         copy_flag = sfc_flow_get_copy_flag(flag);
1806         if (copy_flag == NULL) {
1807                 rte_flow_error_set(error, ENOTSUP,
1808                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1809                                    "Unsupported spec field for copying");
1810                 return -rte_errno;
1811         }
1812
1813         new_filters_count = spec->count * copy_flag->vals_count;
1814         if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
1815                 rte_flow_error_set(error, EINVAL,
1816                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1817                         "Too many EFX specifications in the flow rule");
1818                 return -rte_errno;
1819         }
1820
1821         /* Copy filters specifications */
1822         for (i = spec->count; i < new_filters_count; i++)
1823                 spec->filters[i] = spec->filters[i - spec->count];
1824
1825         filters_count_for_one_val = spec->count;
1826         spec->count = new_filters_count;
1827
1828         rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
1829         if (rc != 0)
1830                 return rc;
1831
1832         return 0;
1833 }
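
/*
 * Worked example (assumed starting point): for a single specification
 * matching only EFX_FILTER_MATCH_LOC_MAC, adding
 * EFX_FILTER_MATCH_ETHER_TYPE (vals_count = 2) first duplicates it
 * (filters[1] = filters[0]), then set_vals() assigns
 * EFX_ETHER_TYPE_IPV4 to filters[0] and EFX_ETHER_TYPE_IPV6 to
 * filters[1], leaving spec->count = 2.
 */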
1834
1835 /**
1836  * Check that the given set of match flags missing in the original filter spec
1837  * could be covered by adding spec copies which specify the corresponding
1838  * flags and packet field values to match.
1839  *
1840  * @param miss_flags[in]
1841  *   Match flags missing from the specification relative to a supported filter.
1842  * @param spec[in]
1843  *   Specification to be supplemented.
1844  * @param filter[in]
1845  *   SFC filter.
1846  *
1847  * @return
1848  *   Number of specifications after copying, or 0 if the flags cannot be added.
1849  */
1850 static unsigned int
1851 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
1852                              efx_filter_spec_t *spec,
1853                              struct sfc_filter *filter)
1854 {
1855         unsigned int i;
1856         efx_filter_match_flags_t copy_flags = 0;
1857         efx_filter_match_flags_t flag;
1858         efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
1859         sfc_flow_spec_check *check;
1860         unsigned int multiplier = 1;
1861
1862         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1863                 flag = sfc_flow_copy_flags[i].flag;
1864                 check = sfc_flow_copy_flags[i].spec_check;
1865                 if ((flag & miss_flags) == flag) {
1866                         if (check != NULL && (!check(match, spec, filter)))
1867                                 continue;
1868
1869                         copy_flags |= flag;
1870                         multiplier *= sfc_flow_copy_flags[i].vals_count;
1871                 }
1872         }
1873
1874         if (copy_flags == miss_flags)
1875                 return multiplier;
1876
1877         return 0;
1878 }
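
/*
 * Worked example (assumed flags): if miss_flags is
 * EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and
 * both flags pass their checks, copy_flags equals miss_flags and the
 * function returns 2 * 2 = 4: covering both missing flags takes four
 * filter copies (IPv4/IPv6 EtherType x unknown unicast/multicast
 * destination).
 */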
1879
1880 /**
1881  * Attempt to expand the specification template to the minimally
1882  * supported set of match flags. To do this, the specifications are
1883  * copied and filled in with the values of the fields that
1884  * correspond to the missing flags.
1885  * The necessary and sufficient filter set is built from the
1886  * fewest number of copies that cover the minimally required
1887  * set of flags.
1888  *
1889  * @param sa[in]
1890  *   SFC adapter.
1891  * @param spec[in, out]
1892  *   SFC flow specification to update.
1893  * @param error[out]
1894  *   Perform verbose error reporting if not NULL.
1895  */
1896 static int
1897 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
1898                                struct sfc_flow_spec *spec,
1899                                struct rte_flow_error *error)
1900 {
1901         struct sfc_filter *filter = &sa->filter;
1902         efx_filter_match_flags_t miss_flags;
1903         efx_filter_match_flags_t min_miss_flags = 0;
1904         efx_filter_match_flags_t match;
1905         unsigned int min_multiplier = UINT_MAX;
1906         unsigned int multiplier;
1907         unsigned int i;
1908         int rc;
1909
1910         match = spec->template.efs_match_flags;
1911         for (i = 0; i < filter->supported_match_num; i++) {
1912                 if ((match & filter->supported_match[i]) == match) {
1913                         miss_flags = filter->supported_match[i] & (~match);
1914                         multiplier = sfc_flow_check_missing_flags(miss_flags,
1915                                 &spec->template, filter);
1916                         if (multiplier > 0 &&
1917                             multiplier <= min_multiplier) {
1918                                 min_multiplier = multiplier;
1919                                 min_miss_flags = miss_flags;
1920                         }
1922                 }
1923         }
1924
1925         if (min_multiplier == UINT_MAX) {
1926                 rte_flow_error_set(error, ENOTSUP,
1927                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1928                                    "Flow rule pattern is not supported");
1929                 return -rte_errno;
1930         }
1931
1932         for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1933                 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
1934
1935                 if ((flag & min_miss_flags) == flag) {
1936                         rc = sfc_flow_spec_add_match_flag(spec, flag, error);
1937                         if (rc != 0)
1938                                 return rc;
1939                 }
1940         }
1941
1942         return 0;
1943 }
1944
1945 /**
1946  * Check whether a set of match flags corresponds to a filter pattern.
1947  * The pattern may optionally be extended with the OUTER_VID and
1948  * INNER_VID flags.
1949  *
1950  * @param match_flags[in]
1951  *   Set of match flags.
1952  * @param flags_pattern[in]
1953  *   Pattern of filter match flags.
1954  */
1955 static boolean_t
1956 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
1957                             efx_filter_match_flags_t flags_pattern)
1958 {
1959         if ((match_flags & flags_pattern) != flags_pattern)
1960                 return B_FALSE;
1961
1962         switch (match_flags & ~flags_pattern) {
1963         case 0:
1964         case EFX_FILTER_MATCH_OUTER_VID:
1965         case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
1966                 return B_TRUE;
1967         default:
1968                 return B_FALSE;
1969         }
1970 }
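
/*
 * For instance, EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID
 * matches the EFX_FILTER_MATCH_ETHER_TYPE pattern (an OUTER_VID
 * remainder is allowed), whereas EFX_FILTER_MATCH_ETHER_TYPE |
 * EFX_FILTER_MATCH_LOC_MAC does not, because LOC_MAC is not one of the
 * permitted leftovers.
 */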
1971
1972 /**
1973  * Check whether the spec maps to a hardware filter which is known to be
1974  * ineffective despite being valid.
1975  *
1976  * @param spec[in]
1977  *   SFC flow specification.
1978  */
1979 static boolean_t
1980 sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
1981 {
1982         unsigned int i;
1983         uint16_t ether_type;
1984         uint8_t ip_proto;
1985         efx_filter_match_flags_t match_flags;
1986
1987         for (i = 0; i < spec->count; i++) {
1988                 match_flags = spec->filters[i].efs_match_flags;
1989
1990                 if (sfc_flow_is_match_with_vids(match_flags,
1991                                                 EFX_FILTER_MATCH_ETHER_TYPE) ||
1992                     sfc_flow_is_match_with_vids(match_flags,
1993                                                 EFX_FILTER_MATCH_ETHER_TYPE |
1994                                                 EFX_FILTER_MATCH_LOC_MAC)) {
1995                         ether_type = spec->filters[i].efs_ether_type;
1996                         if (ether_type == EFX_ETHER_TYPE_IPV4 ||
1997                             ether_type == EFX_ETHER_TYPE_IPV6)
1998                                 return B_TRUE;
1999                 } else if (sfc_flow_is_match_with_vids(match_flags,
2000                                 EFX_FILTER_MATCH_ETHER_TYPE |
2001                                 EFX_FILTER_MATCH_IP_PROTO) ||
2002                            sfc_flow_is_match_with_vids(match_flags,
2003                                 EFX_FILTER_MATCH_ETHER_TYPE |
2004                                 EFX_FILTER_MATCH_IP_PROTO |
2005                                 EFX_FILTER_MATCH_LOC_MAC)) {
2006                         ip_proto = spec->filters[i].efs_ip_proto;
2007                         if (ip_proto == EFX_IPPROTO_TCP ||
2008                             ip_proto == EFX_IPPROTO_UDP)
2009                                 return B_TRUE;
2010                 }
2011         }
2012
2013         return B_FALSE;
2014 }
2015
2016 static int
2017 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2018                               struct rte_flow *flow,
2019                               struct rte_flow_error *error)
2020 {
2021         efx_filter_spec_t *spec_tmpl = &flow->spec.template;
2022         efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2023         int rc;
2024
2025         /* Initialize the first filter spec with template */
2026         flow->spec.filters[0] = *spec_tmpl;
2027         flow->spec.count = 1;
2028
2029         if (!sfc_filter_is_match_supported(sa, match_flags)) {
2030                 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2031                 if (rc != 0)
2032                         return rc;
2033         }
2034
2035         if (sfc_flow_is_match_flags_exception(&flow->spec)) {
2036                 rte_flow_error_set(error, ENOTSUP,
2037                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2038                         "The flow rule pattern is unsupported");
2039                 return -rte_errno;
2040         }
2041
2042         return 0;
2043 }
2044
2045 static int
2046 sfc_flow_parse(struct rte_eth_dev *dev,
2047                const struct rte_flow_attr *attr,
2048                const struct rte_flow_item pattern[],
2049                const struct rte_flow_action actions[],
2050                struct rte_flow *flow,
2051                struct rte_flow_error *error)
2052 {
2053         struct sfc_adapter *sa = dev->data->dev_private;
2054         int rc;
2055
2056         rc = sfc_flow_parse_attr(attr, flow, error);
2057         if (rc != 0)
2058                 goto fail_bad_value;
2059
2060         rc = sfc_flow_parse_pattern(pattern, flow, error);
2061         if (rc != 0)
2062                 goto fail_bad_value;
2063
2064         rc = sfc_flow_parse_actions(sa, actions, flow, error);
2065         if (rc != 0)
2066                 goto fail_bad_value;
2067
2068         rc = sfc_flow_validate_match_flags(sa, flow, error);
2069         if (rc != 0)
2070                 goto fail_bad_value;
2071
2072         return 0;
2073
2074 fail_bad_value:
2075         return rc;
2076 }
2077
2078 static int
2079 sfc_flow_validate(struct rte_eth_dev *dev,
2080                   const struct rte_flow_attr *attr,
2081                   const struct rte_flow_item pattern[],
2082                   const struct rte_flow_action actions[],
2083                   struct rte_flow_error *error)
2084 {
2085         struct rte_flow flow;
2086
2087         memset(&flow, 0, sizeof(flow));
2088
2089         return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
2090 }
2091
2092 static struct rte_flow *
2093 sfc_flow_create(struct rte_eth_dev *dev,
2094                 const struct rte_flow_attr *attr,
2095                 const struct rte_flow_item pattern[],
2096                 const struct rte_flow_action actions[],
2097                 struct rte_flow_error *error)
2098 {
2099         struct sfc_adapter *sa = dev->data->dev_private;
2100         struct rte_flow *flow = NULL;
2101         int rc;
2102
2103         flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2104         if (flow == NULL) {
2105                 rte_flow_error_set(error, ENOMEM,
2106                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2107                                    "Failed to allocate memory");
2108                 goto fail_no_mem;
2109         }
2110
2111         rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2112         if (rc != 0)
2113                 goto fail_bad_value;
2114
2115         sfc_adapter_lock(sa);
2116
2117         TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
2118
2119         if (sa->state == SFC_ADAPTER_STARTED) {
2120                 rc = sfc_flow_filter_insert(sa, flow);
2121                 if (rc != 0) {
2122                         rte_flow_error_set(error, rc,
2123                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2124                                 "Failed to insert filter");
2125                         goto fail_filter_insert;
2126                 }
2127         }
2128
2129         sfc_adapter_unlock(sa);
2130
2131         return flow;
2132
2133 fail_filter_insert:
2134         TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2135         sfc_adapter_unlock(sa);
2136
2137 fail_bad_value:
2138         rte_free(flow);
2139
2140 fail_no_mem:
2141         return NULL;
2142 }
2143
2144 static int
2145 sfc_flow_remove(struct sfc_adapter *sa,
2146                 struct rte_flow *flow,
2147                 struct rte_flow_error *error)
2148 {
2149         int rc = 0;
2150
2151         SFC_ASSERT(sfc_adapter_is_locked(sa));
2152
2153         if (sa->state == SFC_ADAPTER_STARTED) {
2154                 rc = sfc_flow_filter_remove(sa, flow);
2155                 if (rc != 0)
2156                         rte_flow_error_set(error, rc,
2157                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2158                                 "Failed to destroy flow rule");
2159         }
2160
2161         TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2162         rte_free(flow);
2163
2164         return rc;
2165 }
2166
2167 static int
2168 sfc_flow_destroy(struct rte_eth_dev *dev,
2169                  struct rte_flow *flow,
2170                  struct rte_flow_error *error)
2171 {
2172         struct sfc_adapter *sa = dev->data->dev_private;
2173         struct rte_flow *flow_ptr;
2174         int rc = EINVAL;
2175
2176         sfc_adapter_lock(sa);
2177
2178         TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
2179                 if (flow_ptr == flow)
2180                         rc = 0;
2181         }
2182         if (rc != 0) {
2183                 rte_flow_error_set(error, rc,
2184                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2185                                    "Failed to find flow rule to destroy");
2186                 goto fail_bad_value;
2187         }
2188
2189         rc = sfc_flow_remove(sa, flow, error);
2190
2191 fail_bad_value:
2192         sfc_adapter_unlock(sa);
2193
2194         return -rc;
2195 }
2196
2197 static int
2198 sfc_flow_flush(struct rte_eth_dev *dev,
2199                struct rte_flow_error *error)
2200 {
2201         struct sfc_adapter *sa = dev->data->dev_private;
2202         struct rte_flow *flow;
2203         int rc = 0;
2204         int ret = 0;
2205
2206         sfc_adapter_lock(sa);
2207
2208         while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2209                 rc = sfc_flow_remove(sa, flow, error);
2210                 if (rc != 0)
2211                         ret = rc;
2212         }
2213
2214         sfc_adapter_unlock(sa);
2215
2216         return -ret;
2217 }
2218
2219 static int
2220 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2221                  struct rte_flow_error *error)
2222 {
2223         struct sfc_adapter *sa = dev->data->dev_private;
2224         struct sfc_port *port = &sa->port;
2225         int ret = 0;
2226
2227         sfc_adapter_lock(sa);
2228         if (sa->state != SFC_ADAPTER_INITIALIZED) {
2229                 rte_flow_error_set(error, EBUSY,
2230                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2231                                    NULL, "please close the port first");
2232                 ret = -rte_errno;
2233         } else {
2234                 port->isolated = (enable) ? B_TRUE : B_FALSE;
2235         }
2236         sfc_adapter_unlock(sa);
2237
2238         return ret;
2239 }
2240
2241 const struct rte_flow_ops sfc_flow_ops = {
2242         .validate = sfc_flow_validate,
2243         .create = sfc_flow_create,
2244         .destroy = sfc_flow_destroy,
2245         .flush = sfc_flow_flush,
2246         .query = NULL,
2247         .isolate = sfc_flow_isolate,
2248 };
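
/*
 * Usage sketch for the ops table above (hypothetical application code;
 * the function name and values below are illustrative, not part of this
 * driver): validate and create a rule steering traffic for one
 * destination MAC to Rx queue 0 through the generic rte_flow API.
 *
 *   static struct rte_flow *
 *   steer_by_dst_mac(uint16_t port_id, struct rte_flow_error *err)
 *   {
 *       static const struct rte_flow_attr attr = { .ingress = 1 };
 *       static const struct rte_flow_item_eth eth_spec = {
 *           .dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *       };
 *       static const struct rte_flow_item_eth eth_mask = {
 *           .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *       };
 *       const struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *       };
 *       static const struct rte_flow_action_queue queue = { .index = 0 };
 *       const struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *       };
 *
 *       if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
 *           return NULL;
 *       return rte_flow_create(port_id, &attr, pattern, actions, err);
 *   }
 */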
2249
2250 void
2251 sfc_flow_init(struct sfc_adapter *sa)
2252 {
2253         SFC_ASSERT(sfc_adapter_is_locked(sa));
2254
2255         TAILQ_INIT(&sa->filter.flow_list);
2256 }
2257
2258 void
2259 sfc_flow_fini(struct sfc_adapter *sa)
2260 {
2261         struct rte_flow *flow;
2262
2263         SFC_ASSERT(sfc_adapter_is_locked(sa));
2264
2265         while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2266                 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2267                 rte_free(flow);
2268         }
2269 }
2270
2271 void
2272 sfc_flow_stop(struct sfc_adapter *sa)
2273 {
2274         struct rte_flow *flow;
2275
2276         SFC_ASSERT(sfc_adapter_is_locked(sa));
2277
2278         TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
2279                 sfc_flow_filter_remove(sa, flow);
2280 }
2281
2282 int
2283 sfc_flow_start(struct sfc_adapter *sa)
2284 {
2285         struct rte_flow *flow;
2286         int rc = 0;
2287
2288         sfc_log_init(sa, "entry");
2289
2290         SFC_ASSERT(sfc_adapter_is_locked(sa));
2291
2292         TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
2293                 rc = sfc_flow_filter_insert(sa, flow);
2294                 if (rc != 0)
2295                         goto fail_bad_flow;
2296         }
2297
2298         sfc_log_init(sa, "done");
2299
2300 fail_bad_flow:
2301         return rc;
2302 }