drivers/net/sfc/sfc_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * At the moment, the flow API is implemented in such a manner that
 * each flow rule is converted to a single hardware filter.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */
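
/*
 * For illustration only (not part of the original source): a rule such
 * as the following testpmd command is handled by this code as a single
 * hardware filter, matching UDP packets to a given IPv4 destination and
 * steering them to one Rx queue:
 *
 *   flow create 0 ingress pattern eth / ipv4 dst is 192.0.2.1 /
 *        udp dst is 4789 / end actions queue index 1 / end
 *
 * The pattern items are handled by the sfc_flow_parse_* callbacks below
 * and the actions by sfc_flow_parse_actions().
 */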

enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
        SFC_FLOW_ITEM_L3,
        SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of item */
        enum sfc_flow_item_layers layer;        /* Layer of item */
        enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask structures for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t match;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = (const uint8_t *)def_mask;
        } else {
                mask = (const uint8_t *)item->mask;
        }

        spec = (const uint8_t *)item->spec;
        last = (const uint8_t *)item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec", they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask and spec do not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                match = spec[i] | mask[i];
                supp = ((const uint8_t *)supp_mask)[i];

                if ((match | supp) != supp) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}

/*
 * Protocol parsers.
 * Partial masking is not supported, so item masks must be either
 * full or empty (zeroed), and may only be set for the supported
 * fields listed in supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. In addition to full and
 *   empty masks of destination address, individual/group mask is
 *   also supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_eth_mask,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, the item matches any Ethernet frame */
        if (spec == NULL)
                return 0;

        if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |=
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |=
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is needed
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is needed.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                vid = rte_bswap16(spec->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in item and in
         * efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so a byte swap is needed
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so a byte swap is needed
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_udp,
        },
};

/*
 * Protocol-independent flow API support
 */
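/*
 * Check flow rule attributes: only ingress rules without groups,
 * priorities or egress are accepted; the specification is marked as
 * an Rx filter using the default RSS context.
 */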
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
        flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

        return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

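/*
 * Walk the pattern items, check that each item follows an allowed
 * predecessor layer and dispatch it to its parse callback, which
 * fills in the hardware filter specification.
 */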
static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                rc = item->parse(pattern, &flow->spec, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}

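/*
 * Handle the QUEUE action: map the ethdev Rx queue index given in the
 * action configuration to the hardware RxQ index used by the filter.
 */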
static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_rxq *rxq;

        if (queue->index >= sa->rxq_count)
                return -EINVAL;

        rxq = sa->rxq_info[queue->index].rxq;
        flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

#if EFSYS_OPT_RX_SCALE
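/*
 * Handle the RSS action: validate the queue list, hash types and key,
 * and record the RSS configuration (hardware RxQ range, hash types,
 * key and indirection table) in the flow for use at insertion time.
 */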
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
                   const struct rte_flow_action_rss *rss,
                   struct rte_flow *flow)
{
        unsigned int rxq_sw_index;
        struct sfc_rxq *rxq;
        unsigned int rxq_hw_index_min;
        unsigned int rxq_hw_index_max;
        const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
        uint64_t rss_hf;
        uint8_t *rss_key = NULL;
        struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
        unsigned int i;

        if (rss->num == 0)
                return -EINVAL;

        rxq_sw_index = sa->rxq_count - 1;
        rxq = sa->rxq_info[rxq_sw_index].rxq;
        rxq_hw_index_min = rxq->hw_index;
        rxq_hw_index_max = 0;

        for (i = 0; i < rss->num; ++i) {
                rxq_sw_index = rss->queue[i];

                if (rxq_sw_index >= sa->rxq_count)
                        return -EINVAL;

                rxq = sa->rxq_info[rxq_sw_index].rxq;

                if (rxq->hw_index < rxq_hw_index_min)
                        rxq_hw_index_min = rxq->hw_index;

                if (rxq->hw_index > rxq_hw_index_max)
                        rxq_hw_index_max = rxq->hw_index;
        }

        rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
        if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
                return -EINVAL;

        if (rss_conf != NULL) {
                if (rss_conf->rss_key_len != sizeof(sa->rss_key))
                        return -EINVAL;

                rss_key = rss_conf->rss_key;
        } else {
                rss_key = sa->rss_key;
        }

        flow->rss = B_TRUE;

        sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
        sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
        sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
        rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));

        for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
                unsigned int rxq_sw_index = rss->queue[i % rss->num];
                struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;

                sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
        }

        return 0;
}
#endif /* EFSYS_OPT_RX_SCALE */

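/*
 * Insert the flow rule's filter into hardware. If the rule uses RSS,
 * an exclusive RSS context is allocated and configured (hash mode and
 * key before the filter, indirection table after it) so that the
 * filter can spread traffic across the selected queues.
 */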
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
                       struct rte_flow *flow)
{
        efx_filter_spec_t *spec = &flow->spec;

#if EFSYS_OPT_RX_SCALE
        struct sfc_flow_rss *rss = &flow->rss_conf;
        int rc = 0;

        if (flow->rss) {
                unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
                                              rss->rxq_hw_index_min + 1,
                                              EFX_MAXRSS);

                rc = efx_rx_scale_context_alloc(sa->nic,
                                                EFX_RX_SCALE_EXCLUSIVE,
                                                rss_spread,
                                                &spec->efs_rss_context);
                if (rc != 0)
                        goto fail_scale_context_alloc;

                rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context,
                                           EFX_RX_HASHALG_TOEPLITZ,
                                           rss->rss_hash_types, B_TRUE);
                if (rc != 0)
                        goto fail_scale_mode_set;

                rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context,
                                          rss->rss_key,
                                          sizeof(sa->rss_key));
                if (rc != 0)
                        goto fail_scale_key_set;

                spec->efs_dmaq_id = rss->rxq_hw_index_min;
                spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
        }

        rc = efx_filter_insert(sa->nic, spec);
        if (rc != 0)
                goto fail_filter_insert;

        if (flow->rss) {
                /*
                 * The scale table is set after filter insertion because
                 * the table entries are relative to the base RxQ ID,
                 * and the latter is submitted to the HW by means of
                 * inserting a filter. So, by the time of this request,
                 * the HW knows all the information needed to verify
                 * the table entries, and the operation will succeed.
                 */
                rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context,
                                          rss->rss_tbl, RTE_DIM(rss->rss_tbl));
                if (rc != 0)
                        goto fail_scale_tbl_set;
        }

        return 0;

fail_scale_tbl_set:
        efx_filter_remove(sa->nic, spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
        if (rss != NULL)
                efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);

fail_scale_context_alloc:
        return rc;
#else /* !EFSYS_OPT_RX_SCALE */
        return efx_filter_insert(sa->nic, spec);
#endif /* EFSYS_OPT_RX_SCALE */
}

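/*
 * Remove the flow rule's filter from hardware and release its RSS
 * context, if one was allocated for the rule.
 */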
static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
                       struct rte_flow *flow)
{
        efx_filter_spec_t *spec = &flow->spec;
        int rc = 0;

        rc = efx_filter_remove(sa->nic, spec);
        if (rc != 0)
                return rc;

#if EFSYS_OPT_RX_SCALE
        if (flow->rss)
                rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
#endif /* EFSYS_OPT_RX_SCALE */

        return rc;
}

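/*
 * Parse the action list. At least one fate action (QUEUE or, when RSS
 * support is compiled in, RSS) is required; VOID actions are ignored
 * and any other action type is rejected.
 */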
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
                       const struct rte_flow_action actions[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        boolean_t is_specified = B_FALSE;

        if (actions == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
                                   "NULL actions");
                return -rte_errno;
        }

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        rc = sfc_flow_parse_queue(sa, actions->conf, flow);
                        if (rc != 0) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Bad QUEUE action");
                                return -rte_errno;
                        }

                        is_specified = B_TRUE;
                        break;

#if EFSYS_OPT_RX_SCALE
                case RTE_FLOW_ACTION_TYPE_RSS:
                        rc = sfc_flow_parse_rss(sa, actions->conf, flow);
                        if (rc != 0) {
                                rte_flow_error_set(error, rc,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Bad RSS action");
                                return -rte_errno;
                        }

                        is_specified = B_TRUE;
                        break;
#endif /* EFSYS_OPT_RX_SCALE */

                default:
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                           "Action is not supported");
                        return -rte_errno;
                }
        }

        if (!is_specified) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
                                   "Action is unspecified");
                return -rte_errno;
        }

        return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
               const struct rte_flow_attr *attr,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               struct rte_flow *flow,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        int rc;

        memset(&flow->spec, 0, sizeof(flow->spec));

        rc = sfc_flow_parse_attr(attr, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_pattern(pattern, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_actions(sa, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Flow rule pattern is not supported");
                return -rte_errno;
        }

fail_bad_value:
        return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct rte_flow flow;

        return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow = NULL;
        int rc;

        flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
        if (flow == NULL) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Failed to allocate memory");
                goto fail_no_mem;
        }

        rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

        sfc_adapter_lock(sa);

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = sfc_flow_filter_insert(sa, flow);
                if (rc != 0) {
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to insert filter");
                        goto fail_filter_insert;
                }
        }

        sfc_adapter_unlock(sa);

        return flow;

fail_filter_insert:
        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);

fail_bad_value:
        rte_free(flow);
        sfc_adapter_unlock(sa);

fail_no_mem:
        return NULL;
}

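/*
 * Remove a flow rule from the list and free it; the filter is first
 * withdrawn from hardware if the adapter is started.
 * The adapter lock must be held by the caller.
 */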
static int
sfc_flow_remove(struct sfc_adapter *sa,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        int rc = 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = sfc_flow_filter_remove(sa, flow);
                if (rc != 0)
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to destroy flow rule");
        }

        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
        rte_free(flow);

        return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow_ptr;
        int rc = EINVAL;

        sfc_adapter_lock(sa);

        TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
                if (flow_ptr == flow)
                        rc = 0;
        }
        if (rc != 0) {
                rte_flow_error_set(error, rc,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to find flow rule to destroy");
                goto fail_bad_value;
        }

        rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
        sfc_adapter_unlock(sa);

        return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow;
        int rc = 0;
        int ret = 0;

        sfc_adapter_lock(sa);

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                rc = sfc_flow_remove(sa, flow, error);
                if (rc != 0)
                        ret = rc;
        }

        sfc_adapter_unlock(sa);

        return -ret;
}

static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
                 struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct sfc_port *port = &sa->port;
        int ret = 0;

        sfc_adapter_lock(sa);
        if (sa->state != SFC_ADAPTER_INITIALIZED) {
                rte_flow_error_set(error, EBUSY,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "please close the port first");
                ret = -rte_errno;
        } else {
                port->isolated = (enable) ? B_TRUE : B_FALSE;
        }
        sfc_adapter_unlock(sa);

        return ret;
}

const struct rte_flow_ops sfc_flow_ops = {
        .validate = sfc_flow_validate,
        .create = sfc_flow_create,
        .destroy = sfc_flow_destroy,
        .flush = sfc_flow_flush,
        .query = NULL,
        .isolate = sfc_flow_isolate,
};
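
/*
 * Editorial note, not from the original source: applications reach
 * these callbacks through the generic rte_flow API; in DPDK releases
 * of this vintage the ethdev layer is expected to obtain sfc_flow_ops
 * via the driver's filter_ctrl callback with RTE_ETH_FILTER_GENERIC
 * (see sfc_ethdev.c).
 */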

void
sfc_flow_init(struct sfc_adapter *sa)
{
        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
                rte_free(flow);
        }
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
                sfc_flow_filter_remove(sa, flow);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
        struct rte_flow *flow;
        int rc = 0;

        sfc_log_init(sa, "entry");

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
                rc = sfc_flow_filter_insert(sa, flow);
                if (rc != 0)
                        goto fail_bad_flow;
        }

        sfc_log_init(sa, "done");

fail_bad_flow:
        return rc;
}