net/sfc: support flow API filters
drivers/net/sfc/sfc_flow.c
/*-
 * Copyright (c) 2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * Currently, the flow API is implemented in such a manner that each
 * flow rule is converted to a hardware filter.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */
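
/*
 * Illustrative example (assuming testpmd flow syntax; the MAC address
 * is made up): a rule such as
 *
 *   flow create 0 ingress pattern eth dst is 00:53:00:00:00:01 / end
 *     actions queue index 1 / end
 *
 * is parsed below into an efx_filter_spec_t with
 * EFX_FILTER_MATCH_LOC_MAC set in efs_match_flags, the destination
 * MAC stored in efs_loc_mac and efs_dmaq_id set to the hardware
 * index of Rx queue 1.
 */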

enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of the item */
        enum sfc_flow_item_layers layer;        /* Layer of the item */
        enum sfc_flow_item_layers prev_layer;   /* Expected layer of the previous item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;

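/* Check that all bytes in the buffer of the given size are zero */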
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" structures
 * for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t match;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If the item mask is not set, the default mask is used;
         * if the default mask is NULL as well, the mask must be
         * specified in the item
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = (const uint8_t *)def_mask;
        } else {
                mask = (const uint8_t *)item->mask;
        }

        spec = (const uint8_t *)item->spec;
        last = (const uint8_t *)item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the
         * corresponding values in "spec", they are ignored;
         * any other range is unsupported
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask and spec do not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                match = spec[i] | mask[i];
                supp = ((const uint8_t *)supp_mask)[i];

                if ((match | supp) != supp) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}

/*
 * Protocol parsers.
 * Masking is not supported: masks in items must be either full
 * or empty (all zeroes), and may be set only for the supported
 * fields specified in the corresponding supp_mask.
 */
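
/*
 * For example (illustrative): in the ETH item handled below, a
 * destination MAC mask of ff:ff:ff:ff:ff:ff requests an exact match,
 * an all-zeroes mask leaves the field unmatched, and any partial
 * mask is rejected as unsupported.
 */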

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. If the mask is NULL, the
 *   default mask is used. Ranging is not supported.
 * @param[in,out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_eth_mask,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, the rule matches any Ethernet frame */
        if (spec == NULL)
                return 0;

        if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * The Ether type is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is used
         */
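        /*
         * For instance (illustrative): an IPv4 EtherType is carried in
         * the item as the bytes 0x08, 0x00; on a little-endian CPU the
         * swap yields the host-order value 0x0800 for efs_ether_type.
         */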
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}

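/*
 * The list of supported pattern items. The prev_layer and layer fields
 * encode the item ordering enforced by sfc_flow_parse_pattern().
 */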
static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
        flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;

        return 0;
}

/* Get the item descriptor from the sfc_flow_items array */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        enum sfc_flow_item_layers prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the
                 * beginning of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                rc = item->parse(pattern, &flow->spec, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}

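/*
 * Map the QUEUE action to the hardware Rx queue behind the given
 * ethdev Rx queue index.
 */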
static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_rxq *rxq;

        if (queue->index >= sa->rxq_count)
                return -EINVAL;

        rxq = sa->rxq_info[queue->index].rxq;
        flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
                       const struct rte_flow_action actions[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        boolean_t is_specified = B_FALSE;

        if (actions == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
                                   "NULL actions");
                return -rte_errno;
        }

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        rc = sfc_flow_parse_queue(sa, actions->conf, flow);
                        if (rc != 0) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Bad QUEUE action");
                                return -rte_errno;
                        }

                        is_specified = B_TRUE;
                        break;

                default:
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                           "Action is not supported");
                        return -rte_errno;
                }
        }

        if (!is_specified) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
                                   "Action is unspecified");
                return -rte_errno;
        }

        return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
               const struct rte_flow_attr *attr,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               struct rte_flow *flow,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        int rc;

        memset(&flow->spec, 0, sizeof(flow->spec));

        rc = sfc_flow_parse_attr(attr, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_pattern(pattern, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_actions(sa, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Flow rule pattern is not supported");
                return -rte_errno;
        }

fail_bad_value:
        return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct rte_flow flow;

        return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow = NULL;
        int rc;

        flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
        if (flow == NULL) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Failed to allocate memory");
                goto fail_no_mem;
        }

        rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        sfc_adapter_lock(sa);

        /* The flow list must only be modified under the adapter lock */
        TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = efx_filter_insert(sa->nic, &flow->spec);
                if (rc != 0) {
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to insert filter");
                        goto fail_filter_insert;
                }
        }

        sfc_adapter_unlock(sa);

        return flow;

fail_filter_insert:
        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
        sfc_adapter_unlock(sa);

fail_bad_value:
        rte_free(flow);

fail_no_mem:
        return NULL;
}

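/*
 * Remove the flow rule's filter from hardware (if the adapter is
 * started), unlink the rule from the list and free it.
 * The adapter lock must be held by the caller.
 */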
static int
sfc_flow_remove(struct sfc_adapter *sa,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        int rc = 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = efx_filter_remove(sa->nic, &flow->spec);
                if (rc != 0)
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to destroy flow rule");
        }

        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
        rte_free(flow);

        return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow_ptr;
        int rc = EINVAL;

        sfc_adapter_lock(sa);

        TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
                if (flow_ptr == flow)
                        rc = 0;
        }
        if (rc != 0) {
                rte_flow_error_set(error, rc,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to find flow rule to destroy");
                goto fail_bad_value;
        }

        rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
        sfc_adapter_unlock(sa);

        return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow;
        int rc = 0;
        int ret = 0;

        sfc_adapter_lock(sa);

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                rc = sfc_flow_remove(sa, flow, error);
                if (rc != 0)
                        ret = rc;
        }

        sfc_adapter_unlock(sa);

        return -ret;
}

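/*
 * rte_flow operations exposed to applications; presumably hooked up
 * through the generic filter control entry point (see sfc_ethdev.c).
 */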
const struct rte_flow_ops sfc_flow_ops = {
        .validate = sfc_flow_validate,
        .create = sfc_flow_create,
        .destroy = sfc_flow_destroy,
        .flush = sfc_flow_flush,
        .query = NULL,
};

void
sfc_flow_init(struct sfc_adapter *sa)
{
        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_INIT(&sa->filter.flow_list);
}

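/*
 * Release all flow rules without touching hardware filters; those are
 * expected to have been removed already (e.g. by sfc_flow_stop()).
 */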
void
sfc_flow_fini(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
                rte_free(flow);
        }
}

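/*
 * Remove all flow rule filters from hardware while keeping the rules
 * themselves, so that sfc_flow_start() can re-insert them later.
 */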
void
sfc_flow_stop(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
                efx_filter_remove(sa->nic, &flow->spec);
}

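/* Re-insert hardware filters for all remembered flow rules */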
int
sfc_flow_start(struct sfc_adapter *sa)
{
        struct rte_flow *flow;
        int rc = 0;

        sfc_log_init(sa, "entry");

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
                rc = efx_filter_insert(sa->nic, &flow->spec);
                if (rc != 0)
                        goto fail_bad_flow;
        }

        sfc_log_init(sa, "done");

fail_bad_flow:
        return rc;
}