net/sfc: support IPV4 in flow API filters
drivers/net/sfc/sfc_flow.c
/*-
 * Copyright (c) 2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * For now, the flow API is implemented in such a manner that each
 * flow rule is converted to one hardware filter.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */
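
/*
 * For illustration only (not part of the driver): a rule created via
 * testpmd such as
 *
 *   flow create 0 ingress pattern eth / ipv4 dst is 192.0.2.1 / end
 *        actions queue index 1 / end
 *
 * is expected to end up as a single hardware filter matching the IPv4
 * ethertype and the destination host, directing packets to RX queue 1.
 * The exact testpmd syntax may differ between DPDK versions.
 */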

enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
        SFC_FLOW_ITEM_L3,
};
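
/*
 * Each supported pattern item declares the layer it represents and the
 * layer it may follow (see struct sfc_flow_item below); this ordering
 * is enforced by sfc_flow_parse_pattern().
 */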

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of item */
        enum sfc_flow_item_layers layer;        /* Layer of item */
        enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" structures
 * for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t match;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = (const uint8_t *)def_mask;
        } else {
                mask = (const uint8_t *)item->mask;
        }

        spec = (const uint8_t *)item->spec;
        last = (const uint8_t *)item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec", then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask and spec do not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                match = spec[i] | mask[i];
                supp = ((const uint8_t *)supp_mask)[i];

                if ((match | supp) != supp) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}
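
/*
 * Note: on success sfc_flow_parse_init() may leave *spec_ptr NULL (item
 * without a spec); the ETH and IPV4 parsers below treat such an item as
 * matching any value of the corresponding protocol header.
 */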

/*
 * Protocol parsers.
 * Arbitrary masking is not supported: masks in the items must be either
 * full or empty (zeroed) and may be set only for the supported fields
 * specified in the corresponding supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_eth_mask,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, the item matches any Ethernet frame */
        if (spec == NULL)
                return 0;

        if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}
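
/*
 * For example, an ETH item with a fully masked destination address of
 * 00:11:22:33:44:55 adds EFX_FILTER_MATCH_LOC_MAC with that address to
 * the filter, while a partially masked address is rejected as a bad mask.
 */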

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                vid = rte_bswap16(spec->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        return 0;
}
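
/*
 * Note that the TCI mask must cover exactly the VID bits (0x0fff):
 * matching on PCP/DEI is not supported, and a VLAN item without a
 * specific VID is rejected.
 */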

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in both the item
         * and efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}
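
/*
 * For illustration only (not part of the driver): a testpmd rule such as
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.0.2.1
 *        dst is 198.51.100.2 / end actions queue index 3 / end
 *
 * is parsed into a specification with EFX_FILTER_MATCH_ETHER_TYPE,
 * EFX_FILTER_MATCH_REM_HOST and EFX_FILTER_MATCH_LOC_HOST set, subject
 * to the hardware filter support check in sfc_flow_parse(). The exact
 * testpmd syntax may differ between DPDK versions.
 */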

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv4,
        },
};
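
/*
 * Support for a new pattern item is added by providing a parse callback
 * above and listing the item here with its layer constraints.
 */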

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
        flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;

        return 0;
}

/* Get an item from the sfc_flow_items array */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern != NULL &&
               pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                rc = item->parse(pattern, &flow->spec, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        return 0;
}
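
/*
 * For example, a pattern may start directly with an IPV4 item (the leading
 * ETH/VLAN items omitted), whereas an ETH item after a VLAN item is
 * rejected because ETH may only appear at the start of a pattern.
 */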

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_rxq *rxq;

        if (queue->index >= sa->rxq_count)
                return -EINVAL;

        rxq = sa->rxq_info[queue->index].rxq;
        flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
                       const struct rte_flow_action actions[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        boolean_t is_specified = B_FALSE;

        if (actions == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
                                   "NULL actions");
                return -rte_errno;
        }

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        rc = sfc_flow_parse_queue(sa, actions->conf, flow);
                        if (rc != 0) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Bad QUEUE action");
                                return -rte_errno;
                        }

                        is_specified = B_TRUE;
                        break;

                default:
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                           "Action is not supported");
                        return -rte_errno;
                }
        }

        if (!is_specified) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
                                   "Action is unspecified");
                return -rte_errno;
        }

        return 0;
}
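
/*
 * A rule must carry at least one QUEUE action; VOID actions are allowed
 * and ignored, and any other action type is rejected.
 */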

static int
sfc_flow_parse(struct rte_eth_dev *dev,
               const struct rte_flow_attr *attr,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               struct rte_flow *flow,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        int rc;

        memset(&flow->spec, 0, sizeof(flow->spec));

        rc = sfc_flow_parse_attr(attr, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_pattern(pattern, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_actions(sa, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Flow rule pattern is not supported");
                return -rte_errno;
        }

fail_bad_value:
        return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct rte_flow flow;

        return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow = NULL;
        int rc;

        flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
        if (flow == NULL) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Failed to allocate memory");
                goto fail_no_mem;
        }

        rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        /*
         * Take the adapter lock before touching the flow list so that
         * the list and the adapter state are seen consistently
         */
        sfc_adapter_lock(sa);

        TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = efx_filter_insert(sa->nic, &flow->spec);
                if (rc != 0) {
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to insert filter");
                        goto fail_filter_insert;
                }
        }

        sfc_adapter_unlock(sa);

        return flow;

fail_filter_insert:
        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
        sfc_adapter_unlock(sa);

fail_bad_value:
        rte_free(flow);

fail_no_mem:
        return NULL;
}
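
/*
 * Flow rules are kept on sa->filter.flow_list regardless of the adapter
 * state; the corresponding hardware filters exist only while the adapter
 * is started (see sfc_flow_start() and sfc_flow_stop() below).
 */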

static int
sfc_flow_remove(struct sfc_adapter *sa,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        int rc = 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = efx_filter_remove(sa->nic, &flow->spec);
                if (rc != 0)
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to destroy flow rule");
        }

        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
        rte_free(flow);

        return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow_ptr;
        int rc = EINVAL;

        sfc_adapter_lock(sa);

        TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
                if (flow_ptr == flow)
                        rc = 0;
        }
        if (rc != 0) {
                rte_flow_error_set(error, rc,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to find flow rule to destroy");
                goto fail_bad_value;
        }

        rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
        sfc_adapter_unlock(sa);

        return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow;
        int rc = 0;
        int ret = 0;

        sfc_adapter_lock(sa);

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                rc = sfc_flow_remove(sa, flow, error);
                if (rc != 0)
                        ret = rc;
        }

        sfc_adapter_unlock(sa);

        return -ret;
}

const struct rte_flow_ops sfc_flow_ops = {
        .validate = sfc_flow_validate,
        .create = sfc_flow_create,
        .destroy = sfc_flow_destroy,
        .flush = sfc_flow_flush,
        .query = NULL,
};
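
/*
 * .query is left NULL since none of the supported actions has retrievable
 * state; the ops table itself is picked up by the ethdev filter control
 * path implemented outside this file (see sfc_ethdev.c).
 */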

void
sfc_flow_init(struct sfc_adapter *sa)
{
        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
                rte_free(flow);
        }
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
                efx_filter_remove(sa->nic, &flow->spec);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
        struct rte_flow *flow;
        int rc = 0;

        sfc_log_init(sa, "entry");

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
                rc = efx_filter_insert(sa->nic, &flow->spec);
                if (rc != 0)
                        goto fail_bad_flow;
        }

        sfc_log_init(sa, "done");

fail_bad_flow:
        return rc;
}