net/sfc: add facilities to handle bundles of actions
[dpdk.git] drivers/net/sfc/sfc_mae.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_common.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"

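/*
 * Probe MAE support on the NIC. If the FW supports MAE, initialise the
 * EFX MAE context, query the limits to learn the maximum number of
 * action rule priorities and set up the list of action sets. Lack of
 * MAE support is not an error: the facility is simply marked as
 * unsupported.
 */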
int
sfc_mae_attach(struct sfc_adapter *sa)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        struct sfc_mae *mae = &sa->mae;
        efx_mae_limits_t limits;
        int rc;

        sfc_log_init(sa, "entry");

        if (!encp->enc_mae_supported) {
                mae->status = SFC_MAE_STATUS_UNSUPPORTED;
                return 0;
        }

        sfc_log_init(sa, "init MAE");
        rc = efx_mae_init(sa->nic);
        if (rc != 0)
                goto fail_mae_init;

        sfc_log_init(sa, "get MAE limits");
        rc = efx_mae_get_limits(sa->nic, &limits);
        if (rc != 0)
                goto fail_mae_get_limits;

        mae->status = SFC_MAE_STATUS_SUPPORTED;
        mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
        TAILQ_INIT(&mae->action_sets);

        sfc_log_init(sa, "done");

        return 0;

fail_mae_get_limits:
        efx_mae_fini(sa->nic);

fail_mae_init:
        sfc_log_init(sa, "failed %d", rc);

        return rc;
}

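/*
 * Tear down the context set up by sfc_mae_attach(). The EFX MAE library
 * is finalised only if the attach step reported MAE support.
 */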
void
sfc_mae_detach(struct sfc_adapter *sa)
{
        struct sfc_mae *mae = &sa->mae;
        enum sfc_mae_status status_prev = mae->status;

        sfc_log_init(sa, "entry");

        mae->nb_action_rule_prios_max = 0;
        mae->status = SFC_MAE_STATUS_UNKNOWN;

        if (status_prev != SFC_MAE_STATUS_SUPPORTED)
                return;

        efx_mae_fini(sa->nic);

        sfc_log_init(sa, "done");
}

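/*
 * Look up an existing action set whose specification is equal to the
 * given one. On success, take a reference and return the entry;
 * otherwise, return NULL so that the caller adds a new action set.
 */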
static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
                          const efx_mae_actions_t *spec)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
                if (efx_mae_action_set_specs_equal(action_set->spec, spec)) {
                        ++(action_set->refcnt);
                        return action_set;
                }
        }

        return NULL;
}

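/*
 * Allocate a new action set entry, take ownership of the provided EFX
 * specification and link the entry into the adapter-wide list.
 */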
static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
                       efx_mae_actions_t *spec,
                       struct sfc_mae_action_set **action_setp)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
        if (action_set == NULL)
                return ENOMEM;

        action_set->refcnt = 1;
        action_set->spec = spec;

        action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

        *action_setp = action_set;

        return 0;
}

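/*
 * Drop a reference to the action set. When the last reference goes
 * away, the specification is finalised and the entry is freed; the FW
 * resource must have been released by that time.
 */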
static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
                       struct sfc_mae_action_set *action_set)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(action_set->refcnt != 0);

        --(action_set->refcnt);

        if (action_set->refcnt != 0)
                return;

        SFC_ASSERT(action_set->fw_rsrc.aset_id.id == EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(action_set->fw_rsrc.refcnt == 0);

        efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
        TAILQ_REMOVE(&mae->action_sets, action_set, entries);
        rte_free(action_set);
}

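/*
 * Make sure the action set is backed by an FW resource. The resource is
 * allocated on the first use only; subsequent calls just increment the
 * FW resource reference counter.
 */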
static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
                          struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(action_set->spec != NULL);

                rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
                                              &fw_rsrc->aset_id);
                if (rc != 0)
                        return rc;
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

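/*
 * Drop an FW resource reference taken by sfc_mae_action_set_enable().
 * The FW resource is freed when the last reference is dropped.
 */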
static int
sfc_mae_action_set_disable(struct sfc_adapter *sa,
                           struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(fw_rsrc->aset_id.id != EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(fw_rsrc->refcnt != 0);

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
                if (rc != 0)
                        return rc;

                fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);

        return 0;
}

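/*
 * Release the driver-level resources held by a flow: the action set
 * reference and the match specification. The flow must not be inserted
 * in the HW at this point, hence the assertion on the rule ID.
 */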
void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec;
        struct sfc_flow_spec_mae *spec_mae;

        if (flow == NULL)
                return;

        spec = &flow->spec;

        if (spec == NULL)
                return;

        spec_mae = &spec->mae;

        SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

        if (spec_mae->action_set != NULL)
                sfc_mae_action_set_del(sa, spec_mae->action_set);

        if (spec_mae->match_spec != NULL)
                efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}

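/*
 * Parse a PHY_PORT pattern item: convert the physical port index into
 * an MPORT selector and set it in the match specification. At most one
 * traffic source item is allowed per pattern.
 */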
static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
                                 struct sfc_flow_parse_ctx *ctx,
                                 struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_phy_port supp_mask = {
                .index = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_phy_port_mask;
        const struct rte_flow_item_phy_port *spec = NULL;
        const struct rte_flow_item_phy_port *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_phy_port), error);
        if (rc != 0)
                return rc;

        if (mask->index != supp_mask.index) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PHY_PORT pattern item");
        }

        /* If "spec" is not set, could be any physical port */
        if (spec == NULL)
                return 0;

        rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PHY_PORT index");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
                                          &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PHY_PORT");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

struct sfc_mae_field_locator {
        efx_mae_field_id_t              field_id;
        size_t                          size;
        /* Field offset in the corresponding rte_flow_item_ struct */
        size_t                          ofst;
};

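/*
 * Build the mask of supported fields for an item type: every byte
 * covered by the field locators is set to 0xff; all other bytes remain
 * zero, which means that matching on them is not supported.
 */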
static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
                             unsigned int nb_field_locators, void *mask_ptr,
                             size_t mask_size)
{
        unsigned int i;

        memset(mask_ptr, 0, mask_size);

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                SFC_ASSERT(fl->ofst + fl->size <= mask_size);
                memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
        }
}

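/*
 * Copy the spec and mask bytes of every located field into the EFX
 * match specification. Which fields exist and where they live in the
 * RTE item structure is described by the field locator table.
 */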
static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
                   unsigned int nb_field_locators, const uint8_t *spec,
                   const uint8_t *mask, efx_mae_match_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                rc = efx_mae_match_spec_field_set(efx_spec, fl->field_id,
                                                  fl->size, spec + fl->ofst,
                                                  fl->size, mask + fl->ofst);
                if (rc != 0)
                        break;
        }

        if (rc != 0) {
                rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Failed to process item fields");
        }

        return rc;
}

static const struct sfc_mae_field_locator flocs_eth[] = {
        {
                EFX_MAE_FIELD_ETHER_TYPE_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
                offsetof(struct rte_flow_item_eth, type),
        },
        {
                EFX_MAE_FIELD_ETH_DADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
                offsetof(struct rte_flow_item_eth, dst),
        },
        {
                EFX_MAE_FIELD_ETH_SADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
                offsetof(struct rte_flow_item_eth, src),
        },
};

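/*
 * Parse an ETH pattern item. An empty spec matches any Ethernet frame;
 * otherwise, the EtherType and the destination and source MAC addresses
 * are added to the match specification.
 */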
static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
                            struct sfc_flow_parse_ctx *ctx,
                            struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct rte_flow_item_eth supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_eth_mask,
                                 sizeof(struct rte_flow_item_eth), error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
                                  ctx_mae->match_spec_action, error);
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern is don't care.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_phy_port,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_eth,
        },
};

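/*
 * Translate an RTE flow pattern into an EFX action rule match
 * specification and check the result for consistency.
 */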
int
sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
                           const struct rte_flow_item pattern[],
                           struct sfc_flow_spec_mae *spec,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx ctx_mae;
        struct sfc_flow_parse_ctx ctx;
        int rc;

        memset(&ctx_mae, 0, sizeof(ctx_mae));

        rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
                                     spec->priority,
                                     &ctx_mae.match_spec_action);
        if (rc != 0) {
                rc = rte_flow_error_set(error, rc,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Failed to initialise action rule match specification");
                goto fail_init_match_spec_action;
        }

        ctx.type = SFC_FLOW_PARSE_CTX_MAE;
        ctx.mae = &ctx_mae;

        rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
                                    pattern, &ctx, error);
        if (rc != 0)
                goto fail_parse_pattern;

        if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
                rc = rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                        "Inconsistent pattern");
                goto fail_validate_match_spec_action;
        }

        spec->match_spec = ctx_mae.match_spec_action;

        return 0;

fail_validate_match_spec_action:
fail_parse_pattern:
        efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);

fail_init_match_spec_action:
        return rc;
}

/*
 * An action supported by MAE may correspond to a bundle of RTE flow actions,
 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
 * That is, related RTE flow actions need to be tracked as parts of a whole
 * so that they can be combined into a single action and submitted to the MAE
 * representation of a given rule's action set.
 *
 * Each RTE flow action provided by an application gets classified as
 * one belonging to some bundle type. If an action is not supposed to
 * belong to any bundle, or if this action is END, it is described as
 * one belonging to a dummy bundle of type EMPTY.
 *
 * The currently tracked bundle will be submitted if a repeating
 * action or an action of a different bundle type follows.
 */

enum sfc_mae_actions_bundle_type {
        SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
};

struct sfc_mae_actions_bundle {
        enum sfc_mae_actions_bundle_type        type;

        /* Indicates actions already tracked by the current bundle */
        uint64_t                                actions_mask;
};

/*
 * Combine configuration of RTE flow actions tracked by the bundle into a
 * single action and submit the result to the MAE action set specification.
 * Do nothing in the case of a dummy action bundle.
 */
static int
sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
                              __rte_unused efx_mae_actions_t *spec)
{
        int rc = 0;

        switch (bundle->type) {
        case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
                break;
        default:
                SFC_ASSERT(B_FALSE);
                break;
        }

        return rc;
}

/*
 * Given the type of the next RTE flow action in line, decide whether a
 * new bundle is about to start, and, if this is the case, submit and
 * reset the current bundle.
 */
static int
sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
                            struct sfc_mae_actions_bundle *bundle,
                            efx_mae_actions_t *spec,
                            struct rte_flow_error *error)
{
        enum sfc_mae_actions_bundle_type bundle_type_new;
        int rc;

        switch (action->type) {
        default:
                /*
                 * Self-sufficient actions, including END, are handled in this
                 * case. No checks for unsupported actions are needed here
                 * because parsing doesn't occur at this point.
                 */
                bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
                break;
        }

        if (bundle_type_new != bundle->type ||
            (bundle->actions_mask & (1ULL << action->type)) != 0) {
                rc = sfc_mae_actions_bundle_submit(bundle, spec);
                if (rc != 0)
                        goto fail_submit;

                memset(bundle, 0, sizeof(*bundle));
        }

        bundle->type = bundle_type_new;

        return 0;

fail_submit:
        return rte_flow_error_set(error, rc,
                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                        "Failed to request the (group of) action(s)");
}

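/*
 * Translate a PHY_PORT action into a DELIVER action in the EFX action
 * set specification. When "original" is requested, the physical port
 * assigned to the adapter (enc_assigned_port) is used instead of the
 * index from the action configuration.
 */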
static int
sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
                                   const struct rte_flow_action_phy_port *conf,
                                   efx_mae_actions_t *spec)
{
        efx_mport_sel_t mport;
        uint32_t phy_port;
        int rc;

        if (conf->original != 0)
                phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
        else
                phy_port = conf->index;

        rc = efx_mae_mport_by_phy_port(phy_port, &mport);
        if (rc != 0)
                return rc;

        return efx_mae_action_set_populate_deliver(spec, &mport);
}

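/*
 * Parse a single RTE flow action and populate the EFX action set
 * specification accordingly. Each successfully parsed action is
 * recorded in the bundle's action mask so that repetitions can be
 * detected by sfc_mae_actions_bundle_sync().
 */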
static int
sfc_mae_rule_parse_action(struct sfc_adapter *sa,
                          const struct rte_flow_action *action,
                          struct sfc_mae_actions_bundle *bundle,
                          efx_mae_actions_t *spec,
                          struct rte_flow_error *error)
{
        int rc;

        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
                                       bundle->actions_mask);
                rc = efx_mae_action_set_populate_vlan_pop(spec);
                break;
        case RTE_FLOW_ACTION_TYPE_PHY_PORT:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
                                       bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                "Unsupported action");
        }

        if (rc != 0) {
                rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "Failed to request the action");
        } else {
                bundle->actions_mask |= (1ULL << action->type);
        }

        return rc;
}

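/*
 * Translate the RTE flow actions into an EFX action set specification
 * and find or create the corresponding driver-level action set entry.
 * The bundle machinery above is used to glue together RTE flow actions
 * which map to a single MAE action.
 */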
int
sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
                           const struct rte_flow_action actions[],
                           struct sfc_mae_action_set **action_setp,
                           struct rte_flow_error *error)
{
        struct sfc_mae_actions_bundle bundle = {0};
        const struct rte_flow_action *action;
        efx_mae_actions_t *spec;
        int rc;

        if (actions == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
                                "NULL actions");
        }

        rc = efx_mae_action_set_spec_init(sa->nic, &spec);
        if (rc != 0)
                goto fail_action_set_spec_init;

        for (action = actions;
             action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
                rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
                if (rc != 0)
                        goto fail_rule_parse_action;

                rc = sfc_mae_rule_parse_action(sa, action, &bundle, spec,
                                               error);
                if (rc != 0)
                        goto fail_rule_parse_action;
        }

        rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
        if (rc != 0)
                goto fail_rule_parse_action;

        *action_setp = sfc_mae_action_set_attach(sa, spec);
        if (*action_setp != NULL) {
                efx_mae_action_set_spec_fini(sa->nic, spec);
                return 0;
        }

        rc = sfc_mae_action_set_add(sa, spec, action_setp);
        if (rc != 0)
                goto fail_action_set_add;

        return 0;

fail_action_set_add:
fail_rule_parse_action:
        efx_mae_action_set_spec_fini(sa->nic, spec);

fail_action_set_spec_init:
        if (rc > 0) {
                rc = rte_flow_error_set(error, rc,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        NULL, "Failed to process the action");
        }
        return rc;
}

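/*
 * Compare the classes of two match specifications. A failure of the
 * underlying EFX comparison is treated as "not the same class".
 */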
static bool
sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
                        const efx_mae_match_spec_t *left,
                        const efx_mae_match_spec_t *right)
{
        bool have_same_class;
        int rc;

        rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
                                           &have_same_class);

        return (rc == 0) ? have_same_class : false;
}

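/*
 * Try to find an already inserted action rule of the same class as the
 * rule being validated; such a match proves that the FW accepts rules
 * of this class. If no match is found, the rule is accepted anyway
 * because the FW does not provide a dedicated validation facility yet
 * (see the log message below).
 */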
static int
sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
                                 struct sfc_flow_spec_mae *spec)
{
        const struct rte_flow *entry;

        TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
                const struct sfc_flow_spec *entry_spec = &entry->spec;
                const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
                const efx_mae_match_spec_t *left = es_mae->match_spec;
                const efx_mae_match_spec_t *right = spec->match_spec;

                switch (entry_spec->type) {
                case SFC_FLOW_SPEC_FILTER:
                        /* Ignore VNIC-level flows */
                        break;
                case SFC_FLOW_SPEC_MAE:
                        if (sfc_mae_rules_class_cmp(sa, left, right))
                                return 0;
                        break;
                default:
                        SFC_ASSERT(false);
                }
        }

        sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
                 "support for inner frame pattern items is not guaranteed; "
                 "other than that, the items are valid from SW standpoint");
        return 0;
}

/**
 * Confirm that a given flow can be accepted by the FW.
 *
 * @param sa
 *   Software adapter context
 * @param flow
 *   Flow to be verified
 * @return
 *   Zero on success and non-zero in the case of error.
 *   The special value EAGAIN indicates that the adapter is
 *   not in the started state. The started state is required
 *   because the rule class of the flow being validated can
 *   only be compared with the classes of the active rules,
 *   and those classes are known to be accepted by the FW.
 */
int
sfc_mae_flow_verify(struct sfc_adapter *sa,
                    struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_mae *spec_mae = &spec->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (sa->state != SFC_ADAPTER_STARTED)
                return EAGAIN;

        return sfc_mae_action_rule_class_verify(sa, spec_mae);
}

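/*
 * Enable the flow's action set (allocating the FW resource if needed)
 * and insert the action rule into the HW.
 */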
int
sfc_mae_flow_insert(struct sfc_adapter *sa,
                    struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_mae *spec_mae = &spec->mae;
        struct sfc_mae_action_set *action_set = spec_mae->action_set;
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(action_set != NULL);

        rc = sfc_mae_action_set_enable(sa, action_set);
        if (rc != 0)
                goto fail_action_set_enable;

        rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
                                        NULL, &fw_rsrc->aset_id,
                                        &spec_mae->rule_id);
        if (rc != 0)
                goto fail_action_rule_insert;

        return 0;

fail_action_rule_insert:
        (void)sfc_mae_action_set_disable(sa, action_set);

fail_action_set_enable:
        return rc;
}

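/*
 * Remove the action rule from the HW and drop the FW resource reference
 * taken for the action set on insertion.
 */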
int
sfc_mae_flow_remove(struct sfc_adapter *sa,
                    struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_mae *spec_mae = &spec->mae;
        struct sfc_mae_action_set *action_set = spec_mae->action_set;
        int rc;

        SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(action_set != NULL);

        rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
        if (rc != 0)
                return rc;

        spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;

        return sfc_mae_action_set_disable(sa, action_set);
}