1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
12 #include <rte_common.h>
/*
 * Bring up MAE (Match-Action Engine) support for the adapter: probe FW
 * capability, initialise libefx MAE state, query HW limits and cache
 * them in sa->mae.
 * NOTE(review): this view of the file is elided — return statements,
 * braces and some error labels are not visible here.
 */
20 sfc_mae_attach(struct sfc_adapter *sa)
22 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
23 struct sfc_mae *mae = &sa->mae;
24 efx_mae_limits_t limits;
27 sfc_log_init(sa, "entry");
/* FW without MAE: record UNSUPPORTED status; presumably exits early */
29 if (!encp->enc_mae_supported) {
30 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
34 sfc_log_init(sa, "init MAE");
35 rc = efx_mae_init(sa->nic);
39 sfc_log_init(sa, "get MAE limits");
40 rc = efx_mae_get_limits(sa->nic, &limits);
42 goto fail_mae_get_limits;
/* Success: cache the action-rule priority limit and init the set list */
44 mae->status = SFC_MAE_STATUS_SUPPORTED;
45 mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
46 TAILQ_INIT(&mae->action_sets);
48 sfc_log_init(sa, "done");
/* Error path: undo efx_mae_init() */
53 efx_mae_fini(sa->nic);
56 sfc_log_init(sa, "failed %d", rc);
/*
 * Tear down MAE support: reset the cached limits/status and finalise
 * libefx MAE state, but only if attach previously reported SUPPORTED.
 */
62 sfc_mae_detach(struct sfc_adapter *sa)
64 struct sfc_mae *mae = &sa->mae;
65 enum sfc_mae_status status_prev = mae->status;
67 sfc_log_init(sa, "entry");
69 mae->nb_action_rule_prios_max = 0;
70 mae->status = SFC_MAE_STATUS_UNKNOWN;
/* Nothing to finalise unless efx_mae_init() actually succeeded */
72 if (status_prev != SFC_MAE_STATUS_SUPPORTED)
75 efx_mae_fini(sa->nic);
77 sfc_log_init(sa, "done");
/*
 * Find an already-registered action set whose EFX spec equals the given
 * one and, on a hit, take a software reference on it.
 * NOTE(review): the tail (return of the match / NULL) is not visible in
 * this elided view.
 */
80 static struct sfc_mae_action_set *
81 sfc_mae_action_set_attach(struct sfc_adapter *sa,
82 const efx_mae_actions_t *spec)
84 struct sfc_mae_action_set *action_set;
85 struct sfc_mae *mae = &sa->mae;
/* List traversal requires the adapter lock */
87 SFC_ASSERT(sfc_adapter_is_locked(sa));
89 TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
90 if (efx_mae_action_set_specs_equal(action_set->spec, spec)) {
/* Reuse the existing entry: bump its SW refcount */
91 ++(action_set->refcnt);
/*
 * Register a new action set built from the given EFX spec. Ownership of
 * 'spec' passes to the new entry; its FW resource ID starts out invalid
 * (allocation is deferred to sfc_mae_action_set_enable()).
 */
100 sfc_mae_action_set_add(struct sfc_adapter *sa,
101 efx_mae_actions_t *spec,
102 struct sfc_mae_action_set **action_setp)
104 struct sfc_mae_action_set *action_set;
105 struct sfc_mae *mae = &sa->mae;
107 SFC_ASSERT(sfc_adapter_is_locked(sa));
109 action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
110 if (action_set == NULL)
/* New entry starts with a single SW reference held by the caller */
113 action_set->refcnt = 1;
114 action_set->spec = spec;
/* No FW resource allocated yet */
116 action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
118 TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
120 *action_setp = action_set;
/*
 * Drop one SW reference on an action set; on the last reference, the
 * entry must already have no FW resource, and its spec and memory are
 * released.
 */
126 sfc_mae_action_set_del(struct sfc_adapter *sa,
127 struct sfc_mae_action_set *action_set)
129 struct sfc_mae *mae = &sa->mae;
131 SFC_ASSERT(sfc_adapter_is_locked(sa));
132 SFC_ASSERT(action_set->refcnt != 0);
134 --(action_set->refcnt);
/* Other users remain: keep the entry */
136 if (action_set->refcnt != 0)
/* Last reference: FW side must have been disabled already */
139 SFC_ASSERT(action_set->fw_rsrc.aset_id.id == EFX_MAE_RSRC_ID_INVALID);
140 SFC_ASSERT(action_set->fw_rsrc.refcnt == 0);
142 efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
143 TAILQ_REMOVE(&mae->action_sets, action_set, entries);
144 rte_free(action_set);
/*
 * Make sure the action set is backed by a FW resource: allocate it on
 * the first enable, otherwise (presumably) just bump the FW-side
 * refcount. NOTE(review): allocation result handling and the refcount
 * increment are not visible in this elided view.
 */
148 sfc_mae_action_set_enable(struct sfc_adapter *sa,
149 struct sfc_mae_action_set *action_set)
151 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
154 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* First user: the FW resource must not exist yet */
156 if (fw_rsrc->refcnt == 0) {
157 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
158 SFC_ASSERT(action_set->spec != NULL);
160 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
/*
 * Release one FW-side reference on the action set; on the last one,
 * free the FW resource and invalidate the cached ID.
 */
172 sfc_mae_action_set_disable(struct sfc_adapter *sa,
173 struct sfc_mae_action_set *action_set)
175 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
178 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Disable is only legal while the FW resource exists */
179 SFC_ASSERT(fw_rsrc->aset_id.id != EFX_MAE_RSRC_ID_INVALID);
180 SFC_ASSERT(fw_rsrc->refcnt != 0);
/* Last FW-side user: free the resource in the FW */
182 if (fw_rsrc->refcnt == 1) {
183 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
187 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
/*
 * Release the SW-side resources attached to an MAE flow: drop the
 * action-set reference and finalise the match spec. The flow's FW rule
 * must already have been removed (rule_id is asserted invalid).
 */
196 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
197 struct rte_flow *flow)
199 struct sfc_flow_spec *spec;
200 struct sfc_flow_spec_mae *spec_mae;
210 spec_mae = &spec->mae;
212 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
/* Either pointer may be NULL for a partially-parsed flow */
214 if (spec_mae->action_set != NULL)
215 sfc_mae_action_set_del(sa, spec_mae->action_set);
217 if (spec_mae->match_spec != NULL)
218 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
/*
 * Parse an RTE flow PHY_PORT pattern item: convert the physical port
 * index into an MAE m-port selector and set it in the action-rule match
 * spec. Only one traffic-source item is allowed per pattern.
 */
222 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
223 struct sfc_flow_parse_ctx *ctx,
224 struct rte_flow_error *error)
226 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
227 const struct rte_flow_item_phy_port supp_mask = {
230 const void *def_mask = &rte_flow_item_phy_port_mask;
231 const struct rte_flow_item_phy_port *spec = NULL;
232 const struct rte_flow_item_phy_port *mask = NULL;
233 efx_mport_sel_t mport_v;
/* A source m-port may be set only once per pattern */
236 if (ctx_mae->match_mport_set) {
237 return rte_flow_error_set(error, ENOTSUP,
238 RTE_FLOW_ERROR_TYPE_ITEM, item,
239 "Can't handle multiple traffic source items");
/* Fetch spec/mask, applying the default mask when none is given */
242 rc = sfc_flow_parse_init(item,
243 (const void **)&spec, (const void **)&mask,
244 (const void *)&supp_mask, def_mask,
245 sizeof(struct rte_flow_item_phy_port), error);
/* Only the exact supported mask is accepted for the port index */
249 if (mask->index != supp_mask.index) {
250 return rte_flow_error_set(error, EINVAL,
251 RTE_FLOW_ERROR_TYPE_ITEM, item,
252 "Bad mask in the PHY_PORT pattern item");
255 /* If "spec" is not set, could be any physical port */
259 rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
261 return rte_flow_error_set(error, rc,
262 RTE_FLOW_ERROR_TYPE_ITEM, item,
263 "Failed to convert the PHY_PORT index");
266 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
269 return rte_flow_error_set(error, rc,
270 RTE_FLOW_ERROR_TYPE_ITEM, item,
271 "Failed to set MPORT for the PHY_PORT");
/* Remember that the traffic source has been pinned down */
274 ctx_mae->match_mport_set = B_TRUE;
/*
 * Maps one MAE match field to its location (offset/size) inside the
 * corresponding rte_flow_item_ structure. NOTE(review): the size and
 * offset members are elided from this view.
 */
279 struct sfc_mae_field_locator {
280 efx_mae_field_id_t field_id;
282 /* Field offset in the corresponding rte_flow_item_ struct */
/*
 * Build the "supported mask" for a pattern item: zero the whole mask
 * buffer, then set all-ones over every field listed in the locator
 * table.
 */
287 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
288 unsigned int nb_field_locators, void *mask_ptr,
293 memset(mask_ptr, 0, mask_size);
295 for (i = 0; i < nb_field_locators; ++i) {
296 const struct sfc_mae_field_locator *fl = &field_locators[i];
/* Locator tables must never point outside the item structure */
298 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
299 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
/*
 * Copy each field listed in the locator table from the item's spec and
 * mask buffers into the EFX match specification. On failure, report a
 * generic item-processing error to the caller.
 */
304 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
305 unsigned int nb_field_locators, const uint8_t *spec,
306 const uint8_t *mask, efx_mae_match_spec_t *efx_spec,
307 struct rte_flow_error *error)
312 for (i = 0; i < nb_field_locators; ++i) {
313 const struct sfc_mae_field_locator *fl = &field_locators[i];
/* Value and mask share the same offset/size within the item */
315 rc = efx_mae_match_spec_field_set(efx_spec, fl->field_id,
316 fl->size, spec + fl->ofst,
317 fl->size, mask + fl->ofst);
323 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
324 NULL, "Failed to process item fields");
/*
 * Field locators for the ETH pattern item: EtherType, destination MAC
 * and source MAC, each described by its MAE field ID plus the size and
 * offset of the matching member of struct rte_flow_item_eth.
 */
330 static const struct sfc_mae_field_locator flocs_eth[] = {
332 EFX_MAE_FIELD_ETHER_TYPE_BE,
333 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
334 offsetof(struct rte_flow_item_eth, type),
337 EFX_MAE_FIELD_ETH_DADDR_BE,
338 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
339 offsetof(struct rte_flow_item_eth, dst),
342 EFX_MAE_FIELD_ETH_SADDR_BE,
343 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
344 offsetof(struct rte_flow_item_eth, src),
/*
 * Parse an RTE flow ETH pattern item: build the supported mask from the
 * flocs_eth table, validate the item against it and feed the fields
 * into the action-rule match spec.
 */
349 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
350 struct sfc_flow_parse_ctx *ctx,
351 struct rte_flow_error *error)
353 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
354 struct rte_flow_item_eth supp_mask;
355 const uint8_t *spec = NULL;
356 const uint8_t *mask = NULL;
/* The supported mask covers exactly the fields MAE can match on */
359 sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
360 &supp_mask, sizeof(supp_mask));
362 rc = sfc_flow_parse_init(item,
363 (const void **)&spec, (const void **)&mask,
364 (const void *)&supp_mask,
365 &rte_flow_item_eth_mask,
366 sizeof(struct rte_flow_item_eth), error);
370 /* If "spec" is not set, could be any Ethernet */
374 return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
375 ctx_mae->match_spec_action, error);
/*
 * Table of pattern items supported by the MAE backend, consumed by the
 * generic sfc_flow_parse_pattern() machinery.
 */
378 static const struct sfc_flow_item sfc_flow_items[] = {
380 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
382 * In terms of RTE flow, this item is a META one,
383 * and its position in the pattern is don't care.
385 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
386 .layer = SFC_FLOW_ITEM_ANY_LAYER,
387 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
388 .parse = sfc_mae_rule_parse_item_phy_port,
391 .type = RTE_FLOW_ITEM_TYPE_ETH,
/* ETH must be the first network layer in the pattern */
392 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
393 .layer = SFC_FLOW_ITEM_L2,
394 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
395 .parse = sfc_mae_rule_parse_item_eth,
/*
 * Translate an RTE flow pattern into an MAE action-rule match spec:
 * initialise the spec, run the generic pattern parser over
 * sfc_flow_items, validate the resulting spec and hand ownership over
 * to the flow spec on success; unwind with goto-cleanup on failure.
 */
400 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
401 const struct rte_flow_item pattern[],
402 struct sfc_flow_spec_mae *spec,
403 struct rte_flow_error *error)
405 struct sfc_mae_parse_ctx ctx_mae;
406 struct sfc_flow_parse_ctx ctx;
409 memset(&ctx_mae, 0, sizeof(ctx_mae));
411 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
413 &ctx_mae.match_spec_action);
415 rc = rte_flow_error_set(error, rc,
416 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
417 "Failed to initialise action rule match specification");
418 goto fail_init_match_spec_action;
421 ctx.type = SFC_FLOW_PARSE_CTX_MAE;
424 rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
425 pattern, &ctx, error);
427 goto fail_parse_pattern;
/* Items may individually parse yet combine into an invalid spec */
429 if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
430 rc = rte_flow_error_set(error, ENOTSUP,
431 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
432 "Inconsistent pattern");
433 goto fail_validate_match_spec_action;
/* Ownership of the spec passes to the caller's flow spec */
436 spec->match_spec = ctx_mae.match_spec_action;
440 fail_validate_match_spec_action:
442 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
444 fail_init_match_spec_action:
449 * An action supported by MAE may correspond to a bundle of RTE flow actions,
450 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
451 * That is, related RTE flow actions need to be tracked as parts of a whole
452 * so that they can be combined into a single action and submitted to MAE
453 * representation of a given rule's action set.
455 * Each RTE flow action provided by an application gets classified as
456 * one belonging to some bundle type. If an action is not supposed to
457 * belong to any bundle, or if this action is END, it is described as
458 * one belonging to a dummy bundle of type EMPTY.
460 * A currently tracked bundle will be submitted if a repeating
461 * action or an action of different bundle type follows.
/* Bundle kinds: EMPTY is the dummy type for stand-alone actions */
464 enum sfc_mae_actions_bundle_type {
465 SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
466 SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
/* State of the bundle currently being accumulated during action parse */
469 struct sfc_mae_actions_bundle {
470 enum sfc_mae_actions_bundle_type type;
472 /* Indicates actions already tracked by the current bundle */
473 uint64_t actions_mask;
475 /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
476 rte_be16_t vlan_push_tpid;
477 rte_be16_t vlan_push_tci;
481 * Combine configuration of RTE flow actions tracked by the bundle into a
482 * single action and submit the result to MAE action set specification.
483 * Do nothing in the case of dummy action bundle.
486 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
487 efx_mae_actions_t *spec)
491 switch (bundle->type) {
/* Dummy bundle: nothing to emit */
492 case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
/* VLAN push: TPID/TCI were accumulated by the parse helpers below */
494 case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
495 rc = efx_mae_action_set_populate_vlan_push(
496 spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
507 * Given the type of the next RTE flow action in the line, decide
508 * whether a new bundle is about to start, and, if this is the case,
509 * submit and reset the current bundle.
512 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
513 struct sfc_mae_actions_bundle *bundle,
514 efx_mae_actions_t *spec,
515 struct rte_flow_error *error)
517 enum sfc_mae_actions_bundle_type bundle_type_new;
/* Classify the incoming action by bundle type */
520 switch (action->type) {
521 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
522 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
523 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
524 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
528 * Self-sufficient actions, including END, are handled in this
529 * case. No checks for unsupported actions are needed here
530 * because parsing doesn't occur at this point.
532 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
/* A type change or a repeated action flushes the current bundle */
536 if (bundle_type_new != bundle->type ||
537 (bundle->actions_mask & (1ULL << action->type)) != 0) {
538 rc = sfc_mae_actions_bundle_submit(bundle, spec);
542 memset(bundle, 0, sizeof(*bundle));
545 bundle->type = bundle_type_new;
550 return rte_flow_error_set(error, rc,
551 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
552 "Failed to request the (group of) action(s)");
/* Record the TPID (ethertype) for a pending VLAN_PUSH bundle */
556 sfc_mae_rule_parse_action_of_push_vlan(
557 const struct rte_flow_action_of_push_vlan *conf,
558 struct sfc_mae_actions_bundle *bundle)
560 bundle->vlan_push_tpid = conf->ethertype;
/* Merge the 12-bit VID into the TCI of a pending VLAN_PUSH bundle */
564 sfc_mae_rule_parse_action_of_set_vlan_vid(
565 const struct rte_flow_action_of_set_vlan_vid *conf,
566 struct sfc_mae_actions_bundle *bundle)
568 bundle->vlan_push_tci |= (conf->vlan_vid &
569 rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
/* Merge the 3-bit PCP (TCI bits 15:13) into a pending VLAN_PUSH bundle */
573 sfc_mae_rule_parse_action_of_set_vlan_pcp(
574 const struct rte_flow_action_of_set_vlan_pcp *conf,
575 struct sfc_mae_actions_bundle *bundle)
/* PCP occupies the top three bits of the TCI */
577 uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
578 RTE_LEN2MASK(3, uint8_t)) << 13;
580 bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
/*
 * Translate a PHY_PORT action into an MAE DELIVER action: pick either
 * the adapter's own assigned port ("original") or the explicit index,
 * convert it to an m-port selector and populate the action spec.
 */
584 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
585 const struct rte_flow_action_phy_port *conf,
586 efx_mae_actions_t *spec)
588 efx_mport_sel_t mport;
592 if (conf->original != 0)
593 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
595 phy_port = conf->index;
597 rc = efx_mae_mport_by_phy_port(phy_port, &mport);
601 return efx_mae_action_set_populate_deliver(spec, &mport);
/*
 * Parse one RTE flow action: bundle-member actions (VLAN push parts)
 * only stash their parameters in the bundle, while self-sufficient
 * actions populate the EFX action spec directly. Each case first checks
 * via SFC_BUILD_SET_OVERFLOW that the action type fits the 64-bit
 * actions_mask, and the handled type is recorded in the mask at the end.
 */
605 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
606 const struct rte_flow_action *action,
607 struct sfc_mae_actions_bundle *bundle,
608 efx_mae_actions_t *spec,
609 struct rte_flow_error *error)
613 switch (action->type) {
614 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
615 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
616 bundle->actions_mask);
617 rc = efx_mae_action_set_populate_vlan_pop(spec);
619 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
620 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
621 bundle->actions_mask);
622 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
624 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
625 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
626 bundle->actions_mask);
627 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
629 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
630 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
631 bundle->actions_mask);
632 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
634 case RTE_FLOW_ACTION_TYPE_FLAG:
635 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
636 bundle->actions_mask);
637 rc = efx_mae_action_set_populate_flag(spec);
639 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
640 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
641 bundle->actions_mask);
642 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
/* Anything not listed above is rejected outright */
645 return rte_flow_error_set(error, ENOTSUP,
646 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
647 "Unsupported action");
651 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
652 NULL, "Failed to request the action");
/* Track the action so a repeat triggers a bundle flush */
654 bundle->actions_mask |= (1ULL << action->type);
/*
 * Translate the RTE flow action list into an MAE action set: build an
 * EFX action spec by walking the actions with bundle tracking, then
 * either attach to an identical existing action set (freeing the fresh
 * spec) or register a new one that takes spec ownership.
 */
661 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
662 const struct rte_flow_action actions[],
663 struct sfc_mae_action_set **action_setp,
664 struct rte_flow_error *error)
666 struct sfc_mae_actions_bundle bundle = {0};
667 const struct rte_flow_action *action;
668 efx_mae_actions_t *spec;
671 if (actions == NULL) {
672 return rte_flow_error_set(error, EINVAL,
673 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
677 rc = efx_mae_action_set_spec_init(sa->nic, &spec);
679 goto fail_action_set_spec_init;
/* For each action: flush a finished bundle first, then parse it */
681 for (action = actions;
682 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
683 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
685 goto fail_rule_parse_action;
687 rc = sfc_mae_rule_parse_action(sa, action, &bundle, spec,
690 goto fail_rule_parse_action;
/* Final sync (on END) submits the trailing bundle, if any */
693 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
695 goto fail_rule_parse_action;
/* Deduplicate: reuse an identical action set when one exists */
697 *action_setp = sfc_mae_action_set_attach(sa, spec);
698 if (*action_setp != NULL) {
699 efx_mae_action_set_spec_fini(sa->nic, spec);
/* New action set takes over ownership of 'spec' */
703 rc = sfc_mae_action_set_add(sa, spec, action_setp);
705 goto fail_action_set_add;
710 fail_rule_parse_action:
711 efx_mae_action_set_spec_fini(sa->nic, spec);
713 fail_action_set_spec_init:
715 rc = rte_flow_error_set(error, rc,
716 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
717 NULL, "Failed to process the action");
/*
 * Ask libefx whether two match specs belong to the same rule class.
 * A comparison failure is conservatively treated as "not same class".
 */
723 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
724 const efx_mae_match_spec_t *left,
725 const efx_mae_match_spec_t *right)
727 bool have_same_class;
730 rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
733 return (rc == 0) ? have_same_class : false;
/*
 * Check whether the candidate flow's rule class matches that of any
 * active MAE rule, scanning the adapter flow list newest-first.
 * VNIC-level (filter) flows are skipped. Without HW rule validation,
 * a class match is the best available assurance of FW acceptance.
 */
737 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
738 struct sfc_flow_spec_mae *spec)
740 const struct rte_flow *entry;
742 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
743 const struct sfc_flow_spec *entry_spec = &entry->spec;
744 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
745 const efx_mae_match_spec_t *left = es_mae->match_spec;
746 const efx_mae_match_spec_t *right = spec->match_spec;
748 switch (entry_spec->type) {
749 case SFC_FLOW_SPEC_FILTER:
750 /* Ignore VNIC-level flows */
752 case SFC_FLOW_SPEC_MAE:
753 if (sfc_mae_rules_class_cmp(sa, left, right))
761 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
762 "support for inner frame pattern items is not guaranteed; "
763 "other than that, the items are valid from SW standpoint");
768 * Confirm that a given flow can be accepted by the FW.
771 * Software adapter context
773 * Flow to be verified
775 * Zero on success and non-zero in the case of error.
776 * A special value of EAGAIN indicates that the adapter is
777 * not in started state. This state is compulsory because
778 * it only makes sense to compare the rule class of the flow
779 * being validated with classes of the active rules.
780 * Such classes are known to be supported by the FW.
783 sfc_mae_flow_verify(struct sfc_adapter *sa,
784 struct rte_flow *flow)
786 struct sfc_flow_spec *spec = &flow->spec;
787 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
789 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Class comparison is only meaningful against active rules */
791 if (sa->state != SFC_ADAPTER_STARTED)
794 return sfc_mae_action_rule_class_verify(sa, spec_mae);
/*
 * Program the flow into the FW: enable (allocate/reference) the action
 * set's FW resource, then insert the action rule referencing it. On
 * rule-insert failure the action set is disabled again (goto unwind).
 */
798 sfc_mae_flow_insert(struct sfc_adapter *sa,
799 struct rte_flow *flow)
801 struct sfc_flow_spec *spec = &flow->spec;
802 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
803 struct sfc_mae_action_set *action_set = spec_mae->action_set;
804 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
807 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
808 SFC_ASSERT(action_set != NULL);
810 rc = sfc_mae_action_set_enable(sa, action_set);
812 goto fail_action_set_enable;
/* Rule references the FW action set ID obtained by enable above */
814 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
815 NULL, &fw_rsrc->aset_id,
818 goto fail_action_rule_insert;
822 fail_action_rule_insert:
/* Best-effort unwind; the original rc is the one reported */
823 (void)sfc_mae_action_set_disable(sa, action_set);
825 fail_action_set_enable:
/*
 * Remove the flow's action rule from the FW, invalidate the cached rule
 * ID and release the FW-side reference on its action set.
 */
830 sfc_mae_flow_remove(struct sfc_adapter *sa,
831 struct rte_flow *flow)
833 struct sfc_flow_spec *spec = &flow->spec;
834 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
835 struct sfc_mae_action_set *action_set = spec_mae->action_set;
838 SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
839 SFC_ASSERT(action_set != NULL);
841 rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
/* Mark the rule as gone before touching the action set */
845 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
847 return sfc_mae_action_set_disable(sa, action_set);