1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
12 #include <rte_common.h>
/*
 * Probe-time initialisation of MAE (Match-Action Engine) support.
 *
 * If the NIC does not advertise MAE support, record the UNSUPPORTED
 * status (treated as success: the driver simply runs without MAE).
 * Otherwise initialise libefx MAE state, query the engine limits and
 * cache the maximum number of action rule priorities, then mark the
 * status SUPPORTED and initialise the action set list head.
 *
 * NOTE(review): this excerpt elides intermediate lines (see gaps in
 * the embedded line numbers) — the early-return, the failure labels
 * preceding lines 53/56 and the final returns are not visible here.
 */
20 sfc_mae_attach(struct sfc_adapter *sa)
22 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
23 struct sfc_mae *mae = &sa->mae;
24 efx_mae_limits_t limits;
27 sfc_log_init(sa, "entry");
29 if (!encp->enc_mae_supported) {
30 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
34 sfc_log_init(sa, "init MAE");
35 rc = efx_mae_init(sa->nic);
39 sfc_log_init(sa, "get MAE limits");
40 rc = efx_mae_get_limits(sa->nic, &limits);
42 goto fail_mae_get_limits;
/* From here on MAE is usable; remember the HW priority limit. */
44 mae->status = SFC_MAE_STATUS_SUPPORTED;
45 mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
46 TAILQ_INIT(&mae->action_sets);
48 sfc_log_init(sa, "done");
/* Error path: undo efx_mae_init() on later failures. */
53 efx_mae_fini(sa->nic);
56 sfc_log_init(sa, "failed %d", rc);
/*
 * Tear down MAE support on device detach.
 *
 * The previous status is sampled first and the state is reset to
 * UNKNOWN; efx_mae_fini() is invoked only if MAE had actually been
 * brought up (previous status SUPPORTED), so an attach that bailed
 * out early is not "finalised" twice.
 */
62 sfc_mae_detach(struct sfc_adapter *sa)
64 struct sfc_mae *mae = &sa->mae;
65 enum sfc_mae_status status_prev = mae->status;
67 sfc_log_init(sa, "entry");
69 mae->nb_action_rule_prios_max = 0;
70 mae->status = SFC_MAE_STATUS_UNKNOWN;
/* Nothing to finalise unless attach completed successfully. */
72 if (status_prev != SFC_MAE_STATUS_SUPPORTED)
75 efx_mae_fini(sa->nic);
77 sfc_log_init(sa, "done");
/*
 * Look up an already-registered action set whose specification is
 * equal to @spec and, if found, take an extra software reference on
 * it. Callers must hold the adapter lock.
 *
 * NOTE(review): the return statements (found entry / NULL on miss)
 * are elided from this excerpt.
 */
80 static struct sfc_mae_action_set *
81 sfc_mae_action_set_attach(struct sfc_adapter *sa,
82 const efx_mae_actions_t *spec)
84 struct sfc_mae_action_set *action_set;
85 struct sfc_mae *mae = &sa->mae;
87 SFC_ASSERT(sfc_adapter_is_locked(sa));
89 TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
90 if (efx_mae_action_set_specs_equal(action_set->spec, spec)) {
91 ++(action_set->refcnt);
/*
 * Register a new software action set entry built around @spec.
 *
 * Ownership of @spec is transferred to the new entry (it is freed by
 * sfc_mae_action_set_del() on last reference). The entry starts with
 * a software refcount of 1, no FW resource allocated (invalid aset
 * ID), and is linked onto the adapter-wide list so that subsequent
 * sfc_mae_action_set_attach() calls can share it.
 * Callers must hold the adapter lock.
 */
100 sfc_mae_action_set_add(struct sfc_adapter *sa,
101 efx_mae_actions_t *spec,
102 struct sfc_mae_action_set **action_setp)
104 struct sfc_mae_action_set *action_set;
105 struct sfc_mae *mae = &sa->mae;
107 SFC_ASSERT(sfc_adapter_is_locked(sa));
109 action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
110 if (action_set == NULL)
113 action_set->refcnt = 1;
114 action_set->spec = spec;
/* No FW resource yet; allocated lazily by sfc_mae_action_set_enable(). */
116 action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
118 TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
120 *action_setp = action_set;
/*
 * Drop one software reference on @action_set; on the last reference,
 * finalise the libefx spec, unlink the entry and free it.
 *
 * The asserts require that the FW resource has already been released
 * (disable balanced against enable) before the final deletion.
 * Callers must hold the adapter lock.
 */
126 sfc_mae_action_set_del(struct sfc_adapter *sa,
127 struct sfc_mae_action_set *action_set)
129 struct sfc_mae *mae = &sa->mae;
131 SFC_ASSERT(sfc_adapter_is_locked(sa));
132 SFC_ASSERT(action_set->refcnt != 0);
134 --(action_set->refcnt);
136 if (action_set->refcnt != 0)
/* Last reference gone: no FW resource may remain behind. */
139 SFC_ASSERT(action_set->fw_rsrc.aset_id.id == EFX_MAE_RSRC_ID_INVALID);
140 SFC_ASSERT(action_set->fw_rsrc.refcnt == 0);
142 efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
143 TAILQ_REMOVE(&mae->action_sets, action_set, entries);
144 rte_free(action_set);
/*
 * Enable the action set in hardware: on the first enable (FW resource
 * refcount 0) allocate the FW action set from the libefx spec;
 * subsequent enables presumably only bump the FW refcount.
 *
 * NOTE(review): the tail of this function (refcount increment and
 * returns) is elided from this excerpt.
 * Callers must hold the adapter lock.
 */
148 sfc_mae_action_set_enable(struct sfc_adapter *sa,
149 struct sfc_mae_action_set *action_set)
151 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
154 SFC_ASSERT(sfc_adapter_is_locked(sa));
156 if (fw_rsrc->refcnt == 0) {
157 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
158 SFC_ASSERT(action_set->spec != NULL);
160 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
/*
 * Counterpart of sfc_mae_action_set_enable(): on the last FW
 * reference (refcount 1), free the FW action set and invalidate the
 * cached resource ID; the refcount decrement itself is elided from
 * this excerpt. Callers must hold the adapter lock.
 */
172 sfc_mae_action_set_disable(struct sfc_adapter *sa,
173 struct sfc_mae_action_set *action_set)
175 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
178 SFC_ASSERT(sfc_adapter_is_locked(sa));
179 SFC_ASSERT(fw_rsrc->aset_id.id != EFX_MAE_RSRC_ID_INVALID);
180 SFC_ASSERT(fw_rsrc->refcnt != 0);
182 if (fw_rsrc->refcnt == 1) {
183 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
187 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
/*
 * Release the software state attached to an MAE flow specification:
 * drop the flow's reference on its action set (if any) and finalise
 * its libefx match specification (if any).
 *
 * The assert requires that the flow is not inserted in hardware
 * (rule ID invalid) at cleanup time.
 */
196 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
197 struct rte_flow *flow)
199 struct sfc_flow_spec *spec;
200 struct sfc_flow_spec_mae *spec_mae;
210 spec_mae = &spec->mae;
212 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
214 if (spec_mae->action_set != NULL)
215 sfc_mae_action_set_del(sa, spec_mae->action_set);
217 if (spec_mae->match_spec != NULL)
218 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
/*
 * Parse an RTE flow PHY_PORT pattern item into the MAE match spec.
 *
 * Only one traffic source item is allowed per pattern (tracked via
 * ctx_mae->match_mport_set). The item's mask must match the supported
 * mask exactly; the physical port index from "spec" is converted to
 * an MAE mport selector and installed into the action rule match
 * specification. Returns 0 on success, or sets *error and returns a
 * negative errno-style value via rte_flow_error_set().
 */
222 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
223 struct sfc_flow_parse_ctx *ctx,
224 struct rte_flow_error *error)
226 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
227 const struct rte_flow_item_phy_port supp_mask = {
230 const void *def_mask = &rte_flow_item_phy_port_mask;
231 const struct rte_flow_item_phy_port *spec = NULL;
232 const struct rte_flow_item_phy_port *mask = NULL;
233 efx_mport_sel_t mport_v;
/* Reject a second traffic source item in the same pattern. */
236 if (ctx_mae->match_mport_set) {
237 return rte_flow_error_set(error, ENOTSUP,
238 RTE_FLOW_ERROR_TYPE_ITEM, item,
239 "Can't handle multiple traffic source items");
242 rc = sfc_flow_parse_init(item,
243 (const void **)&spec, (const void **)&mask,
244 (const void *)&supp_mask, def_mask,
245 sizeof(struct rte_flow_item_phy_port), error);
/* Partial masks on the port index are not supported. */
249 if (mask->index != supp_mask.index) {
250 return rte_flow_error_set(error, EINVAL,
251 RTE_FLOW_ERROR_TYPE_ITEM, item,
252 "Bad mask in the PHY_PORT pattern item");
255 /* If "spec" is not set, could be any physical port */
259 rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
261 return rte_flow_error_set(error, rc,
262 RTE_FLOW_ERROR_TYPE_ITEM, item,
263 "Failed to convert the PHY_PORT index");
266 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
269 return rte_flow_error_set(error, rc,
270 RTE_FLOW_ERROR_TYPE_ITEM, item,
271 "Failed to set MPORT for the PHY_PORT");
274 ctx_mae->match_mport_set = B_TRUE;
/*
 * Maps one MAE match field to its location (offset/size — size field
 * elided from this excerpt) within the corresponding rte_flow_item_*
 * structure, so that generic helpers can copy spec/mask bytes.
 */
279 struct sfc_mae_field_locator {
280 efx_mae_field_id_t field_id;
282 /* Field offset in the corresponding rte_flow_item_ struct */
/*
 * Build the "supported mask" for a pattern item: zero the whole mask
 * buffer, then set all-ones bytes over every field described by the
 * locator table. The assert guards against a locator pointing past
 * the end of the mask buffer.
 */
287 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
288 unsigned int nb_field_locators, void *mask_ptr,
293 memset(mask_ptr, 0, mask_size);
295 for (i = 0; i < nb_field_locators; ++i) {
296 const struct sfc_mae_field_locator *fl = &field_locators[i];
298 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
299 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
/*
 * Copy each field described by the locator table from the item's
 * spec/mask byte arrays into the MAE match specification via
 * efx_mae_match_spec_field_set(). On a libefx failure the error is
 * reported through rte_flow_error_set() (failure label elided from
 * this excerpt).
 */
304 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
305 unsigned int nb_field_locators, const uint8_t *spec,
306 const uint8_t *mask, efx_mae_match_spec_t *efx_spec,
307 struct rte_flow_error *error)
312 for (i = 0; i < nb_field_locators; ++i) {
313 const struct sfc_mae_field_locator *fl = &field_locators[i];
315 rc = efx_mae_match_spec_field_set(efx_spec, fl->field_id,
316 fl->size, spec + fl->ofst,
317 fl->size, mask + fl->ofst);
323 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
324 NULL, "Failed to process item fields");
/*
 * Field locators for the ETH pattern item: EtherType, destination
 * MAC and source MAC, each located inside struct rte_flow_item_eth.
 */
330 static const struct sfc_mae_field_locator flocs_eth[] = {
332 EFX_MAE_FIELD_ETHER_TYPE_BE,
333 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
334 offsetof(struct rte_flow_item_eth, type),
337 EFX_MAE_FIELD_ETH_DADDR_BE,
338 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
339 offsetof(struct rte_flow_item_eth, dst),
342 EFX_MAE_FIELD_ETH_SADDR_BE,
343 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
344 offsetof(struct rte_flow_item_eth, src),
/*
 * Parse an RTE flow ETH pattern item. The supported mask is built
 * from flocs_eth, the item is validated/split into spec and mask by
 * sfc_flow_parse_init(), and the fields are transferred into the MAE
 * action rule match specification by sfc_mae_parse_item().
 */
349 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
350 struct sfc_flow_parse_ctx *ctx,
351 struct rte_flow_error *error)
353 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
354 struct rte_flow_item_eth supp_mask;
355 const uint8_t *spec = NULL;
356 const uint8_t *mask = NULL;
359 sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
360 &supp_mask, sizeof(supp_mask));
362 rc = sfc_flow_parse_init(item,
363 (const void **)&spec, (const void **)&mask,
364 (const void *)&supp_mask,
365 &rte_flow_item_eth_mask,
366 sizeof(struct rte_flow_item_eth), error);
370 /* If "spec" is not set, could be any Ethernet */
374 return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
375 ctx_mae->match_spec_action, error);
/*
 * Pattern item parser dispatch table for MAE rules: maps each
 * supported RTE flow item type to its layer constraints, parse
 * context type and parse callback.
 */
378 static const struct sfc_flow_item sfc_flow_items[] = {
380 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
382 * In terms of RTE flow, this item is a META one,
383 * and its position in the pattern is don't care.
385 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
386 .layer = SFC_FLOW_ITEM_ANY_LAYER,
387 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
388 .parse = sfc_mae_rule_parse_item_phy_port,
391 .type = RTE_FLOW_ITEM_TYPE_ETH,
392 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
393 .layer = SFC_FLOW_ITEM_L2,
394 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
395 .parse = sfc_mae_rule_parse_item_eth,
/*
 * Parse the whole RTE flow pattern into an MAE action rule match
 * specification.
 *
 * A fresh libefx match spec is initialised, the generic pattern
 * walker dispatches each item through sfc_flow_items[], the resulting
 * spec is validated for consistency, and on success its ownership is
 * handed over to @spec->match_spec. All failure paths finalise the
 * temporary libefx spec (goto-based cleanup).
 */
400 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
401 const struct rte_flow_item pattern[],
402 struct sfc_flow_spec_mae *spec,
403 struct rte_flow_error *error)
405 struct sfc_mae_parse_ctx ctx_mae;
406 struct sfc_flow_parse_ctx ctx;
409 memset(&ctx_mae, 0, sizeof(ctx_mae));
411 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
413 &ctx_mae.match_spec_action);
415 rc = rte_flow_error_set(error, rc,
416 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
417 "Failed to initialise action rule match specification");
418 goto fail_init_match_spec_action;
421 ctx.type = SFC_FLOW_PARSE_CTX_MAE;
424 rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
425 pattern, &ctx, error);
427 goto fail_parse_pattern;
/* The parsed items must form a spec the HW can actually match on. */
429 if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
430 rc = rte_flow_error_set(error, ENOTSUP,
431 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
432 "Inconsistent pattern");
433 goto fail_validate_match_spec_action;
436 spec->match_spec = ctx_mae.match_spec_action;
440 fail_validate_match_spec_action:
442 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
444 fail_init_match_spec_action:
449 * An action supported by MAE may correspond to a bundle of RTE flow actions,
450 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
451 * That is, related RTE flow actions need to be tracked as parts of a whole
452 * so that they can be combined into a single action and submitted to MAE
453 * representation of a given rule's action set.
455 * Each RTE flow action provided by an application gets classified as
456 * one belonging to some bundle type. If an action is not supposed to
457 * belong to any bundle, or if this action is END, it is described as
458 * one belonging to a dummy bundle of type EMPTY.
460 * A currently tracked bundle will be submitted if a repeating
461 * action or an action of different bundle type follows.
/* Bundle classification; see the explanatory comment above. */
464 enum sfc_mae_actions_bundle_type {
465 SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
466 SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
/* State of the bundle currently being accumulated during parsing. */
469 struct sfc_mae_actions_bundle {
470 enum sfc_mae_actions_bundle_type type;
472 /* Indicates actions already tracked by the current bundle */
473 uint64_t actions_mask;
475 /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
476 rte_be16_t vlan_push_tpid;
477 rte_be16_t vlan_push_tci;
481 * Combine configuration of RTE flow actions tracked by the bundle into a
482 * single action and submit the result to MAE action set specification.
483 * Do nothing in the case of dummy action bundle.
486 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
487 efx_mae_actions_t *spec)
491 switch (bundle->type) {
492 case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
494 case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
495 rc = efx_mae_action_set_populate_vlan_push(
496 spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
/*
 * Given the type of the next RTE flow action in the line, decide
 * whether a new bundle is about to start, and, if this is the case,
 * submit and reset the current bundle.
 *
 * A bundle boundary is detected either when the classified type
 * differs from the current bundle's type, or when the same action
 * type repeats within one bundle (actions_mask check).
 */
512 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
513 struct sfc_mae_actions_bundle *bundle,
514 efx_mae_actions_t *spec,
515 struct rte_flow_error *error)
517 enum sfc_mae_actions_bundle_type bundle_type_new;
520 switch (action->type) {
521 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
522 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
523 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
524 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
528 * Self-sufficient actions, including END, are handled in this
529 * case. No checks for unsupported actions are needed here
530 * because parsing doesn't occur at this point.
532 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
536 if (bundle_type_new != bundle->type ||
537 (bundle->actions_mask & (1ULL << action->type)) != 0) {
538 rc = sfc_mae_actions_bundle_submit(bundle, spec);
542 memset(bundle, 0, sizeof(*bundle));
545 bundle->type = bundle_type_new;
/* Error path: submit failure is reported via rte_flow_error_set(). */
550 return rte_flow_error_set(error, rc,
551 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
552 "Failed to request the (group of) action(s)");
/* Record the TPID (EtherType) for the pending VLAN_PUSH bundle. */
556 sfc_mae_rule_parse_action_of_push_vlan(
557 const struct rte_flow_action_of_push_vlan *conf,
558 struct sfc_mae_actions_bundle *bundle)
560 bundle->vlan_push_tpid = conf->ethertype;
/*
 * Merge the 12-bit VLAN ID (big-endian, masked to the VID field)
 * into the TCI accumulated for the pending VLAN_PUSH bundle.
 */
564 sfc_mae_rule_parse_action_of_set_vlan_vid(
565 const struct rte_flow_action_of_set_vlan_vid *conf,
566 struct sfc_mae_actions_bundle *bundle)
568 bundle->vlan_push_tci |= (conf->vlan_vid &
569 rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
/*
 * Merge the 3-bit PCP value, shifted into the top bits of the TCI,
 * into the TCI accumulated for the pending VLAN_PUSH bundle.
 */
573 sfc_mae_rule_parse_action_of_set_vlan_pcp(
574 const struct rte_flow_action_of_set_vlan_pcp *conf,
575 struct sfc_mae_actions_bundle *bundle)
577 uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
578 RTE_LEN2MASK(3, uint8_t)) << 13;
580 bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
/* Translate the MARK action's ID into an MAE mark action. */
584 sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
585 efx_mae_actions_t *spec)
587 return efx_mae_action_set_populate_mark(spec, conf->id);
/*
 * Translate the PHY_PORT action into an MAE "deliver" action.
 * With conf->original set, use the NIC's own assigned port; otherwise
 * use the explicit conf->index. The port number is converted to an
 * mport selector before being installed into the action set spec.
 */
591 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
592 const struct rte_flow_action_phy_port *conf,
593 efx_mae_actions_t *spec)
595 efx_mport_sel_t mport;
599 if (conf->original != 0)
600 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
602 phy_port = conf->index;
604 rc = efx_mae_mport_by_phy_port(phy_port, &mport);
608 return efx_mae_action_set_populate_deliver(spec, &mport);
/*
 * Parse a single RTE flow action into either the current bundle
 * (VLAN push sub-actions) or directly into the MAE action set spec.
 *
 * Each case first runs SFC_BUILD_SET_OVERFLOW to (compile-time)
 * verify the action type fits into the 64-bit actions_mask, then
 * dispatches. On success the action type is recorded in the
 * bundle's actions_mask so repeats can be detected by
 * sfc_mae_actions_bundle_sync(). `break` statements and the success
 * return are elided from this excerpt.
 */
612 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
613 const struct rte_flow_action *action,
614 struct sfc_mae_actions_bundle *bundle,
615 efx_mae_actions_t *spec,
616 struct rte_flow_error *error)
620 switch (action->type) {
621 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
622 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
623 bundle->actions_mask);
624 rc = efx_mae_action_set_populate_vlan_pop(spec);
626 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
627 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
628 bundle->actions_mask);
629 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
631 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
632 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
633 bundle->actions_mask);
634 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
636 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
637 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
638 bundle->actions_mask);
639 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
641 case RTE_FLOW_ACTION_TYPE_FLAG:
642 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
643 bundle->actions_mask);
644 rc = efx_mae_action_set_populate_flag(spec);
646 case RTE_FLOW_ACTION_TYPE_MARK:
647 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
648 bundle->actions_mask);
649 rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
651 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
652 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
653 bundle->actions_mask);
654 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
657 return rte_flow_error_set(error, ENOTSUP,
658 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
659 "Unsupported action");
/* Error path: populate failures are reported via rte_flow_error_set(). */
663 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
664 NULL, "Failed to request the action");
666 bundle->actions_mask |= (1ULL << action->type);
/*
 * Parse the whole RTE flow action list into an MAE action set.
 *
 * A libefx action set spec is built by walking the actions, syncing
 * the bundle before each action and once more for the terminating
 * END action. The result is then either attached to an existing
 * identical software action set (in which case the freshly built
 * spec is discarded) or registered as a new one, which takes
 * ownership of the spec. Failure paths use goto-based cleanup to
 * finalise the spec.
 */
673 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
674 const struct rte_flow_action actions[],
675 struct sfc_mae_action_set **action_setp,
676 struct rte_flow_error *error)
678 struct sfc_mae_actions_bundle bundle = {0};
679 const struct rte_flow_action *action;
680 efx_mae_actions_t *spec;
683 if (actions == NULL) {
684 return rte_flow_error_set(error, EINVAL,
685 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
689 rc = efx_mae_action_set_spec_init(sa->nic, &spec);
691 goto fail_action_set_spec_init;
693 for (action = actions;
694 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
695 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
697 goto fail_rule_parse_action;
699 rc = sfc_mae_rule_parse_action(sa, action, &bundle, spec,
702 goto fail_rule_parse_action;
/* Flush the last bundle (action now points at the END action). */
705 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
707 goto fail_rule_parse_action;
/* Reuse an equal existing action set if one is registered. */
709 *action_setp = sfc_mae_action_set_attach(sa, spec);
710 if (*action_setp != NULL) {
711 efx_mae_action_set_spec_fini(sa->nic, spec);
715 rc = sfc_mae_action_set_add(sa, spec, action_setp);
717 goto fail_action_set_add;
722 fail_rule_parse_action:
723 efx_mae_action_set_spec_fini(sa->nic, spec);
725 fail_action_set_spec_init:
727 rc = rte_flow_error_set(error, rc,
728 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
729 NULL, "Failed to process the action");
/*
 * Report whether two MAE match specifications belong to the same
 * rule class according to libefx. A comparison failure is treated
 * conservatively as "not the same class" (false).
 */
735 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
736 const efx_mae_match_spec_t *left,
737 const efx_mae_match_spec_t *right)
739 bool have_same_class;
742 rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
745 return (rc == 0) ? have_same_class : false;
/*
 * Best-effort validation of a candidate rule: walk the active flow
 * list (newest first) and accept the rule if its match spec is in
 * the same class as some already-inserted MAE rule, since that class
 * is then known to be usable. VNIC-level (filter) flows are skipped.
 * The return paths are elided from this excerpt; the sfc_info()
 * message documents why this is only a software-level check.
 */
749 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
750 struct sfc_flow_spec_mae *spec)
752 const struct rte_flow *entry;
754 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
755 const struct sfc_flow_spec *entry_spec = &entry->spec;
756 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
757 const efx_mae_match_spec_t *left = es_mae->match_spec;
758 const efx_mae_match_spec_t *right = spec->match_spec;
760 switch (entry_spec->type) {
761 case SFC_FLOW_SPEC_FILTER:
762 /* Ignore VNIC-level flows */
764 case SFC_FLOW_SPEC_MAE:
765 if (sfc_mae_rules_class_cmp(sa, left, right))
773 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
774 "support for inner frame pattern items is not guaranteed; "
775 "other than that, the items are valid from SW standpoint");
780 * Confirm that a given flow can be accepted by the FW.
783 * Software adapter context
785 * Flow to be verified
787 * Zero on success and non-zero in the case of error.
788 * A special value of EAGAIN indicates that the adapter is
789 * not in started state. This state is compulsory because
790 * it only makes sense to compare the rule class of the flow
791 * being validated with classes of the active rules.
792 * Such classes are known to be supported by the FW.
/* See the contract described in the comment block above. */
795 sfc_mae_flow_verify(struct sfc_adapter *sa,
796 struct rte_flow *flow)
798 struct sfc_flow_spec *spec = &flow->spec;
799 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
801 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Class comparison is only meaningful against active rules. */
803 if (sa->state != SFC_ADAPTER_STARTED)
806 return sfc_mae_action_rule_class_verify(sa, spec_mae);
/*
 * Insert an MAE flow into hardware: enable (allocate if first user)
 * the FW action set, then insert the action rule referencing it.
 * On rule insertion failure the action set is disabled again
 * (goto-based unwind). The asserts require a not-yet-inserted flow
 * with a parsed action set.
 */
810 sfc_mae_flow_insert(struct sfc_adapter *sa,
811 struct rte_flow *flow)
813 struct sfc_flow_spec *spec = &flow->spec;
814 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
815 struct sfc_mae_action_set *action_set = spec_mae->action_set;
816 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
819 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
820 SFC_ASSERT(action_set != NULL);
822 rc = sfc_mae_action_set_enable(sa, action_set);
824 goto fail_action_set_enable;
826 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
827 NULL, &fw_rsrc->aset_id,
830 goto fail_action_rule_insert;
834 fail_action_rule_insert:
835 (void)sfc_mae_action_set_disable(sa, action_set);
837 fail_action_set_enable:
/*
 * Remove an inserted MAE flow from hardware: delete the action rule,
 * invalidate the cached rule ID and release this flow's use of the
 * FW action set resource. The asserts require an inserted flow with
 * an action set; the error-handling lines between the remove call
 * and the ID reset are elided from this excerpt.
 */
842 sfc_mae_flow_remove(struct sfc_adapter *sa,
843 struct rte_flow *flow)
845 struct sfc_flow_spec *spec = &flow->spec;
846 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
847 struct sfc_mae_action_set *action_set = spec_mae->action_set;
850 SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
851 SFC_ASSERT(action_set != NULL);
853 rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
857 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
859 return sfc_mae_action_set_disable(sa, action_set);