1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
12 #include <rte_common.h>
/*
 * Initialise MAE (Match-Action Engine) support state for the adapter.
 * Marks MAE as unsupported when the NIC configuration says so; otherwise
 * initialises the libefx MAE context, queries its limits and records the
 * maximum number of action rule priorities.  Errors unwind through the
 * fail_* labels (goto-based cleanup).
 */
20 sfc_mae_attach(struct sfc_adapter *sa)
22 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
23 struct sfc_mae *mae = &sa->mae;
24 efx_mae_limits_t limits;
27 sfc_log_init(sa, "entry");
/* MAE may simply be absent on this NIC/firmware; that is not an error. */
29 if (!encp->enc_mae_supported) {
30 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
34 sfc_log_init(sa, "init MAE");
35 rc = efx_mae_init(sa->nic);
39 sfc_log_init(sa, "get MAE limits");
40 rc = efx_mae_get_limits(sa->nic, &limits);
42 goto fail_mae_get_limits;
44 mae->status = SFC_MAE_STATUS_SUPPORTED;
45 mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
/* List of reference-counted action sets shared between flows. */
46 TAILQ_INIT(&mae->action_sets);
48 sfc_log_init(sa, "done");
/* Error path: undo efx_mae_init() when the limits query fails. */
53 efx_mae_fini(sa->nic);
56 sfc_log_init(sa, "failed %d", rc);
/*
 * Tear down MAE state.  efx_mae_fini() is invoked only if attach
 * previously reached SFC_MAE_STATUS_SUPPORTED; otherwise there is
 * nothing to finalise on the libefx side.
 */
62 sfc_mae_detach(struct sfc_adapter *sa)
64 struct sfc_mae *mae = &sa->mae;
/* Remember the status before resetting it below. */
65 enum sfc_mae_status status_prev = mae->status;
67 sfc_log_init(sa, "entry");
69 mae->nb_action_rule_prios_max = 0;
70 mae->status = SFC_MAE_STATUS_UNKNOWN;
72 if (status_prev != SFC_MAE_STATUS_SUPPORTED)
75 efx_mae_fini(sa->nic);
77 sfc_log_init(sa, "done");
/*
 * Look up an existing action set whose spec is equal to the given one
 * and, if found, take an extra reference on it.  Adapter lock must be
 * held.  Presumably returns the matching set or NULL when none matches
 * -- the tail of the function is outside this excerpt; TODO confirm.
 */
80 static struct sfc_mae_action_set *
81 sfc_mae_action_set_attach(struct sfc_adapter *sa,
82 const efx_mae_actions_t *spec)
84 struct sfc_mae_action_set *action_set;
85 struct sfc_mae *mae = &sa->mae;
87 SFC_ASSERT(sfc_adapter_is_locked(sa));
89 TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
90 if (efx_mae_action_set_specs_equal(action_set->spec, spec)) {
/* Share the existing set instead of creating a duplicate. */
91 ++(action_set->refcnt);
/*
 * Allocate a new action set with refcnt 1, take ownership of "spec",
 * link the set into the adapter-wide list and return it via
 * *action_setp.  Adapter lock must be held.
 */
100 sfc_mae_action_set_add(struct sfc_adapter *sa,
101 efx_mae_actions_t *spec,
102 struct sfc_mae_action_set **action_setp)
104 struct sfc_mae_action_set *action_set;
105 struct sfc_mae *mae = &sa->mae;
107 SFC_ASSERT(sfc_adapter_is_locked(sa));
109 action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
110 if (action_set == NULL)
113 action_set->refcnt = 1;
114 action_set->spec = spec;
/* No FW-side resource yet; it is allocated lazily on enable. */
116 action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
118 TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
120 *action_setp = action_set;
/*
 * Drop a reference on the action set.  On the last reference, assert
 * the FW-side resource has already been released, finalise the spec,
 * unlink the set and free it.  Adapter lock must be held.
 */
126 sfc_mae_action_set_del(struct sfc_adapter *sa,
127 struct sfc_mae_action_set *action_set)
129 struct sfc_mae *mae = &sa->mae;
131 SFC_ASSERT(sfc_adapter_is_locked(sa));
132 SFC_ASSERT(action_set->refcnt != 0);
134 --(action_set->refcnt);
/* Other flows still reference this set; keep it. */
136 if (action_set->refcnt != 0)
/* Last reference: the FW-side resource must be gone by now. */
139 SFC_ASSERT(action_set->fw_rsrc.aset_id.id == EFX_MAE_RSRC_ID_INVALID);
140 SFC_ASSERT(action_set->fw_rsrc.refcnt == 0);
142 efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
143 TAILQ_REMOVE(&mae->action_sets, action_set, entries);
144 rte_free(action_set);
/*
 * Enable the action set in FW: allocate the FW-side resource on the
 * first use (fw_rsrc refcnt transition 0 -> 1).  Adapter lock must be
 * held.
 */
148 sfc_mae_action_set_enable(struct sfc_adapter *sa,
149 struct sfc_mae_action_set *action_set)
151 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
154 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* First user: the FW resource does not exist yet. */
156 if (fw_rsrc->refcnt == 0) {
157 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
158 SFC_ASSERT(action_set->spec != NULL);
160 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
/*
 * Disable the action set in FW: free the FW-side resource on the last
 * use (fw_rsrc refcnt transition 1 -> 0).  Adapter lock must be held.
 */
172 sfc_mae_action_set_disable(struct sfc_adapter *sa,
173 struct sfc_mae_action_set *action_set)
175 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
178 SFC_ASSERT(sfc_adapter_is_locked(sa));
179 SFC_ASSERT(fw_rsrc->aset_id.id != EFX_MAE_RSRC_ID_INVALID);
180 SFC_ASSERT(fw_rsrc->refcnt != 0);
/* Last user: release the FW resource and invalidate the ID. */
182 if (fw_rsrc->refcnt == 1) {
183 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
187 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
/*
 * Release per-flow MAE resources: drop the action set reference and
 * finalise the match spec, if present.  The rule itself must already
 * have been removed from FW (rule_id asserted invalid below).
 */
196 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
197 struct rte_flow *flow)
199 struct sfc_flow_spec *spec;
200 struct sfc_flow_spec_mae *spec_mae;
210 spec_mae = &spec->mae;
212 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
214 if (spec_mae->action_set != NULL)
215 sfc_mae_action_set_del(sa, spec_mae->action_set);
217 if (spec_mae->match_spec != NULL)
218 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
/*
 * Parse an RTE flow PHY_PORT pattern item: convert the physical port
 * index to an MAE m-port selector and set it as the traffic source in
 * the action rule match spec.  Only one traffic-source item is allowed
 * per pattern (tracked by ctx_mae->match_mport_set).
 */
222 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
223 struct sfc_flow_parse_ctx *ctx,
224 struct rte_flow_error *error)
226 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
227 const struct rte_flow_item_phy_port supp_mask = {
230 const void *def_mask = &rte_flow_item_phy_port_mask;
231 const struct rte_flow_item_phy_port *spec = NULL;
232 const struct rte_flow_item_phy_port *mask = NULL;
233 efx_mport_sel_t mport_v;
/* Reject a second traffic-source item (PHY_PORT/PF/VF). */
236 if (ctx_mae->match_mport_set) {
237 return rte_flow_error_set(error, ENOTSUP,
238 RTE_FLOW_ERROR_TYPE_ITEM, item,
239 "Can't handle multiple traffic source items");
/* Fill in spec/mask from the item, falling back to the default mask. */
242 rc = sfc_flow_parse_init(item,
243 (const void **)&spec, (const void **)&mask,
244 (const void *)&supp_mask, def_mask,
245 sizeof(struct rte_flow_item_phy_port), error);
/* Only an exact-match on the port index is supported. */
249 if (mask->index != supp_mask.index) {
250 return rte_flow_error_set(error, EINVAL,
251 RTE_FLOW_ERROR_TYPE_ITEM, item,
252 "Bad mask in the PHY_PORT pattern item");
255 /* If "spec" is not set, could be any physical port */
259 rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
261 return rte_flow_error_set(error, rc,
262 RTE_FLOW_ERROR_TYPE_ITEM, item,
263 "Failed to convert the PHY_PORT index");
266 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
269 return rte_flow_error_set(error, rc,
270 RTE_FLOW_ERROR_TYPE_ITEM, item,
271 "Failed to set MPORT for the PHY_PORT");
274 ctx_mae->match_mport_set = B_TRUE;
/*
 * Parse an RTE flow PF pattern item: select the PF of this adapter
 * (VF ID set to EFX_PCI_VF_INVALID) as the traffic source m-port in
 * the action rule match spec.  The item itself carries no spec/mask.
 */
280 sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
281 struct sfc_flow_parse_ctx *ctx,
282 struct rte_flow_error *error)
284 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
285 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
286 efx_mport_sel_t mport_v;
/* Reject a second traffic-source item (PHY_PORT/PF/VF). */
289 if (ctx_mae->match_mport_set) {
290 return rte_flow_error_set(error, ENOTSUP,
291 RTE_FLOW_ERROR_TYPE_ITEM, item,
292 "Can't handle multiple traffic source items");
295 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
298 return rte_flow_error_set(error, rc,
299 RTE_FLOW_ERROR_TYPE_ITEM, item,
300 "Failed to convert the PF ID");
303 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
306 return rte_flow_error_set(error, rc,
307 RTE_FLOW_ERROR_TYPE_ITEM, item,
308 "Failed to set MPORT for the PF");
311 ctx_mae->match_mport_set = B_TRUE;
/*
 * Parse an RTE flow VF pattern item: select a specific VF (relative to
 * this adapter's PF) as the traffic source m-port in the action rule
 * match spec.  A wildcard VF item (no spec) is rejected as unsupported.
 */
317 sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
318 struct sfc_flow_parse_ctx *ctx,
319 struct rte_flow_error *error)
321 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
322 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
323 const struct rte_flow_item_vf supp_mask = {
326 const void *def_mask = &rte_flow_item_vf_mask;
327 const struct rte_flow_item_vf *spec = NULL;
328 const struct rte_flow_item_vf *mask = NULL;
329 efx_mport_sel_t mport_v;
/* Reject a second traffic-source item (PHY_PORT/PF/VF). */
332 if (ctx_mae->match_mport_set) {
333 return rte_flow_error_set(error, ENOTSUP,
334 RTE_FLOW_ERROR_TYPE_ITEM, item,
335 "Can't handle multiple traffic source items");
338 rc = sfc_flow_parse_init(item,
339 (const void **)&spec, (const void **)&mask,
340 (const void *)&supp_mask, def_mask,
341 sizeof(struct rte_flow_item_vf), error);
/* Only an exact-match on the VF ID is supported. */
345 if (mask->id != supp_mask.id) {
346 return rte_flow_error_set(error, EINVAL,
347 RTE_FLOW_ERROR_TYPE_ITEM, item,
348 "Bad mask in the VF pattern item");
352 * If "spec" is not set, the item requests any VF related to the
353 * PF of the current DPDK port (but not the PF itself).
354 * Reject this match criterion as unsupported.
357 return rte_flow_error_set(error, EINVAL,
358 RTE_FLOW_ERROR_TYPE_ITEM, item,
359 "Bad spec in the VF pattern item");
362 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
364 return rte_flow_error_set(error, rc,
365 RTE_FLOW_ERROR_TYPE_ITEM, item,
366 "Failed to convert the PF + VF IDs");
369 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
372 return rte_flow_error_set(error, rc,
373 RTE_FLOW_ERROR_TYPE_ITEM, item,
374 "Failed to set MPORT for the PF + VF");
377 ctx_mae->match_mport_set = B_TRUE;
/*
 * Maps one MAE match field onto the offset/size of the corresponding
 * member inside an rte_flow_item_* structure.
 */
382 struct sfc_mae_field_locator {
383 efx_mae_field_id_t field_id;
385 /* Field offset in the corresponding rte_flow_item_ struct */
/*
 * Build the supported-fields mask for an item type: all-ones bytes at
 * every located field, zeroes elsewhere.
 */
390 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
391 unsigned int nb_field_locators, void *mask_ptr,
396 memset(mask_ptr, 0, mask_size);
398 for (i = 0; i < nb_field_locators; ++i) {
399 const struct sfc_mae_field_locator *fl = &field_locators[i];
/* Locator must lie entirely within the mask buffer. */
401 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
402 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
/*
 * Copy every located field's value and mask from the item's spec/mask
 * byte images into the MAE match specification.
 */
407 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
408 unsigned int nb_field_locators, const uint8_t *spec,
409 const uint8_t *mask, efx_mae_match_spec_t *efx_spec,
410 struct rte_flow_error *error)
415 for (i = 0; i < nb_field_locators; ++i) {
416 const struct sfc_mae_field_locator *fl = &field_locators[i];
418 rc = efx_mae_match_spec_field_set(efx_spec, fl->field_id,
419 fl->size, spec + fl->ofst,
420 fl->size, mask + fl->ofst);
/* Error path: wrap the libefx error code into an RTE flow error. */
426 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
427 NULL, "Failed to process item fields");
/* Field locators for the ETH item: EtherType, destination and source MAC. */
433 static const struct sfc_mae_field_locator flocs_eth[] = {
435 EFX_MAE_FIELD_ETHER_TYPE_BE,
436 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
437 offsetof(struct rte_flow_item_eth, type),
440 EFX_MAE_FIELD_ETH_DADDR_BE,
441 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
442 offsetof(struct rte_flow_item_eth, dst),
445 EFX_MAE_FIELD_ETH_SADDR_BE,
446 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
447 offsetof(struct rte_flow_item_eth, src),
/*
 * Parse an RTE flow ETH pattern item into the MAE match spec using the
 * flocs_eth field locators.  The supported mask is built dynamically so
 * it always stays in sync with the locator table.
 */
452 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
453 struct sfc_flow_parse_ctx *ctx,
454 struct rte_flow_error *error)
456 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
457 struct rte_flow_item_eth supp_mask;
458 const uint8_t *spec = NULL;
459 const uint8_t *mask = NULL;
462 sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
463 &supp_mask, sizeof(supp_mask));
465 rc = sfc_flow_parse_init(item,
466 (const void **)&spec, (const void **)&mask,
467 (const void *)&supp_mask,
468 &rte_flow_item_eth_mask,
469 sizeof(struct rte_flow_item_eth), error);
473 /* If "spec" is not set, could be any Ethernet */
477 return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
478 ctx_mae->match_spec_action, error);
/*
 * Pattern item dispatch table for the MAE backend: item type, layering
 * constraints and the parse callback for each supported item.
 */
481 static const struct sfc_flow_item sfc_flow_items[] = {
483 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
485 * In terms of RTE flow, this item is a META one,
486 * and its position in the pattern is don't care.
488 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
489 .layer = SFC_FLOW_ITEM_ANY_LAYER,
490 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
491 .parse = sfc_mae_rule_parse_item_phy_port,
494 .type = RTE_FLOW_ITEM_TYPE_PF,
496 * In terms of RTE flow, this item is a META one,
497 * and its position in the pattern is don't care.
499 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
500 .layer = SFC_FLOW_ITEM_ANY_LAYER,
501 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
502 .parse = sfc_mae_rule_parse_item_pf,
505 .type = RTE_FLOW_ITEM_TYPE_VF,
507 * In terms of RTE flow, this item is a META one,
508 * and its position in the pattern is don't care.
510 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
511 .layer = SFC_FLOW_ITEM_ANY_LAYER,
512 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
513 .parse = sfc_mae_rule_parse_item_vf,
/* ETH must start the pattern and establishes the L2 layer. */
516 .type = RTE_FLOW_ITEM_TYPE_ETH,
517 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
518 .layer = SFC_FLOW_ITEM_L2,
519 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
520 .parse = sfc_mae_rule_parse_item_eth,
/*
 * Parse the whole RTE flow pattern into an MAE action rule match spec.
 * On success, ownership of the match spec passes to "spec"; on failure
 * the spec is finalised via the fail labels.
 */
525 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
526 const struct rte_flow_item pattern[],
527 struct sfc_flow_spec_mae *spec,
528 struct rte_flow_error *error)
530 struct sfc_mae_parse_ctx ctx_mae;
531 struct sfc_flow_parse_ctx ctx;
534 memset(&ctx_mae, 0, sizeof(ctx_mae));
537 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
539 &ctx_mae.match_spec_action);
541 rc = rte_flow_error_set(error, rc,
542 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
543 "Failed to initialise action rule match specification");
544 goto fail_init_match_spec_action;
547 ctx.type = SFC_FLOW_PARSE_CTX_MAE;
/* Walk the pattern via the generic parser and the item table above. */
550 rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
551 pattern, &ctx, error);
553 goto fail_parse_pattern;
/* Let libefx check the combination of matched fields as a whole. */
555 if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
556 rc = rte_flow_error_set(error, ENOTSUP,
557 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
558 "Inconsistent pattern");
559 goto fail_validate_match_spec_action;
/* Success: hand the match spec over to the flow spec. */
562 spec->match_spec = ctx_mae.match_spec_action;
566 fail_validate_match_spec_action:
568 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
570 fail_init_match_spec_action:
575 * An action supported by MAE may correspond to a bundle of RTE flow actions,
576 * in example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
577 * That is, related RTE flow actions need to be tracked as parts of a whole
578 * so that they can be combined into a single action and submitted to MAE
579 * representation of a given rule's action set.
581 * Each RTE flow action provided by an application gets classified as
582 * one belonging to some bundle type. If an action is not supposed to
583 * belong to any bundle, or if this action is END, it is described as
584 * one belonging to a dummy bundle of type EMPTY.
586 * A currently tracked bundle will be submitted if a repeating
587 * action or an action of different bundle type follows.
590 enum sfc_mae_actions_bundle_type {
591 SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
592 SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
/* State of the bundle currently being accumulated during action parsing. */
595 struct sfc_mae_actions_bundle {
596 enum sfc_mae_actions_bundle_type type;
598 /* Indicates actions already tracked by the current bundle */
599 uint64_t actions_mask;
601 /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
602 rte_be16_t vlan_push_tpid;
603 rte_be16_t vlan_push_tci;
607 * Combine configuration of RTE flow actions tracked by the bundle into a
608 * single action and submit the result to MAE action set specification.
609 * Do nothing in the case of dummy action bundle.
612 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
613 efx_mae_actions_t *spec)
617 switch (bundle->type) {
618 case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
620 case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
/* TPID and TCI were accumulated by the per-action parse helpers. */
621 rc = efx_mae_action_set_populate_vlan_push(
622 spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
633 * Given the type of the next RTE flow action in the line, decide
634 * whether a new bundle is about to start, and, if this is the case,
635 * submit and reset the current bundle.
638 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
639 struct sfc_mae_actions_bundle *bundle,
640 efx_mae_actions_t *spec,
641 struct rte_flow_error *error)
643 enum sfc_mae_actions_bundle_type bundle_type_new;
/* Classify the incoming action into a bundle type. */
646 switch (action->type) {
647 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
648 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
649 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
650 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
654 * Self-sufficient actions, including END, are handled in this
655 * case. No checks for unsupported actions are needed here
656 * because parsing doesn't occur at this point.
658 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
/*
 * A type change or a repeated action of the same type both mean the
 * current bundle is complete: submit it and start a fresh one.
 */
662 if (bundle_type_new != bundle->type ||
663 (bundle->actions_mask & (1ULL << action->type)) != 0) {
664 rc = sfc_mae_actions_bundle_submit(bundle, spec);
668 memset(bundle, 0, sizeof(*bundle));
671 bundle->type = bundle_type_new;
676 return rte_flow_error_set(error, rc,
677 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
678 "Failed to request the (group of) action(s)");
/* Record the TPID (EtherType) for the VLAN_PUSH bundle being built. */
682 sfc_mae_rule_parse_action_of_push_vlan(
683 const struct rte_flow_action_of_push_vlan *conf,
684 struct sfc_mae_actions_bundle *bundle)
686 bundle->vlan_push_tpid = conf->ethertype;
/* Merge the 12-bit VLAN ID into the TCI of the VLAN_PUSH bundle. */
690 sfc_mae_rule_parse_action_of_set_vlan_vid(
691 const struct rte_flow_action_of_set_vlan_vid *conf,
692 struct sfc_mae_actions_bundle *bundle)
694 bundle->vlan_push_tci |= (conf->vlan_vid &
695 rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
/* Merge the 3-bit PCP (shifted into bits 15:13) into the bundle's TCI. */
699 sfc_mae_rule_parse_action_of_set_vlan_pcp(
700 const struct rte_flow_action_of_set_vlan_pcp *conf,
701 struct sfc_mae_actions_bundle *bundle)
703 uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
704 RTE_LEN2MASK(3, uint8_t)) << 13;
706 bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
/* Translate the MARK action into an MAE mark populate call. */
710 sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
711 efx_mae_actions_t *spec)
713 return efx_mae_action_set_populate_mark(spec, conf->id);
/*
 * Translate the PHY_PORT action into an MAE "deliver" action.  When
 * conf->original is set, the adapter's own assigned port is used
 * instead of conf->index.
 */
717 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
718 const struct rte_flow_action_phy_port *conf,
719 efx_mae_actions_t *spec)
721 efx_mport_sel_t mport;
725 if (conf->original != 0)
726 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
728 phy_port = conf->index;
730 rc = efx_mae_mport_by_phy_port(phy_port, &mport);
734 return efx_mae_action_set_populate_deliver(spec, &mport);
/*
 * Parse one RTE flow action: dispatch on the action type, populate the
 * MAE action set spec (or the current bundle for VLAN-push parts), and
 * record the action type in bundle->actions_mask.
 * SFC_BUILD_SET_OVERFLOW presumably build-time checks that the action
 * type fits into the 64-bit mask -- TODO confirm against its definition.
 */
738 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
739 const struct rte_flow_action *action,
740 struct sfc_mae_actions_bundle *bundle,
741 efx_mae_actions_t *spec,
742 struct rte_flow_error *error)
746 switch (action->type) {
747 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
748 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
749 bundle->actions_mask);
750 rc = efx_mae_action_set_populate_vlan_pop(spec);
752 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
753 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
754 bundle->actions_mask);
/* VLAN-push parts only update the bundle; submit happens later. */
755 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
757 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
758 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
759 bundle->actions_mask);
760 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
762 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
763 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
764 bundle->actions_mask);
765 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
767 case RTE_FLOW_ACTION_TYPE_FLAG:
768 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
769 bundle->actions_mask);
770 rc = efx_mae_action_set_populate_flag(spec);
772 case RTE_FLOW_ACTION_TYPE_MARK:
773 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
774 bundle->actions_mask);
775 rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
777 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
778 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
779 bundle->actions_mask);
780 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
/* Anything not handled above is unsupported by this backend. */
783 return rte_flow_error_set(error, ENOTSUP,
784 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
785 "Unsupported action");
789 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
790 NULL, "Failed to request the action");
792 bundle->actions_mask |= (1ULL << action->type);
/*
 * Parse the whole RTE flow action list into an MAE action set.
 * An existing action set with an equal spec is reused (the freshly
 * built spec is then discarded); otherwise a new set is registered.
 */
799 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
800 const struct rte_flow_action actions[],
801 struct sfc_mae_action_set **action_setp,
802 struct rte_flow_error *error)
804 struct sfc_mae_actions_bundle bundle = {0};
805 const struct rte_flow_action *action;
806 efx_mae_actions_t *spec;
809 if (actions == NULL) {
810 return rte_flow_error_set(error, EINVAL,
811 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
815 rc = efx_mae_action_set_spec_init(sa->nic, &spec);
817 goto fail_action_set_spec_init;
/* Sync the bundle before each action, then parse the action itself. */
819 for (action = actions;
820 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
821 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
823 goto fail_rule_parse_action;
825 rc = sfc_mae_rule_parse_action(sa, action, &bundle, spec,
828 goto fail_rule_parse_action;
/* Final sync on END flushes any still-pending bundle. */
831 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
833 goto fail_rule_parse_action;
/* Reuse an equal existing action set when possible. */
835 *action_setp = sfc_mae_action_set_attach(sa, spec);
836 if (*action_setp != NULL) {
837 efx_mae_action_set_spec_fini(sa->nic, spec);
841 rc = sfc_mae_action_set_add(sa, spec, action_setp);
843 goto fail_action_set_add;
848 fail_rule_parse_action:
849 efx_mae_action_set_spec_fini(sa->nic, spec);
851 fail_action_set_spec_init:
853 rc = rte_flow_error_set(error, rc,
854 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
855 NULL, "Failed to process the action");
/*
 * Report whether two match specs belong to the same rule class per FW.
 * A failure of the libefx comparison is conservatively treated as
 * "not the same class" (false).
 */
861 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
862 const efx_mae_match_spec_t *left,
863 const efx_mae_match_spec_t *right)
865 bool have_same_class;
868 rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
871 return (rc == 0) ? have_same_class : false;
/*
 * Check the new rule's class against already inserted MAE rules,
 * scanning the adapter flow list newest-first.  VNIC-level (filter)
 * flows are skipped.  FW cannot validate a rule directly, hence the
 * class-comparison approach and the informational message below.
 */
875 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
876 struct sfc_flow_spec_mae *spec)
878 const struct rte_flow *entry;
880 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
881 const struct sfc_flow_spec *entry_spec = &entry->spec;
882 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
883 const efx_mae_match_spec_t *left = es_mae->match_spec;
884 const efx_mae_match_spec_t *right = spec->match_spec;
886 switch (entry_spec->type) {
887 case SFC_FLOW_SPEC_FILTER:
888 /* Ignore VNIC-level flows */
890 case SFC_FLOW_SPEC_MAE:
891 if (sfc_mae_rules_class_cmp(sa, left, right))
899 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
900 "support for inner frame pattern items is not guaranteed; "
901 "other than that, the items are valid from SW standpoint");
906 * Confirm that a given flow can be accepted by the FW.
909 * Software adapter context
911 * Flow to be verified
913 * Zero on success and non-zero in the case of error.
914 * A special value of EAGAIN indicates that the adapter is
915 * not in started state. This state is compulsory because
916 * it only makes sense to compare the rule class of the flow
917 * being validated with classes of the active rules.
918 * Such classes are wittingly supported by the FW.
921 sfc_mae_flow_verify(struct sfc_adapter *sa,
922 struct rte_flow *flow)
924 struct sfc_flow_spec *spec = &flow->spec;
925 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
927 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Class comparison is only meaningful against active rules. */
929 if (sa->state != SFC_ADAPTER_STARTED)
932 return sfc_mae_action_rule_class_verify(sa, spec_mae);
/*
 * Insert the flow into FW: enable (allocate if first user) the action
 * set, then insert the action rule referencing it.  On rule-insert
 * failure the action set enable is rolled back.
 * NOTE(review): action_set is dereferenced for fw_rsrc before the
 * SFC_ASSERT(action_set != NULL) check -- the assert is advisory only.
 */
936 sfc_mae_flow_insert(struct sfc_adapter *sa,
937 struct rte_flow *flow)
939 struct sfc_flow_spec *spec = &flow->spec;
940 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
941 struct sfc_mae_action_set *action_set = spec_mae->action_set;
942 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
945 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
946 SFC_ASSERT(action_set != NULL);
948 rc = sfc_mae_action_set_enable(sa, action_set);
950 goto fail_action_set_enable;
952 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
953 NULL, &fw_rsrc->aset_id,
956 goto fail_action_rule_insert;
960 fail_action_rule_insert:
/* Roll back the enable; its status is deliberately ignored here. */
961 (void)sfc_mae_action_set_disable(sa, action_set);
963 fail_action_set_enable:
/*
 * Remove the flow from FW: delete the action rule, invalidate the
 * cached rule ID and then disable (possibly free) the action set.
 */
968 sfc_mae_flow_remove(struct sfc_adapter *sa,
969 struct rte_flow *flow)
971 struct sfc_flow_spec *spec = &flow->spec;
972 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
973 struct sfc_mae_action_set *action_set = spec_mae->action_set;
976 SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
977 SFC_ASSERT(action_set != NULL);
979 rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
983 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
985 return sfc_mae_action_set_disable(sa, action_set);