1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
12 #include <rte_common.h>
/*
 * Bring up MAE (Match-Action Engine) support for the adapter.
 * Queries firmware capability, initialises the MAE library state and
 * caches the action-rule priority limit.
 * NOTE(review): this excerpt elides interior lines (return type, braces,
 * early returns, failure labels) — flow below inferred from visible code only.
 */
20 sfc_mae_attach(struct sfc_adapter *sa)
22 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
23 struct sfc_mae *mae = &sa->mae;
24 efx_mae_limits_t limits;
27 sfc_log_init(sa, "entry");
/* Firmware without MAE support: record that and bail out early
 * (presumably with success so the rest of the driver still works —
 * TODO confirm against elided lines). */
29 if (!encp->enc_mae_supported) {
30 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
34 sfc_log_init(sa, "init MAE");
35 rc = efx_mae_init(sa->nic);
39 sfc_log_init(sa, "get MAE limits");
40 rc = efx_mae_get_limits(sa->nic, &limits);
42 goto fail_mae_get_limits;
/* MAE is usable: remember the max number of action rule priorities
 * and initialise the driver-side list of tracked action sets. */
44 mae->status = SFC_MAE_STATUS_SUPPORTED;
45 mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
46 TAILQ_INIT(&mae->action_sets);
48 sfc_log_init(sa, "done");
/* Unwind path: undo efx_mae_init() when a later step fails. */
53 efx_mae_fini(sa->nic);
56 sfc_log_init(sa, "failed %d", rc);
/*
 * Tear down MAE support; counterpart of sfc_mae_attach().
 * Resets the cached limits/status first, then finalises the MAE library
 * only if attach actually brought it up (status was SUPPORTED).
 */
62 sfc_mae_detach(struct sfc_adapter *sa)
64 struct sfc_mae *mae = &sa->mae;
/* Capture the status before clobbering it so we know whether
 * efx_mae_fini() is owed. */
65 enum sfc_mae_status status_prev = mae->status;
67 sfc_log_init(sa, "entry");
69 mae->nb_action_rule_prios_max = 0;
70 mae->status = SFC_MAE_STATUS_UNKNOWN;
/* Nothing to finalise unless MAE was successfully initialised
 * (an early return is presumably elided here — TODO confirm). */
72 if (status_prev != SFC_MAE_STATUS_SUPPORTED)
75 efx_mae_fini(sa->nic);
77 sfc_log_init(sa, "done");
/*
 * Look up an existing driver-tracked action set whose EFX spec equals
 * the given one; on a match, take an extra reference and (in elided
 * lines) return it. Returns NULL when no equal spec is tracked —
 * presumably, since the tail of the function is elided in this view.
 * Caller must hold the adapter lock.
 */
80 static struct sfc_mae_action_set *
81 sfc_mae_action_set_attach(struct sfc_adapter *sa,
82 const efx_mae_actions_t *spec)
84 struct sfc_mae_action_set *action_set;
85 struct sfc_mae *mae = &sa->mae;
87 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Linear scan over tracked action sets; equality is decided by the
 * EFX library, not by pointer identity. */
89 TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
90 if (efx_mae_action_set_specs_equal(action_set->spec, spec)) {
91 ++(action_set->refcnt);
/*
 * Create a new driver-tracked action set wrapping the given EFX spec
 * (ownership of the spec transfers to the tracking object; it is freed
 * by sfc_mae_action_set_del() on last reference). Starts at refcount 1
 * with no firmware resource allocated yet.
 * Caller must hold the adapter lock. Returns the new object via
 * *action_setp; error return lines are elided in this view.
 */
100 sfc_mae_action_set_add(struct sfc_adapter *sa,
101 efx_mae_actions_t *spec,
102 struct sfc_mae_action_set **action_setp)
104 struct sfc_mae_action_set *action_set;
105 struct sfc_mae *mae = &sa->mae;
107 SFC_ASSERT(sfc_adapter_is_locked(sa));
109 action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
110 if (action_set == NULL)
113 action_set->refcnt = 1;
114 action_set->spec = spec;
/* No FW-side action set exists until sfc_mae_action_set_enable(). */
116 action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
118 TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
120 *action_setp = action_set;
/*
 * Drop one reference on a tracked action set; on the last reference,
 * destroy it: finalise the EFX spec, unlink from the list and free.
 * Callers must have already released the FW resource (asserted below).
 * Caller must hold the adapter lock.
 */
126 sfc_mae_action_set_del(struct sfc_adapter *sa,
127 struct sfc_mae_action_set *action_set)
129 struct sfc_mae *mae = &sa->mae;
131 SFC_ASSERT(sfc_adapter_is_locked(sa));
132 SFC_ASSERT(action_set->refcnt != 0);
134 --(action_set->refcnt);
/* Still referenced by other flows: keep it (an early return is
 * presumably elided here — TODO confirm). */
136 if (action_set->refcnt != 0)
/* Last reference: the FW-side resource must be gone by now. */
139 SFC_ASSERT(action_set->fw_rsrc.aset_id.id == EFX_MAE_RSRC_ID_INVALID);
140 SFC_ASSERT(action_set->fw_rsrc.refcnt == 0);
142 efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
143 TAILQ_REMOVE(&mae->action_sets, action_set, entries);
144 rte_free(action_set);
/*
 * Make the action set usable by hardware: on the first enable
 * (fw_rsrc->refcnt == 0) allocate the FW-side action set from the spec.
 * The refcount bump and error handling are elided in this view.
 * Caller must hold the adapter lock.
 */
148 sfc_mae_action_set_enable(struct sfc_adapter *sa,
149 struct sfc_mae_action_set *action_set)
151 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
154 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* First user: no FW resource may exist yet, and a spec must. */
156 if (fw_rsrc->refcnt == 0) {
157 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
158 SFC_ASSERT(action_set->spec != NULL);
160 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
/*
 * Counterpart of sfc_mae_action_set_enable(): on the last FW-resource
 * reference, free the FW-side action set and mark the id invalid.
 * The refcount decrement and error handling are elided in this view.
 * Caller must hold the adapter lock.
 */
172 sfc_mae_action_set_disable(struct sfc_adapter *sa,
173 struct sfc_mae_action_set *action_set)
175 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
178 SFC_ASSERT(sfc_adapter_is_locked(sa));
179 SFC_ASSERT(fw_rsrc->aset_id.id != EFX_MAE_RSRC_ID_INVALID);
180 SFC_ASSERT(fw_rsrc->refcnt != 0);
/* Last user: release the hardware resource. */
182 if (fw_rsrc->refcnt == 1) {
183 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
187 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
/*
 * Release per-flow MAE resources: drop the flow's reference on its
 * action set and finalise its match specification. The FW-side rule
 * must already have been removed (rule_id asserted invalid).
 * Guard clauses for a NULL flow/spec are presumably in the elided
 * lines — TODO confirm.
 */
196 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
197 struct rte_flow *flow)
199 struct sfc_flow_spec *spec;
200 struct sfc_flow_spec_mae *spec_mae;
210 spec_mae = &spec->mae;
212 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
214 if (spec_mae->action_set != NULL)
215 sfc_mae_action_set_del(sa, spec_mae->action_set);
217 if (spec_mae->match_spec != NULL)
218 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
/*
 * Parse an RTE flow PHY_PORT pattern item into the MAE action-rule
 * match specification: converts the physical port index into an MAE
 * m-port selector and sets it as the match on ingress port.
 * Only one traffic-source item is allowed per pattern.
 */
222 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
223 struct sfc_flow_parse_ctx *ctx,
224 struct rte_flow_error *error)
226 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
/* Supported mask: initialiser elided in this view; presumably allows
 * only the port index field — TODO confirm. */
227 const struct rte_flow_item_phy_port supp_mask = {
230 const void *def_mask = &rte_flow_item_phy_port_mask;
231 const struct rte_flow_item_phy_port *spec = NULL;
232 const struct rte_flow_item_phy_port *mask = NULL;
233 efx_mport_sel_t mport_v;
/* A source m-port may be matched only once per rule. */
236 if (ctx_mae->match_mport_set) {
237 return rte_flow_error_set(error, ENOTSUP,
238 RTE_FLOW_ERROR_TYPE_ITEM, item,
239 "Can't handle multiple traffic source items");
/* Common helper fills spec/mask from the item, validating the mask
 * against supp_mask and falling back to the default mask. */
242 rc = sfc_flow_parse_init(item,
243 (const void **)&spec, (const void **)&mask,
244 (const void *)&supp_mask, def_mask,
245 sizeof(struct rte_flow_item_phy_port), error);
/* Only an exact-match (full) index mask is supported. */
249 if (mask->index != supp_mask.index) {
250 return rte_flow_error_set(error, EINVAL,
251 RTE_FLOW_ERROR_TYPE_ITEM, item,
252 "Bad mask in the PHY_PORT pattern item");
255 /* If "spec" is not set, could be any physical port */
259 rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
261 return rte_flow_error_set(error, rc,
262 RTE_FLOW_ERROR_TYPE_ITEM, item,
263 "Failed to convert the PHY_PORT index");
266 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
269 return rte_flow_error_set(error, rc,
270 RTE_FLOW_ERROR_TYPE_ITEM, item,
271 "Failed to set MPORT for the PHY_PORT");
/* Remember that the traffic source has been pinned down. */
274 ctx_mae->match_mport_set = B_TRUE;
/*
 * Maps one MAE match field to its location inside the corresponding
 * rte_flow_item_* structure. The size and offset members are elided
 * in this view (they are referenced as fl->size / fl->ofst below).
 */
279 struct sfc_mae_field_locator {
280 efx_mae_field_id_t field_id;
282 /* Field offset in the corresponding rte_flow_item_ struct */
/*
 * Build the "supported mask" for a pattern item: zero the whole mask
 * buffer, then set all-ones over every field covered by a locator.
 * (A mask_size parameter is referenced below; its declaration line is
 * elided in this view.)
 */
287 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
288 unsigned int nb_field_locators, void *mask_ptr,
293 memset(mask_ptr, 0, mask_size);
295 for (i = 0; i < nb_field_locators; ++i) {
296 const struct sfc_mae_field_locator *fl = &field_locators[i];
/* Guard against a locator overrunning the mask buffer. */
298 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
299 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
/*
 * Copy each located field's value and mask from the item's raw
 * spec/mask bytes into the EFX match specification. On any EFX-level
 * failure, wraps the error code into an rte_flow error (the jump to
 * that wrap-up is elided in this view).
 */
304 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
305 unsigned int nb_field_locators, const uint8_t *spec,
306 const uint8_t *mask, efx_mae_match_spec_t *efx_spec,
307 struct rte_flow_error *error)
312 for (i = 0; i < nb_field_locators; ++i) {
313 const struct sfc_mae_field_locator *fl = &field_locators[i];
315 rc = efx_mae_match_spec_field_set(efx_spec, fl->field_id,
316 fl->size, spec + fl->ofst,
317 fl->size, mask + fl->ofst);
/* Failure path: translate the EFX rc into an rte_flow error. */
323 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
324 NULL, "Failed to process item fields");
/*
 * Field locators for the ETH pattern item: EtherType, destination MAC
 * and source MAC, each mapped to its slot in struct rte_flow_item_eth.
 * (Brace/formatting lines are elided in this view.)
 */
330 static const struct sfc_mae_field_locator flocs_eth[] = {
332 EFX_MAE_FIELD_ETHER_TYPE_BE,
333 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
334 offsetof(struct rte_flow_item_eth, type),
337 EFX_MAE_FIELD_ETH_DADDR_BE,
338 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
339 offsetof(struct rte_flow_item_eth, dst),
342 EFX_MAE_FIELD_ETH_SADDR_BE,
343 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
344 offsetof(struct rte_flow_item_eth, src),
/*
 * Parse an RTE flow ETH pattern item: build the supported mask from
 * flocs_eth, validate/extract the item's spec and mask, then set the
 * Ethernet fields into the MAE match specification.
 */
349 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
350 struct sfc_flow_parse_ctx *ctx,
351 struct rte_flow_error *error)
353 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
354 struct rte_flow_item_eth supp_mask;
/* Raw byte views; field locators carry the offsets/sizes. */
355 const uint8_t *spec = NULL;
356 const uint8_t *mask = NULL;
359 sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
360 &supp_mask, sizeof(supp_mask));
362 rc = sfc_flow_parse_init(item,
363 (const void **)&spec, (const void **)&mask,
364 (const void *)&supp_mask,
365 &rte_flow_item_eth_mask,
366 sizeof(struct rte_flow_item_eth), error);
370 /* If "spec" is not set, could be any Ethernet */
374 return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
375 ctx_mae->match_spec_action, error);
/*
 * Table of pattern items supported by the MAE backend; consumed by
 * sfc_flow_parse_pattern(). Each entry binds an RTE item type to its
 * layering constraints and parse callback.
 */
378 static const struct sfc_flow_item sfc_flow_items[] = {
380 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
382 * In terms of RTE flow, this item is a META one,
383 * and its position in the pattern is don't care.
385 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
386 .layer = SFC_FLOW_ITEM_ANY_LAYER,
387 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
388 .parse = sfc_mae_rule_parse_item_phy_port,
/* ETH must open the pattern (START layer) and establishes L2. */
391 .type = RTE_FLOW_ITEM_TYPE_ETH,
392 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
393 .layer = SFC_FLOW_ITEM_L2,
394 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
395 .parse = sfc_mae_rule_parse_item_eth,
/*
 * Parse a whole RTE flow pattern into an MAE action-rule match spec:
 * initialise an EFX match specification, run the generic pattern
 * parser over sfc_flow_items, ask the EFX library whether the result
 * is self-consistent, and hand the spec over to the flow spec.
 * On success, ownership of the match spec moves to spec->match_spec.
 */
400 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
401 const struct rte_flow_item pattern[],
402 struct sfc_flow_spec_mae *spec,
403 struct rte_flow_error *error)
405 struct sfc_mae_parse_ctx ctx_mae;
406 struct sfc_flow_parse_ctx ctx;
409 memset(&ctx_mae, 0, sizeof(ctx_mae));
411 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
413 &ctx_mae.match_spec_action);
415 rc = rte_flow_error_set(error, rc,
416 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
417 "Failed to initialise action rule match specification");
418 goto fail_init_match_spec_action;
/* Hook the MAE-specific context into the generic parse context. */
421 ctx.type = SFC_FLOW_PARSE_CTX_MAE;
424 rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
425 pattern, &ctx, error);
427 goto fail_parse_pattern;
/* Items may be individually fine yet mutually contradictory;
 * let the EFX library validate the combination. */
429 if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
430 rc = rte_flow_error_set(error, ENOTSUP,
431 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
432 "Inconsistent pattern");
433 goto fail_validate_match_spec_action;
436 spec->match_spec = ctx_mae.match_spec_action;
/* Error unwinding: destroy the spec on any failure after init. */
440 fail_validate_match_spec_action:
442 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
444 fail_init_match_spec_action:
/*
 * (Review note: the surrounding comment-block delimiters are elided in
 * this view; the text below documents the bundle mechanism.)
 */
449 * An action supported by MAE may correspond to a bundle of RTE flow actions,
450 * in example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
451 * That is, related RTE flow actions need to be tracked as parts of a whole
452 * so that they can be combined into a single action and submitted to MAE
453 * representation of a given rule's action set.
455 * Each RTE flow action provided by an application gets classified as
456 * one belonging to some bundle type. If an action is not supposed to
457 * belong to any bundle, or if this action is END, it is described as
458 * one belonging to a dummy bundle of type EMPTY.
460 * A currently tracked bundle will be submitted if a repeating
461 * action or an action of different bundle type follows.
/* Bundle classification; further enumerators are elided in this view. */
464 enum sfc_mae_actions_bundle_type {
465 SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
/* Tracking state for the bundle currently being accumulated. */
468 struct sfc_mae_actions_bundle {
469 enum sfc_mae_actions_bundle_type type;
471 /* Indicates actions already tracked by the current bundle */
472 uint64_t actions_mask;
/*
 * (Review note: comment-block delimiters elided in this view.)
 */
476 * Combine configuration of RTE flow actions tracked by the bundle into a
477 * single action and submit the result to MAE action set specification.
478 * Do nothing in the case of dummy action bundle.
481 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
482 __rte_unused efx_mae_actions_t *spec)
486 switch (bundle->type) {
/* EMPTY bundle carries nothing to submit; other cases elided here. */
487 case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
/*
 * (Review note: comment-block delimiters elided in this view.)
 */
498 * Given the type of the next RTE flow action in the line, decide
499 * whether a new bundle is about to start, and, if this is the case,
500 * submit and reset the current bundle.
503 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
504 struct sfc_mae_actions_bundle *bundle,
505 efx_mae_actions_t *spec,
506 struct rte_flow_error *error)
508 enum sfc_mae_actions_bundle_type bundle_type_new;
/* Classify the incoming action into a bundle type; non-default
 * cases are elided in this view. */
511 switch (action->type) {
514 * Self-sufficient actions, including END, are handled in this
515 * case. No checks for unsupported actions are needed here
516 * because parsing doesn't occur at this point.
518 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
/* A type change, or a repeat of an action already in the current
 * bundle, closes the bundle: submit it and start fresh. */
522 if (bundle_type_new != bundle->type ||
523 (bundle->actions_mask & (1ULL << action->type)) != 0) {
524 rc = sfc_mae_actions_bundle_submit(bundle, spec);
528 memset(bundle, 0, sizeof(*bundle));
531 bundle->type = bundle_type_new;
/* Failure path: wrap the EFX rc into an rte_flow error. */
536 return rte_flow_error_set(error, rc,
537 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
538 "Failed to request the (group of) action(s)");
/*
 * Translate an RTE PHY_PORT action into an MAE DELIVER action:
 * pick either the NIC's own assigned port (conf->original != 0) or the
 * explicit index from the action conf, convert it to an m-port
 * selector and populate the deliver action in the EFX action spec.
 */
542 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
543 const struct rte_flow_action_phy_port *conf,
544 efx_mae_actions_t *spec)
546 efx_mport_sel_t mport;
/* "original" selects the port the PF/VF is attached to; an else
 * branch line is presumably elided between these two assignments. */
550 if (conf->original != 0)
551 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
553 phy_port = conf->index;
555 rc = efx_mae_mport_by_phy_port(phy_port, &mport);
559 return efx_mae_action_set_populate_deliver(spec, &mport);
/*
 * Parse one RTE flow action into the EFX action set spec and record it
 * in the bundle's actions_mask. SFC_BUILD_SET_OVERFLOW presumably
 * build-asserts that the action type fits into the 64-bit mask —
 * TODO confirm against the macro definition.
 */
563 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
564 const struct rte_flow_action *action,
565 struct sfc_mae_actions_bundle *bundle,
566 efx_mae_actions_t *spec,
567 struct rte_flow_error *error)
571 switch (action->type) {
572 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
573 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
574 bundle->actions_mask);
575 rc = efx_mae_action_set_populate_vlan_pop(spec);
577 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
578 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
579 bundle->actions_mask);
580 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
/* Anything not handled above is rejected, not silently skipped. */
583 return rte_flow_error_set(error, ENOTSUP,
584 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
585 "Unsupported action");
/* Failure path: wrap the rc; success path records the action as
 * part of the current bundle. */
589 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
590 NULL, "Failed to request the action");
592 bundle->actions_mask |= (1ULL << action->type);
/*
 * Parse the whole RTE flow action list into a driver-tracked action
 * set: build an EFX action spec action-by-action (flushing bundles via
 * sfc_mae_actions_bundle_sync), then either reuse an existing equal
 * action set (freeing the freshly built spec) or register a new one.
 */
599 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
600 const struct rte_flow_action actions[],
601 struct sfc_mae_action_set **action_setp,
602 struct rte_flow_error *error)
604 struct sfc_mae_actions_bundle bundle = {0};
605 const struct rte_flow_action *action;
606 efx_mae_actions_t *spec;
609 if (actions == NULL) {
610 return rte_flow_error_set(error, EINVAL,
611 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
615 rc = efx_mae_action_set_spec_init(sa->nic, &spec);
617 goto fail_action_set_spec_init;
/* Walk the list up to (not including) END; sync the bundle before
 * each action so multi-action bundles are flushed correctly. */
619 for (action = actions;
620 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
621 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
623 goto fail_rule_parse_action;
625 rc = sfc_mae_rule_parse_action(sa, action, &bundle, spec,
628 goto fail_rule_parse_action;
/* Final sync (action now points at END) flushes the last bundle. */
631 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
633 goto fail_rule_parse_action;
/* Deduplicate: if an identical action set is already tracked, reuse
 * it and discard the spec we just built. */
635 *action_setp = sfc_mae_action_set_attach(sa, spec);
636 if (*action_setp != NULL) {
637 efx_mae_action_set_spec_fini(sa->nic, spec);
641 rc = sfc_mae_action_set_add(sa, spec, action_setp);
643 goto fail_action_set_add;
/* Error unwinding; the spec is owned here until handed off. */
648 fail_rule_parse_action:
649 efx_mae_action_set_spec_fini(sa->nic, spec);
651 fail_action_set_spec_init:
653 rc = rte_flow_error_set(error, rc,
654 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
655 NULL, "Failed to process the action");
/*
 * Ask the EFX library whether two match specs belong to the same rule
 * class. Treats a comparison failure as "not the same class" (false)
 * rather than propagating the error.
 */
661 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
662 const efx_mae_match_spec_t *left,
663 const efx_mae_match_spec_t *right)
665 bool have_same_class;
668 rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
671 return (rc == 0) ? have_same_class : false;
/*
 * Best-effort validation of a new MAE rule: scan already-inserted
 * flows (newest first) for one of the same rule class — such classes
 * are known to be accepted by the FW. The outcome when no match is
 * found is in elided lines; the info log below suggests the rule is
 * accepted with a caveat rather than rejected — TODO confirm.
 */
675 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
676 struct sfc_flow_spec_mae *spec)
678 const struct rte_flow *entry;
680 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
681 const struct sfc_flow_spec *entry_spec = &entry->spec;
682 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
683 const efx_mae_match_spec_t *left = es_mae->match_spec;
684 const efx_mae_match_spec_t *right = spec->match_spec;
686 switch (entry_spec->type) {
687 case SFC_FLOW_SPEC_FILTER:
688 /* Ignore VNIC-level flows */
690 case SFC_FLOW_SPEC_MAE:
691 if (sfc_mae_rules_class_cmp(sa, left, right))
699 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
700 "support for inner frame pattern items is not guaranteed; "
701 "other than that, the items are valid from SW standpoint");
/*
 * (Review note: Doxygen-style delimiters of this comment are elided in
 * this view.)
 */
706 * Confirm that a given flow can be accepted by the FW.
709 * Software adapter context
711 * Flow to be verified
713 * Zero on success and non-zero in the case of error.
714 * A special value of EAGAIN indicates that the adapter is
715 * not in started state. This state is compulsory because
716 * it only makes sense to compare the rule class of the flow
717 * being validated with classes of the active rules.
718 * Such classes are wittingly supported by the FW.
721 sfc_mae_flow_verify(struct sfc_adapter *sa,
722 struct rte_flow *flow)
724 struct sfc_flow_spec *spec = &flow->spec;
725 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
727 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Class comparison needs active rules; defer with EAGAIN otherwise
 * (the return EAGAIN line is presumably elided here). */
729 if (sa->state != SFC_ADAPTER_STARTED)
732 return sfc_mae_action_rule_class_verify(sa, spec_mae);
/*
 * Program a verified flow into hardware: enable the action set's FW
 * resource, then insert the action rule using the flow's match spec
 * and the FW action set id.
 * NOTE(review): action_set is dereferenced for &action_set->fw_rsrc in
 * the declaration above the NULL assert — consider moving the
 * SFC_ASSERT(action_set != NULL) ahead of that use.
 */
736 sfc_mae_flow_insert(struct sfc_adapter *sa,
737 struct rte_flow *flow)
739 struct sfc_flow_spec *spec = &flow->spec;
740 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
741 struct sfc_mae_action_set *action_set = spec_mae->action_set;
742 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
745 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
746 SFC_ASSERT(action_set != NULL);
748 rc = sfc_mae_action_set_enable(sa, action_set);
750 goto fail_action_set_enable;
752 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
753 NULL, &fw_rsrc->aset_id,
756 goto fail_action_rule_insert;
/* Unwind: release the FW action set if rule insertion failed. */
760 fail_action_rule_insert:
761 (void)sfc_mae_action_set_disable(sa, action_set);
763 fail_action_set_enable:
/*
 * Withdraw a flow from hardware: remove the FW action rule, invalidate
 * the cached rule id, then drop the FW reference on the action set.
 */
768 sfc_mae_flow_remove(struct sfc_adapter *sa,
769 struct rte_flow *flow)
771 struct sfc_flow_spec *spec = &flow->spec;
772 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
773 struct sfc_mae_action_set *action_set = spec_mae->action_set;
776 SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
777 SFC_ASSERT(action_set != NULL);
779 rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
/* Mark removed so cleanup paths don't try again. */
783 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
785 return sfc_mae_action_set_disable(sa, action_set);