/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_common.h>
#include <rte_malloc.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_switch.h"

static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
                            efx_mport_sel_t *mportp)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

        return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
                                              mportp);
}

int
sfc_mae_attach(struct sfc_adapter *sa)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_mae_switch_port_request switch_port_request = {0};
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        efx_mport_sel_t entity_mport;
        struct sfc_mae *mae = &sa->mae;
        efx_mae_limits_t limits;
        int rc;

        sfc_log_init(sa, "entry");

        if (!encp->enc_mae_supported) {
                mae->status = SFC_MAE_STATUS_UNSUPPORTED;
                goto done;
        }

        sfc_log_init(sa, "init MAE");
        rc = efx_mae_init(sa->nic);
        if (rc != 0)
                goto fail_mae_init;

        sfc_log_init(sa, "get MAE limits");
        rc = efx_mae_get_limits(sa->nic, &limits);
        if (rc != 0)
                goto fail_mae_get_limits;

        sfc_log_init(sa, "assign entity MPORT");
        rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
        if (rc != 0)
                goto fail_mae_assign_entity_mport;

        sfc_log_init(sa, "assign RTE switch domain");
        rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
        if (rc != 0)
                goto fail_mae_assign_switch_domain;

        sfc_log_init(sa, "assign RTE switch port");
        switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
        switch_port_request.entity_mportp = &entity_mport;
        /*
         * As of now, the driver does not support representors, so
         * the RTE ethdev MPORT simply matches that of the entity.
         */
        switch_port_request.ethdev_mportp = &entity_mport;
        switch_port_request.ethdev_port_id = sas->port_id;
        rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
                                        &switch_port_request,
                                        &mae->switch_port_id);
        if (rc != 0)
                goto fail_mae_assign_switch_port;

        mae->status = SFC_MAE_STATUS_SUPPORTED;
        mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
        TAILQ_INIT(&mae->action_sets);

done:
        sfc_log_init(sa, "done");

        return 0;

fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
fail_mae_get_limits:
        efx_mae_fini(sa->nic);

fail_mae_init:
        sfc_log_init(sa, "failed %d", rc);

        return rc;
}

void
sfc_mae_detach(struct sfc_adapter *sa)
{
        struct sfc_mae *mae = &sa->mae;
        enum sfc_mae_status status_prev = mae->status;

        sfc_log_init(sa, "entry");

        mae->nb_action_rule_prios_max = 0;
        mae->status = SFC_MAE_STATUS_UNKNOWN;

        if (status_prev != SFC_MAE_STATUS_SUPPORTED)
                return;

        efx_mae_fini(sa->nic);

        sfc_log_init(sa, "done");
}

static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
                          const efx_mae_actions_t *spec)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
                if (efx_mae_action_set_specs_equal(action_set->spec, spec)) {
                        ++(action_set->refcnt);
                        return action_set;
                }
        }

        return NULL;
}

static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
                       efx_mae_actions_t *spec,
                       struct sfc_mae_action_set **action_setp)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
        if (action_set == NULL)
                return ENOMEM;

        action_set->refcnt = 1;
        action_set->spec = spec;

        action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

        *action_setp = action_set;

        return 0;
}

static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
                       struct sfc_mae_action_set *action_set)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(action_set->refcnt != 0);

        --(action_set->refcnt);

        if (action_set->refcnt != 0)
                return;

        SFC_ASSERT(action_set->fw_rsrc.aset_id.id == EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(action_set->fw_rsrc.refcnt == 0);

        efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
        TAILQ_REMOVE(&mae->action_sets, action_set, entries);
        rte_free(action_set);
}

static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
                          struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(action_set->spec != NULL);

                rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
                                              &fw_rsrc->aset_id);
                if (rc != 0)
                        return rc;
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static int
sfc_mae_action_set_disable(struct sfc_adapter *sa,
                           struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(fw_rsrc->aset_id.id != EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(fw_rsrc->refcnt != 0);

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
                if (rc != 0)
                        return rc;

                fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);

        return 0;
}

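/*
 * A note on the reference counting above (explanatory, not from the
 * original sources): action_set->refcnt counts flows that reference the
 * action set, while fw_rsrc.refcnt counts flows actually inserted into
 * the FW. The FW resource is allocated on the first enable and freed on
 * the last disable.
 */
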
void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec;
        struct sfc_flow_spec_mae *spec_mae;

        if (flow == NULL)
                return;

        spec = &flow->spec;
        spec_mae = &spec->mae;

        SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

        if (spec_mae->action_set != NULL)
                sfc_mae_action_set_del(sa, spec_mae->action_set);

        if (spec_mae->match_spec != NULL)
                efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}

static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
        efx_mae_match_spec_t *efx_spec = ctx->match_spec_action;
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        const efx_mae_field_id_t field_ids[] = {
                EFX_MAE_FIELD_VLAN0_PROTO_BE,
                EFX_MAE_FIELD_VLAN1_PROTO_BE,
        };
        const struct sfc_mae_ethertype *et;
        unsigned int i;
        int rc;

        /*
         * In accordance with RTE flow API convention, the innermost L2
         * item's "type" ("inner_type") is an L3 EtherType. If there is
         * no L3 item, it's 0x0000/0x0000.
         */
        et = &pdata->ethertypes[pdata->nb_vlan_tags];
        rc = efx_mae_match_spec_field_set(efx_spec, EFX_MAE_FIELD_ETHER_TYPE_BE,
                                          sizeof(et->value),
                                          (const uint8_t *)&et->value,
                                          sizeof(et->mask),
                                          (const uint8_t *)&et->mask);
        if (rc != 0)
                return rc;

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (i = 0; i < pdata->nb_vlan_tags; ++i) {
                et = &pdata->ethertypes[i];

                rc = efx_mae_match_spec_field_set(efx_spec, field_ids[i],
                                                  sizeof(et->value),
                                                  (const uint8_t *)&et->value,
                                                  sizeof(et->mask),
                                                  (const uint8_t *)&et->mask);
                if (rc != 0)
                        return rc;
        }

        return 0;
}

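/*
 * Illustration (not from the original sources): for a pattern
 * ETH / VLAN / VLAN / IPV4 with hypothetical values
 * ETH.type = 0x88a8, VLAN[0].inner_type = 0x8100 and
 * VLAN[1].inner_type = 0x0800, sfc_mae_set_ethertypes() yields
 * VLAN0_PROTO_BE = 0x88a8 (ethertypes[0], the outer TPID),
 * VLAN1_PROTO_BE = 0x8100 (ethertypes[1], the inner TPID) and
 * ETHER_TYPE_BE = 0x0800 (ethertypes[nb_vlan_tags], the L3 EtherType).
 */
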
static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
                                  struct rte_flow_error *error)
{
        efx_mae_match_spec_t *efx_spec = ctx->match_spec_action;
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
        const rte_be16_t supported_tpids[] = {
                /* VLAN standard TPID (always the first element) */
                RTE_BE16(RTE_ETHER_TYPE_VLAN),

                /* Double-tagging TPIDs */
                RTE_BE16(RTE_ETHER_TYPE_QINQ),
                RTE_BE16(RTE_ETHER_TYPE_QINQ1),
                RTE_BE16(RTE_ETHER_TYPE_QINQ2),
                RTE_BE16(RTE_ETHER_TYPE_QINQ3),
        };
        unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
        unsigned int ethertype_idx;
        const uint8_t *valuep;
        const uint8_t *maskp;
        int rc;

        if (pdata->innermost_ethertype_restriction.mask != 0 &&
            pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                /*
                 * If a single VLAN item is followed by an L3 item, the
                 * value of "type" in item ETH can't be a double-tagging
                 * TPID.
                 */
                nb_supported_tpids = 1;
        }

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (ethertype_idx = 0;
             ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
                unsigned int tpid_idx;

                /* Only an exact match is supported. */
                if (ethertypes[ethertype_idx].mask != RTE_BE16(0xffff)) {
                        rc = EINVAL;
                        goto fail;
                }

                for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
                     tpid_idx < nb_supported_tpids; ++tpid_idx) {
                        if (ethertypes[ethertype_idx].value ==
                            supported_tpids[tpid_idx])
                                break;
                }

                if (tpid_idx == nb_supported_tpids) {
                        rc = EINVAL;
                        goto fail;
                }

                nb_supported_tpids = 1;
        }

        if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
                struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];

                if (et->mask == 0) {
                        et->mask = RTE_BE16(0xffff);
                        et->value =
                            pdata->innermost_ethertype_restriction.value;
                } else if (et->mask != RTE_BE16(0xffff) ||
                           et->value !=
                           pdata->innermost_ethertype_restriction.value) {
                        rc = EINVAL;
                        goto fail;
                }
        }

        /*
         * Now, when the number of VLAN tags is known, set fields
         * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
         * one is a valid L3 EtherType (or 0x0000/0x0000), and the
         * last two are valid TPIDs (or 0x0000/0x0000).
         */
        rc = sfc_mae_set_ethertypes(ctx);
        if (rc != 0)
                goto fail;

        valuep = (const uint8_t *)&pdata->l3_next_proto_value;
        maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
        rc = efx_mae_match_spec_field_set(efx_spec, EFX_MAE_FIELD_IP_PROTO,
                                          sizeof(pdata->l3_next_proto_value),
                                          valuep,
                                          sizeof(pdata->l3_next_proto_mask),
                                          maskp);
        if (rc != 0)
                goto fail;

        return 0;

fail:
        return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                  "Failed to process pattern data");
}

static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
                                struct sfc_flow_parse_ctx *ctx,
                                struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_port_id supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_port_id_mask;
        const struct rte_flow_item_port_id *spec = NULL;
        const struct rte_flow_item_port_id *mask = NULL;
        efx_mport_sel_t mport_sel;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_port_id), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PORT_ID pattern item");
        }

        /* If "spec" is not set, could be any port ID */
        if (spec == NULL)
                return 0;

        if (spec->id > UINT16_MAX) {
                return rte_flow_error_set(error, EOVERFLOW,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "The port ID is too large");
        }

        rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
                                           spec->id, &mport_sel);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't find RTE ethdev by the port ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
                                          &mport_sel, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the port ID");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
                                 struct sfc_flow_parse_ctx *ctx,
                                 struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_phy_port supp_mask = {
                .index = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_phy_port_mask;
        const struct rte_flow_item_phy_port *spec = NULL;
        const struct rte_flow_item_phy_port *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_phy_port), error);
        if (rc != 0)
                return rc;

        if (mask->index != supp_mask.index) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PHY_PORT pattern item");
        }

        /* If "spec" is not set, could be any physical port */
        if (spec == NULL)
                return 0;

        rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PHY_PORT index");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
                                          &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PHY_PORT");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
                                            &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
                                          &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        const struct rte_flow_item_vf supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_vf_mask;
        const struct rte_flow_item_vf *spec = NULL;
        const struct rte_flow_item_vf *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_vf), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the VF pattern item");
        }

        /*
         * If "spec" is not set, the item requests any VF related to the
         * PF of the current DPDK port (but not the PF itself).
         * Reject this match criterion as unsupported.
         */
        if (spec == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad spec in the VF pattern item");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF + VF IDs");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
                                          &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF + VF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

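/*
 * Usage sketch (hypothetical testpmd syntax, not from the original
 * sources): exactly one traffic source item may appear in a pattern,
 * e.g.
 *
 *   flow create 0 ingress pattern vf id is 1 / eth / end actions drop / end
 *
 * Combining, say, PF and VF items in one pattern trips the
 * match_mport_set checks above and is rejected with ENOTSUP.
 */
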
/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
        efx_mae_field_id_t              field_id;
        size_t                          size;
        /* Field offset in the corresponding rte_flow_item_ struct */
        size_t                          ofst;
};

static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
                             unsigned int nb_field_locators, void *mask_ptr,
                             size_t mask_size)
{
        unsigned int i;

        memset(mask_ptr, 0, mask_size);

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                SFC_ASSERT(fl->ofst + fl->size <= mask_size);
                memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
        }
}

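/*
 * Illustration (not from the original sources): applied to flocs_eth
 * below, this helper yields a struct rte_flow_item_eth mask whose
 * "type", "dst" and "src" fields are all-ones and all other fields are
 * zero, i.e. exactly the set of ETH fields the driver can match on.
 */
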
static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
                   unsigned int nb_field_locators, const uint8_t *spec,
                   const uint8_t *mask, efx_mae_match_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
                        continue;

                rc = efx_mae_match_spec_field_set(efx_spec, fl->field_id,
                                                  fl->size, spec + fl->ofst,
                                                  fl->size, mask + fl->ofst);
                if (rc != 0)
                        break;
        }

        if (rc != 0) {
                rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Failed to process item fields");
        }

        return rc;
}

static const struct sfc_mae_field_locator flocs_eth[] = {
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
                offsetof(struct rte_flow_item_eth, type),
        },
        {
                EFX_MAE_FIELD_ETH_DADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
                offsetof(struct rte_flow_item_eth, dst),
        },
        {
                EFX_MAE_FIELD_ETH_SADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
                offsetof(struct rte_flow_item_eth, src),
        },
};

static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
                            struct sfc_flow_parse_ctx *ctx,
                            struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct rte_flow_item_eth supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_eth_mask,
                                 sizeof(struct rte_flow_item_eth), error);
        if (rc != 0)
                return rc;

        if (spec != NULL) {
                struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
                struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
                const struct rte_flow_item_eth *item_spec;
                const struct rte_flow_item_eth *item_mask;

                item_spec = (const struct rte_flow_item_eth *)spec;
                item_mask = (const struct rte_flow_item_eth *)mask;

                ethertypes[0].value = item_spec->type;
                ethertypes[0].mask = item_mask->type;
        } else {
                /*
                 * The specification is empty. This is wrong in the case
                 * when there are more network patterns in line. Other
                 * than that, any Ethernet can match. All of that is
                 * checked at the end of parsing.
                 */
                return 0;
        }

        return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
                                  ctx_mae->match_spec_action, error);
}

static const struct sfc_mae_field_locator flocs_vlan[] = {
        /* Outermost tag */
        {
                EFX_MAE_FIELD_VLAN0_TCI_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
                offsetof(struct rte_flow_item_vlan, tci),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
                offsetof(struct rte_flow_item_vlan, inner_type),
        },

        /* Innermost tag */
        {
                EFX_MAE_FIELD_VLAN1_TCI_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
                offsetof(struct rte_flow_item_vlan, tci),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
                offsetof(struct rte_flow_item_vlan, inner_type),
        },
};

static int
sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        const struct sfc_mae_field_locator *flocs;
        struct rte_flow_item_vlan supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        unsigned int nb_flocs;
        int rc;

        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't match that many VLAN tags");
        }

        nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
        flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;

        /* If parsing fails, this can remain incremented. */
        ++pdata->nb_vlan_tags;

        sfc_mae_item_build_supp_mask(flocs, nb_flocs,
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_vlan_mask,
                                 sizeof(struct rte_flow_item_vlan), error);
        if (rc != 0)
                return rc;

        if (spec != NULL) {
                struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
                const struct rte_flow_item_vlan *item_spec;
                const struct rte_flow_item_vlan *item_mask;

                item_spec = (const struct rte_flow_item_vlan *)spec;
                item_mask = (const struct rte_flow_item_vlan *)mask;

                ethertypes[pdata->nb_vlan_tags].value = item_spec->inner_type;
                ethertypes[pdata->nb_vlan_tags].mask = item_mask->inner_type;
        } else {
                /*
                 * The specification is empty. This is wrong in the case
                 * when there are more network patterns in line. Other
                 * than that, any Ethernet can match. All of that is
                 * checked at the end of parsing.
                 */
                return 0;
        }

        return sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
                                  ctx_mae->match_spec_action, error);
}

static const struct sfc_mae_field_locator flocs_ipv4[] = {
        {
                EFX_MAE_FIELD_SRC_IP4_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
                offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
        },
        {
                EFX_MAE_FIELD_DST_IP4_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
                offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
                offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
        },
        {
                EFX_MAE_FIELD_IP_TOS,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
                                 hdr.type_of_service),
                offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
        },
        {
                EFX_MAE_FIELD_IP_TTL,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
                offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
        },
};

static int
sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        struct rte_flow_item_ipv4 supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4), error);
        if (rc != 0)
                return rc;

        pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
        pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

        if (spec != NULL) {
                const struct rte_flow_item_ipv4 *item_spec;
                const struct rte_flow_item_ipv4 *item_mask;

                item_spec = (const struct rte_flow_item_ipv4 *)spec;
                item_mask = (const struct rte_flow_item_ipv4 *)mask;

                pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
                pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
        } else {
                return 0;
        }

        return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
                                  ctx_mae->match_spec_action, error);
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern does not matter.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_port_id,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern does not matter.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_phy_port,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PF,
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern does not matter.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_pf,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VF,
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern does not matter.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_vf,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_ipv4,
        },
};

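/*
 * Illustration (not from the original sources) of the layer rules above:
 * ETH must open the pattern, VLAN may only follow an L2 item (at most
 * twice, see sfc_mae_rule_parse_item_vlan()), IPV4 may only follow an
 * L2 item, and the META items (PORT_ID, PHY_PORT, PF, VF) may appear
 * at any position.
 */
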
int
sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
                           const struct rte_flow_item pattern[],
                           struct sfc_flow_spec_mae *spec,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx ctx_mae;
        struct sfc_flow_parse_ctx ctx;
        int rc;

        memset(&ctx_mae, 0, sizeof(ctx_mae));
        ctx_mae.sa = sa;

        rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
                                     spec->priority,
                                     &ctx_mae.match_spec_action);
        if (rc != 0) {
                rc = rte_flow_error_set(error, rc,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Failed to initialise action rule match specification");
                goto fail_init_match_spec_action;
        }

        ctx.type = SFC_FLOW_PARSE_CTX_MAE;
        ctx.mae = &ctx_mae;

        rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
                                    pattern, &ctx, error);
        if (rc != 0)
                goto fail_parse_pattern;

        rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
        if (rc != 0)
                goto fail_process_pattern_data;

        if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
                rc = rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                        "Inconsistent pattern");
                goto fail_validate_match_spec_action;
        }

        spec->match_spec = ctx_mae.match_spec_action;

        return 0;

fail_validate_match_spec_action:
fail_process_pattern_data:
fail_parse_pattern:
        efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);

fail_init_match_spec_action:
        return rc;
}

/*
 * An action supported by MAE may correspond to a bundle of RTE flow actions,
 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
 * That is, related RTE flow actions need to be tracked as parts of a whole
 * so that they can be combined into a single action and submitted to the
 * MAE representation of a given rule's action set.
 *
 * Each RTE flow action provided by an application gets classified as
 * one belonging to some bundle type. If an action is not supposed to
 * belong to any bundle, or if this action is END, it is described as
 * one belonging to a dummy bundle of type EMPTY.
 *
 * A currently tracked bundle will be submitted if a repeating
 * action or an action of different bundle type follows.
 */

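/*
 * Illustration (not from the original sources): for an action list
 * OF_PUSH_VLAN / OF_SET_VLAN_VID / OF_SET_VLAN_PCP / PORT_ID / END,
 * the first three actions accumulate in one VLAN_PUSH bundle, which is
 * submitted as a single MAE action when the PORT_ID action (of bundle
 * type EMPTY) is encountered.
 */
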
enum sfc_mae_actions_bundle_type {
        SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
        SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
};

struct sfc_mae_actions_bundle {
        enum sfc_mae_actions_bundle_type        type;

        /* Indicates actions already tracked by the current bundle */
        uint64_t                                actions_mask;

        /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
        rte_be16_t                              vlan_push_tpid;
        rte_be16_t                              vlan_push_tci;
};

/*
 * Combine the configuration of RTE flow actions tracked by the bundle
 * into a single action and submit the result to the MAE action set
 * specification. Do nothing in the case of a dummy (EMPTY) bundle.
 */
static int
sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
                              efx_mae_actions_t *spec)
{
        int rc = 0;

        switch (bundle->type) {
        case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
                break;
        case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
                rc = efx_mae_action_set_populate_vlan_push(
                        spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
                break;
        default:
                SFC_ASSERT(B_FALSE);
                break;
        }

        return rc;
}

/*
 * Given the type of the next RTE flow action in the line, decide
 * whether a new bundle is about to start, and, if this is the case,
 * submit and reset the current bundle.
 */
static int
sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
                            struct sfc_mae_actions_bundle *bundle,
                            efx_mae_actions_t *spec,
                            struct rte_flow_error *error)
{
        enum sfc_mae_actions_bundle_type bundle_type_new;
        int rc;

        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
        case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
        case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
                break;
        default:
                /*
                 * Self-sufficient actions, including END, are handled in this
                 * case. No checks for unsupported actions are needed here
                 * because parsing doesn't occur at this point.
                 */
                bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
                break;
        }

        if (bundle_type_new != bundle->type ||
            (bundle->actions_mask & (1ULL << action->type)) != 0) {
                rc = sfc_mae_actions_bundle_submit(bundle, spec);
                if (rc != 0)
                        goto fail_submit;

                memset(bundle, 0, sizeof(*bundle));
        }

        bundle->type = bundle_type_new;

        return 0;

fail_submit:
        return rte_flow_error_set(error, rc,
                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                        "Failed to request the (group of) action(s)");
}

static void
sfc_mae_rule_parse_action_of_push_vlan(
                            const struct rte_flow_action_of_push_vlan *conf,
                            struct sfc_mae_actions_bundle *bundle)
{
        bundle->vlan_push_tpid = conf->ethertype;
}

static void
sfc_mae_rule_parse_action_of_set_vlan_vid(
                            const struct rte_flow_action_of_set_vlan_vid *conf,
                            struct sfc_mae_actions_bundle *bundle)
{
        bundle->vlan_push_tci |= (conf->vlan_vid &
                                  rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
}

static void
sfc_mae_rule_parse_action_of_set_vlan_pcp(
                            const struct rte_flow_action_of_set_vlan_pcp *conf,
                            struct sfc_mae_actions_bundle *bundle)
{
        uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
                                           RTE_LEN2MASK(3, uint8_t)) << 13;

        bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
}

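/*
 * Worked example (not from the original sources): for vlan_vid = 5 and
 * vlan_pcp = 3, the accumulated TCI is (3 << 13) | 5 = 0x6005, which
 * sfc_mae_actions_bundle_submit() passes to the VLAN_PUSH action
 * together with the TPID taken from OF_PUSH_VLAN.
 */
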
static int
sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
                               efx_mae_actions_t *spec)
{
        return efx_mae_action_set_populate_mark(spec, conf->id);
}

static int
sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
                                   const struct rte_flow_action_phy_port *conf,
                                   efx_mae_actions_t *spec)
{
        efx_mport_sel_t mport;
        uint32_t phy_port;
        int rc;

        if (conf->original != 0)
                phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
        else
                phy_port = conf->index;

        rc = efx_mae_mport_by_phy_port(phy_port, &mport);
        if (rc != 0)
                return rc;

        return efx_mae_action_set_populate_deliver(spec, &mport);
}

static int
sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
                                const struct rte_flow_action_vf *vf_conf,
                                efx_mae_actions_t *spec)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        efx_mport_sel_t mport;
        uint32_t vf;
        int rc;

        if (vf_conf == NULL)
                vf = EFX_PCI_VF_INVALID;
        else if (vf_conf->original != 0)
                vf = encp->enc_vf;
        else
                vf = vf_conf->id;

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
        if (rc != 0)
                return rc;

        return efx_mae_action_set_populate_deliver(spec, &mport);
}

static int
sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
                                  const struct rte_flow_action_port_id *conf,
                                  efx_mae_actions_t *spec)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_mae *mae = &sa->mae;
        efx_mport_sel_t mport;
        uint16_t port_id;
        int rc;

        /*
         * With "original" set, deliver to the ethdev port the flow is
         * created on; otherwise, use the port ID from the action conf.
         */
        port_id = (conf->original != 0) ? sas->port_id : conf->id;

        rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
                                           port_id, &mport);
        if (rc != 0)
                return rc;

        return efx_mae_action_set_populate_deliver(spec, &mport);
}

static int
sfc_mae_rule_parse_action(struct sfc_adapter *sa,
                          const struct rte_flow_action *action,
                          struct sfc_mae_actions_bundle *bundle,
                          efx_mae_actions_t *spec,
                          struct rte_flow_error *error)
{
        int rc = 0;

        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
                                       bundle->actions_mask);
                rc = efx_mae_action_set_populate_vlan_pop(spec);
                break;
        case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
                                       bundle->actions_mask);
                sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
                break;
        case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
                                       bundle->actions_mask);
                sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf,
                                                          bundle);
                break;
        case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
                                       bundle->actions_mask);
                sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf,
                                                          bundle);
                break;
        case RTE_FLOW_ACTION_TYPE_FLAG:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
                                       bundle->actions_mask);
                rc = efx_mae_action_set_populate_flag(spec);
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
                                       bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
                break;
        case RTE_FLOW_ACTION_TYPE_PHY_PORT:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
                                       bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
                break;
        case RTE_FLOW_ACTION_TYPE_PF:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
                                       bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
                                       bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
                break;
        case RTE_FLOW_ACTION_TYPE_PORT_ID:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
                                       bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
                                       bundle->actions_mask);
                rc = efx_mae_action_set_populate_drop(spec);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                "Unsupported action");
        }

        if (rc != 0) {
                rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "Failed to request the action");
        } else {
                bundle->actions_mask |= (1ULL << action->type);
        }

        return rc;
}

int
sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
                           const struct rte_flow_action actions[],
                           struct sfc_mae_action_set **action_setp,
                           struct rte_flow_error *error)
{
        struct sfc_mae_actions_bundle bundle = {0};
        const struct rte_flow_action *action;
        efx_mae_actions_t *spec;
        int rc;

        if (actions == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
                                "NULL actions");
        }

        rc = efx_mae_action_set_spec_init(sa->nic, &spec);
        if (rc != 0)
                goto fail_action_set_spec_init;

        for (action = actions;
             action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
                rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
                if (rc != 0)
                        goto fail_rule_parse_action;

                rc = sfc_mae_rule_parse_action(sa, action, &bundle, spec,
                                               error);
                if (rc != 0)
                        goto fail_rule_parse_action;
        }

        /* Submit the final bundle tracked when END is reached. */
        rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
        if (rc != 0)
                goto fail_rule_parse_action;

        *action_setp = sfc_mae_action_set_attach(sa, spec);
        if (*action_setp != NULL) {
                /* An equal spec is already in use; drop the new one. */
                efx_mae_action_set_spec_fini(sa->nic, spec);
                return 0;
        }

        rc = sfc_mae_action_set_add(sa, spec, action_setp);
        if (rc != 0)
                goto fail_action_set_add;

        return 0;

fail_action_set_add:
fail_rule_parse_action:
        efx_mae_action_set_spec_fini(sa->nic, spec);

fail_action_set_spec_init:
        if (rc > 0) {
                rc = rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                NULL, "Failed to process the action");
        }
        return rc;
}

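/*
 * Note (explanatory, not from the original sources): thanks to
 * sfc_mae_action_set_attach() above, flows with identical action lists
 * share a single action set and, once enabled, a single FW resource.
 */
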
static bool
sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
                        const efx_mae_match_spec_t *left,
                        const efx_mae_match_spec_t *right)
{
        bool have_same_class;
        int rc;

        rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
                                           &have_same_class);

        return (rc == 0) ? have_same_class : false;
}

static int
sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
                                 struct sfc_flow_spec_mae *spec)
{
        const struct rte_flow *entry;

        TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
                const struct sfc_flow_spec *entry_spec = &entry->spec;
                const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
                const efx_mae_match_spec_t *left = es_mae->match_spec;
                const efx_mae_match_spec_t *right = spec->match_spec;

                switch (entry_spec->type) {
                case SFC_FLOW_SPEC_FILTER:
                        /* Ignore VNIC-level flows */
                        break;
                case SFC_FLOW_SPEC_MAE:
                        if (sfc_mae_rules_class_cmp(sa, left, right))
                                return 0;
                        break;
                default:
                        SFC_ASSERT(B_FALSE);
                }
        }

        sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
                 "support for inner frame pattern items is not guaranteed; "
                 "other than that, the items are valid from SW standpoint");

        return 0;
}

/**
 * Confirm that a given flow can be accepted by the FW.
 *
 * @param sa
 *   Software adapter context
 * @param flow
 *   Flow to be verified
 * @return
 *   Zero on success and non-zero in the case of error.
 *   A special value of EAGAIN indicates that the adapter is
 *   not in started state. This state is compulsory because
 *   it only makes sense to compare the rule class of the flow
 *   being validated with classes of the active rules.
 *   Such classes are known to be supported by the FW.
 */
int
sfc_mae_flow_verify(struct sfc_adapter *sa,
                    struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_mae *spec_mae = &spec->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (sa->state != SFC_ADAPTER_STARTED)
                return EAGAIN;

        return sfc_mae_action_rule_class_verify(sa, spec_mae);
}

int
sfc_mae_flow_insert(struct sfc_adapter *sa,
                    struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_mae *spec_mae = &spec->mae;
        struct sfc_mae_action_set *action_set = spec_mae->action_set;
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(action_set != NULL);

        rc = sfc_mae_action_set_enable(sa, action_set);
        if (rc != 0)
                goto fail_action_set_enable;

        rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
                                        NULL, &fw_rsrc->aset_id,
                                        &spec_mae->rule_id);
        if (rc != 0)
                goto fail_action_rule_insert;

        return 0;

fail_action_rule_insert:
        (void)sfc_mae_action_set_disable(sa, action_set);

fail_action_set_enable:
        return rc;
}

int
sfc_mae_flow_remove(struct sfc_adapter *sa,
                    struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_mae *spec_mae = &spec->mae;
        struct sfc_mae_action_set *action_set = spec_mae->action_set;
        int rc;

        SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(action_set != NULL);

        rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
        if (rc != 0)
                return rc;

        spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;

        return sfc_mae_action_set_disable(sa, action_set);
}