1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
12 #include <rte_common.h>
18 #include "sfc_switch.h"
21 sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
22 efx_mport_sel_t *mportp)
24 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
26 return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
31 sfc_mae_attach(struct sfc_adapter *sa)
33 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
34 struct sfc_mae_switch_port_request switch_port_request = {0};
35 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
36 efx_mport_sel_t entity_mport;
37 struct sfc_mae *mae = &sa->mae;
38 efx_mae_limits_t limits;
41 sfc_log_init(sa, "entry");
43 if (!encp->enc_mae_supported) {
44 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
48 sfc_log_init(sa, "init MAE");
49 rc = efx_mae_init(sa->nic);
53 sfc_log_init(sa, "get MAE limits");
54 rc = efx_mae_get_limits(sa->nic, &limits);
56 goto fail_mae_get_limits;
58 sfc_log_init(sa, "assign entity MPORT");
59 rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
61 goto fail_mae_assign_entity_mport;
63 sfc_log_init(sa, "assign RTE switch domain");
64 rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
66 goto fail_mae_assign_switch_domain;
68 sfc_log_init(sa, "assign RTE switch port");
69 switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
70 switch_port_request.entity_mportp = &entity_mport;
72 * As of now, the driver does not support representors, so
73 * RTE ethdev MPORT simply matches that of the entity.
75 switch_port_request.ethdev_mportp = &entity_mport;
76 switch_port_request.ethdev_port_id = sas->port_id;
77 rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
79 &mae->switch_port_id);
81 goto fail_mae_assign_switch_port;
83 mae->status = SFC_MAE_STATUS_SUPPORTED;
84 mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
85 TAILQ_INIT(&mae->action_sets);
87 sfc_log_init(sa, "done");
91 fail_mae_assign_switch_port:
92 fail_mae_assign_switch_domain:
93 fail_mae_assign_entity_mport:
95 efx_mae_fini(sa->nic);
98 sfc_log_init(sa, "failed %d", rc);
104 sfc_mae_detach(struct sfc_adapter *sa)
106 struct sfc_mae *mae = &sa->mae;
107 enum sfc_mae_status status_prev = mae->status;
109 sfc_log_init(sa, "entry");
111 mae->nb_action_rule_prios_max = 0;
112 mae->status = SFC_MAE_STATUS_UNKNOWN;
114 if (status_prev != SFC_MAE_STATUS_SUPPORTED)
117 efx_mae_fini(sa->nic);
119 sfc_log_init(sa, "done");
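/*
 * Action sets below are reference counted: sfc_mae_action_set_attach() looks
 * up an entry with an identical specification and bumps its refcnt, while
 * sfc_mae_action_set_add() creates a new entry with refcnt == 1. The FW
 * resource behind an entry is allocated in sfc_mae_action_set_enable() and
 * released in sfc_mae_action_set_disable() once the last flow stops using it.
 */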
122 static struct sfc_mae_action_set *
123 sfc_mae_action_set_attach(struct sfc_adapter *sa,
124 const efx_mae_actions_t *spec)
126 struct sfc_mae_action_set *action_set;
127 struct sfc_mae *mae = &sa->mae;
129 SFC_ASSERT(sfc_adapter_is_locked(sa));
131 TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
132 if (efx_mae_action_set_specs_equal(action_set->spec, spec)) {
133 ++(action_set->refcnt);
142 sfc_mae_action_set_add(struct sfc_adapter *sa,
143 efx_mae_actions_t *spec,
144 struct sfc_mae_action_set **action_setp)
146 struct sfc_mae_action_set *action_set;
147 struct sfc_mae *mae = &sa->mae;
149 SFC_ASSERT(sfc_adapter_is_locked(sa));
151 action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
152 if (action_set == NULL)
155 action_set->refcnt = 1;
156 action_set->spec = spec;
158 action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
160 TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
162 *action_setp = action_set;
168 sfc_mae_action_set_del(struct sfc_adapter *sa,
169 struct sfc_mae_action_set *action_set)
171 struct sfc_mae *mae = &sa->mae;
173 SFC_ASSERT(sfc_adapter_is_locked(sa));
174 SFC_ASSERT(action_set->refcnt != 0);
176 --(action_set->refcnt);
178 if (action_set->refcnt != 0)
181 SFC_ASSERT(action_set->fw_rsrc.aset_id.id == EFX_MAE_RSRC_ID_INVALID);
182 SFC_ASSERT(action_set->fw_rsrc.refcnt == 0);
184 efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
185 TAILQ_REMOVE(&mae->action_sets, action_set, entries);
186 rte_free(action_set);
190 sfc_mae_action_set_enable(struct sfc_adapter *sa,
191 struct sfc_mae_action_set *action_set)
193 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
196 SFC_ASSERT(sfc_adapter_is_locked(sa));
198 if (fw_rsrc->refcnt == 0) {
199 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
200 SFC_ASSERT(action_set->spec != NULL);
202 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
214 sfc_mae_action_set_disable(struct sfc_adapter *sa,
215 struct sfc_mae_action_set *action_set)
217 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
220 SFC_ASSERT(sfc_adapter_is_locked(sa));
221 SFC_ASSERT(fw_rsrc->aset_id.id != EFX_MAE_RSRC_ID_INVALID);
222 SFC_ASSERT(fw_rsrc->refcnt != 0);
224 if (fw_rsrc->refcnt == 1) {
225 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
229 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
238 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
239 struct rte_flow *flow)
241 struct sfc_flow_spec *spec;
242 struct sfc_flow_spec_mae *spec_mae;
252 spec_mae = &spec->mae;
254 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
256 if (spec_mae->action_set != NULL)
257 sfc_mae_action_set_del(sa, spec_mae->action_set);
259 if (spec_mae->match_spec != NULL)
260 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
264 sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
266 efx_mae_match_spec_t *efx_spec = ctx->match_spec_action;
267 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
268 const efx_mae_field_id_t field_ids[] = {
269 EFX_MAE_FIELD_VLAN0_PROTO_BE,
270 EFX_MAE_FIELD_VLAN1_PROTO_BE,
272 const struct sfc_mae_ethertype *et;
277 * In accordance with RTE flow API convention, the innermost L2
278 * item's "type" ("inner_type") is an L3 EtherType. If there is
279 * no L3 item, it's 0x0000/0x0000.
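*
* For instance, in an illustrative "eth / vlan / ipv4" pattern it is
* the VLAN item's "inner_type" that ends up in ETHER_TYPE_BE below,
* while the ETH item's "type" is treated as a TPID.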
281 et = &pdata->ethertypes[pdata->nb_vlan_tags];
282 rc = efx_mae_match_spec_field_set(efx_spec, EFX_MAE_FIELD_ETHER_TYPE_BE,
284 (const uint8_t *)&et->value,
286 (const uint8_t *)&et->mask);
291 * sfc_mae_rule_parse_item_vlan() has already made sure
292 * that pdata->nb_vlan_tags does not exceed this figure.
294 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
296 for (i = 0; i < pdata->nb_vlan_tags; ++i) {
297 et = &pdata->ethertypes[i];
299 rc = efx_mae_match_spec_field_set(efx_spec, field_ids[i],
301 (const uint8_t *)&et->value,
303 (const uint8_t *)&et->mask);
312 sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
313 struct rte_flow_error *error)
315 efx_mae_match_spec_t *efx_spec = ctx->match_spec_action;
316 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
317 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
318 const rte_be16_t supported_tpids[] = {
319 /* VLAN standard TPID (always the first element) */
320 RTE_BE16(RTE_ETHER_TYPE_VLAN),
322 /* Double-tagging TPIDs */
323 RTE_BE16(RTE_ETHER_TYPE_QINQ),
324 RTE_BE16(RTE_ETHER_TYPE_QINQ1),
325 RTE_BE16(RTE_ETHER_TYPE_QINQ2),
326 RTE_BE16(RTE_ETHER_TYPE_QINQ3),
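/* (For reference: 0x8100, 0x88a8, 0x9100, 0x9200 and 0x9300.) */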
328 unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
329 unsigned int ethertype_idx;
330 const uint8_t *valuep;
331 const uint8_t *maskp;
334 if (pdata->innermost_ethertype_restriction.mask != 0 &&
335 pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
337 * If a single item VLAN is followed by an L3 item, the value
338 * of "type" in item ETH can't be a double-tagging TPID.
340 nb_supported_tpids = 1;
344 * sfc_mae_rule_parse_item_vlan() has already made sure
345 * that pdata->nb_vlan_tags does not exceed this figure.
347 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
349 for (ethertype_idx = 0;
350 ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
351 unsigned int tpid_idx;
353 /* Only an exact match is supported. */
354 if (ethertypes[ethertype_idx].mask != RTE_BE16(0xffff)) {
359 for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
360 tpid_idx < nb_supported_tpids; ++tpid_idx) {
361 if (ethertypes[ethertype_idx].value ==
362 supported_tpids[tpid_idx])
366 if (tpid_idx == nb_supported_tpids) {
371 nb_supported_tpids = 1;
374 if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
375 struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
378 et->mask = RTE_BE16(0xffff);
380 pdata->innermost_ethertype_restriction.value;
381 } else if (et->mask != RTE_BE16(0xffff) ||
383 pdata->innermost_ethertype_restriction.value) {
390 * Now, when the number of VLAN tags is known, set fields
391 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
392 * one is a valid L3 EtherType (or 0x0000/0x0000) and the
393 * last two are valid TPIDs (or 0x0000/0x0000).
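*
* To give a hypothetical double-tagged example: for the pattern
* "eth type is 0x88a8 / vlan inner_type is 0x8100 / vlan
* inner_type is 0x0800 / ipv4", ETHER_TYPE becomes 0x0800,
* VLAN0_PROTO becomes 0x88a8 and VLAN1_PROTO becomes 0x8100.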
395 rc = sfc_mae_set_ethertypes(ctx);
399 if (pdata->l3_next_proto_restriction_mask == 0xff) {
400 if (pdata->l3_next_proto_mask == 0) {
401 pdata->l3_next_proto_mask = 0xff;
402 pdata->l3_next_proto_value =
403 pdata->l3_next_proto_restriction_value;
404 } else if (pdata->l3_next_proto_mask != 0xff ||
405 pdata->l3_next_proto_value !=
406 pdata->l3_next_proto_restriction_value) {
412 valuep = (const uint8_t *)&pdata->l3_next_proto_value;
413 maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
414 rc = efx_mae_match_spec_field_set(efx_spec, EFX_MAE_FIELD_IP_PROTO,
415 sizeof(pdata->l3_next_proto_value),
417 sizeof(pdata->l3_next_proto_mask),
425 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
426 "Failed to process pattern data");
430 sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
431 struct sfc_flow_parse_ctx *ctx,
432 struct rte_flow_error *error)
434 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
435 const struct rte_flow_item_port_id supp_mask = {
438 const void *def_mask = &rte_flow_item_port_id_mask;
439 const struct rte_flow_item_port_id *spec = NULL;
440 const struct rte_flow_item_port_id *mask = NULL;
441 efx_mport_sel_t mport_sel;
444 if (ctx_mae->match_mport_set) {
445 return rte_flow_error_set(error, ENOTSUP,
446 RTE_FLOW_ERROR_TYPE_ITEM, item,
447 "Can't handle multiple traffic source items");
450 rc = sfc_flow_parse_init(item,
451 (const void **)&spec, (const void **)&mask,
452 (const void *)&supp_mask, def_mask,
453 sizeof(struct rte_flow_item_port_id), error);
457 if (mask->id != supp_mask.id) {
458 return rte_flow_error_set(error, EINVAL,
459 RTE_FLOW_ERROR_TYPE_ITEM, item,
460 "Bad mask in the PORT_ID pattern item");
463 /* If "spec" is not set, could be any port ID */
467 if (spec->id > UINT16_MAX) {
468 return rte_flow_error_set(error, EOVERFLOW,
469 RTE_FLOW_ERROR_TYPE_ITEM, item,
470 "The port ID is too large");
473 rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
474 spec->id, &mport_sel);
476 return rte_flow_error_set(error, rc,
477 RTE_FLOW_ERROR_TYPE_ITEM, item,
478 "Can't find RTE ethdev by the port ID");
481 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
484 return rte_flow_error_set(error, rc,
485 RTE_FLOW_ERROR_TYPE_ITEM, item,
486 "Failed to set MPORT for the port ID");
489 ctx_mae->match_mport_set = B_TRUE;
495 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
496 struct sfc_flow_parse_ctx *ctx,
497 struct rte_flow_error *error)
499 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
500 const struct rte_flow_item_phy_port supp_mask = {
503 const void *def_mask = &rte_flow_item_phy_port_mask;
504 const struct rte_flow_item_phy_port *spec = NULL;
505 const struct rte_flow_item_phy_port *mask = NULL;
506 efx_mport_sel_t mport_v;
509 if (ctx_mae->match_mport_set) {
510 return rte_flow_error_set(error, ENOTSUP,
511 RTE_FLOW_ERROR_TYPE_ITEM, item,
512 "Can't handle multiple traffic source items");
515 rc = sfc_flow_parse_init(item,
516 (const void **)&spec, (const void **)&mask,
517 (const void *)&supp_mask, def_mask,
518 sizeof(struct rte_flow_item_phy_port), error);
522 if (mask->index != supp_mask.index) {
523 return rte_flow_error_set(error, EINVAL,
524 RTE_FLOW_ERROR_TYPE_ITEM, item,
525 "Bad mask in the PHY_PORT pattern item");
528 /* If "spec" is not set, could be any physical port */
532 rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
534 return rte_flow_error_set(error, rc,
535 RTE_FLOW_ERROR_TYPE_ITEM, item,
536 "Failed to convert the PHY_PORT index");
539 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
542 return rte_flow_error_set(error, rc,
543 RTE_FLOW_ERROR_TYPE_ITEM, item,
544 "Failed to set MPORT for the PHY_PORT");
547 ctx_mae->match_mport_set = B_TRUE;
553 sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
554 struct sfc_flow_parse_ctx *ctx,
555 struct rte_flow_error *error)
557 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
558 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
559 efx_mport_sel_t mport_v;
562 if (ctx_mae->match_mport_set) {
563 return rte_flow_error_set(error, ENOTSUP,
564 RTE_FLOW_ERROR_TYPE_ITEM, item,
565 "Can't handle multiple traffic source items");
568 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
571 return rte_flow_error_set(error, rc,
572 RTE_FLOW_ERROR_TYPE_ITEM, item,
573 "Failed to convert the PF ID");
576 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
579 return rte_flow_error_set(error, rc,
580 RTE_FLOW_ERROR_TYPE_ITEM, item,
581 "Failed to set MPORT for the PF");
584 ctx_mae->match_mport_set = B_TRUE;
590 sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
591 struct sfc_flow_parse_ctx *ctx,
592 struct rte_flow_error *error)
594 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
595 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
596 const struct rte_flow_item_vf supp_mask = {
599 const void *def_mask = &rte_flow_item_vf_mask;
600 const struct rte_flow_item_vf *spec = NULL;
601 const struct rte_flow_item_vf *mask = NULL;
602 efx_mport_sel_t mport_v;
605 if (ctx_mae->match_mport_set) {
606 return rte_flow_error_set(error, ENOTSUP,
607 RTE_FLOW_ERROR_TYPE_ITEM, item,
608 "Can't handle multiple traffic source items");
611 rc = sfc_flow_parse_init(item,
612 (const void **)&spec, (const void **)&mask,
613 (const void *)&supp_mask, def_mask,
614 sizeof(struct rte_flow_item_vf), error);
618 if (mask->id != supp_mask.id) {
619 return rte_flow_error_set(error, EINVAL,
620 RTE_FLOW_ERROR_TYPE_ITEM, item,
621 "Bad mask in the VF pattern item");
625 * If "spec" is not set, the item requests any VF related to the
626 * PF of the current DPDK port (but not the PF itself).
627 * Reject this match criterion as unsupported.
630 return rte_flow_error_set(error, EINVAL,
631 RTE_FLOW_ERROR_TYPE_ITEM, item,
632 "Bad spec in the VF pattern item");
635 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
637 return rte_flow_error_set(error, rc,
638 RTE_FLOW_ERROR_TYPE_ITEM, item,
639 "Failed to convert the PF + VF IDs");
642 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec_action,
645 return rte_flow_error_set(error, rc,
646 RTE_FLOW_ERROR_TYPE_ITEM, item,
647 "Failed to set MPORT for the PF + VF");
650 ctx_mae->match_mport_set = B_TRUE;
656 * Having this field ID in a field locator means that this
657 * locator cannot be used to actually set the field at the
658 * time when the corresponding item gets encountered. Such
659 * fields get stashed in the parsing context instead. This
660 * is required to resolve dependencies between the stashed
661 * fields. See sfc_mae_rule_process_pattern_data().
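*
* The ETH item's "type" is one such field: whether it holds an L3
* EtherType or a TPID only becomes known once the number of VLAN
* items in the pattern is known.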
663 #define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS
665 struct sfc_mae_field_locator {
666 efx_mae_field_id_t field_id;
668 /* Field offset in the corresponding rte_flow_item_ struct */
673 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
674 unsigned int nb_field_locators, void *mask_ptr,
679 memset(mask_ptr, 0, mask_size);
681 for (i = 0; i < nb_field_locators; ++i) {
682 const struct sfc_mae_field_locator *fl = &field_locators[i];
684 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
685 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
690 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
691 unsigned int nb_field_locators, const uint8_t *spec,
692 const uint8_t *mask, efx_mae_match_spec_t *efx_spec,
693 struct rte_flow_error *error)
698 for (i = 0; i < nb_field_locators; ++i) {
699 const struct sfc_mae_field_locator *fl = &field_locators[i];
701 if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
704 rc = efx_mae_match_spec_field_set(efx_spec, fl->field_id,
705 fl->size, spec + fl->ofst,
706 fl->size, mask + fl->ofst);
712 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
713 NULL, "Failed to process item fields");
719 static const struct sfc_mae_field_locator flocs_eth[] = {
722 * This locator is used only for building supported fields mask.
723 * The field is handled by sfc_mae_rule_process_pattern_data().
725 SFC_MAE_FIELD_HANDLING_DEFERRED,
726 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
727 offsetof(struct rte_flow_item_eth, type),
730 EFX_MAE_FIELD_ETH_DADDR_BE,
731 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
732 offsetof(struct rte_flow_item_eth, dst),
735 EFX_MAE_FIELD_ETH_SADDR_BE,
736 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
737 offsetof(struct rte_flow_item_eth, src),
742 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
743 struct sfc_flow_parse_ctx *ctx,
744 struct rte_flow_error *error)
746 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
747 struct rte_flow_item_eth supp_mask;
748 const uint8_t *spec = NULL;
749 const uint8_t *mask = NULL;
752 sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
753 &supp_mask, sizeof(supp_mask));
755 rc = sfc_flow_parse_init(item,
756 (const void **)&spec, (const void **)&mask,
757 (const void *)&supp_mask,
758 &rte_flow_item_eth_mask,
759 sizeof(struct rte_flow_item_eth), error);
764 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
765 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
766 const struct rte_flow_item_eth *item_spec;
767 const struct rte_flow_item_eth *item_mask;
769 item_spec = (const struct rte_flow_item_eth *)spec;
770 item_mask = (const struct rte_flow_item_eth *)mask;
772 ethertypes[0].value = item_spec->type;
773 ethertypes[0].mask = item_mask->type;
776 * The specification is empty. This is wrong in the case
777 * when there are more network patterns in line. Other
778 * than that, any Ethernet can match. All of that is
779 * checked at the end of parsing.
784 return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
785 ctx_mae->match_spec_action, error);
788 static const struct sfc_mae_field_locator flocs_vlan[] = {
791 EFX_MAE_FIELD_VLAN0_TCI_BE,
792 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
793 offsetof(struct rte_flow_item_vlan, tci),
797 * This locator is used only for building supported fields mask.
798 * The field is handled by sfc_mae_rule_process_pattern_data().
800 SFC_MAE_FIELD_HANDLING_DEFERRED,
801 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
802 offsetof(struct rte_flow_item_vlan, inner_type),
807 EFX_MAE_FIELD_VLAN1_TCI_BE,
808 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
809 offsetof(struct rte_flow_item_vlan, tci),
813 * This locator is used only for building supported fields mask.
814 * The field is handled by sfc_mae_rule_process_pattern_data().
816 SFC_MAE_FIELD_HANDLING_DEFERRED,
817 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
818 offsetof(struct rte_flow_item_vlan, inner_type),
823 sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
824 struct sfc_flow_parse_ctx *ctx,
825 struct rte_flow_error *error)
827 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
828 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
829 const struct sfc_mae_field_locator *flocs;
830 struct rte_flow_item_vlan supp_mask;
831 const uint8_t *spec = NULL;
832 const uint8_t *mask = NULL;
833 unsigned int nb_flocs;
836 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
838 if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
839 return rte_flow_error_set(error, ENOTSUP,
840 RTE_FLOW_ERROR_TYPE_ITEM, item,
841 "Can't match that many VLAN tags");
844 nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
845 flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
847 /* If parsing fails, this can remain incremented. */
848 ++pdata->nb_vlan_tags;
850 sfc_mae_item_build_supp_mask(flocs, nb_flocs,
851 &supp_mask, sizeof(supp_mask));
853 rc = sfc_flow_parse_init(item,
854 (const void **)&spec, (const void **)&mask,
855 (const void *)&supp_mask,
856 &rte_flow_item_vlan_mask,
857 sizeof(struct rte_flow_item_vlan), error);
862 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
863 const struct rte_flow_item_vlan *item_spec;
864 const struct rte_flow_item_vlan *item_mask;
866 item_spec = (const struct rte_flow_item_vlan *)spec;
867 item_mask = (const struct rte_flow_item_vlan *)mask;
869 ethertypes[pdata->nb_vlan_tags].value = item_spec->inner_type;
870 ethertypes[pdata->nb_vlan_tags].mask = item_mask->inner_type;
873 * The specification is empty. This is wrong in the case
874 * when there are more network patterns in line. Other
875 * than that, any Ethernet can match. All of that is
876 * checked at the end of parsing.
881 return sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
882 ctx_mae->match_spec_action, error);
885 static const struct sfc_mae_field_locator flocs_ipv4[] = {
887 EFX_MAE_FIELD_SRC_IP4_BE,
888 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
889 offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
892 EFX_MAE_FIELD_DST_IP4_BE,
893 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
894 offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
898 * This locator is used only for building supported fields mask.
899 * The field is handled by sfc_mae_rule_process_pattern_data().
901 SFC_MAE_FIELD_HANDLING_DEFERRED,
902 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
903 offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
906 EFX_MAE_FIELD_IP_TOS,
907 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
908 hdr.type_of_service),
909 offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
912 EFX_MAE_FIELD_IP_TTL,
913 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
914 offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
919 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
920 struct sfc_flow_parse_ctx *ctx,
921 struct rte_flow_error *error)
923 rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
924 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
925 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
926 struct rte_flow_item_ipv4 supp_mask;
927 const uint8_t *spec = NULL;
928 const uint8_t *mask = NULL;
931 sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
932 &supp_mask, sizeof(supp_mask));
934 rc = sfc_flow_parse_init(item,
935 (const void **)&spec, (const void **)&mask,
936 (const void *)&supp_mask,
937 &rte_flow_item_ipv4_mask,
938 sizeof(struct rte_flow_item_ipv4), error);
942 pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
943 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
946 const struct rte_flow_item_ipv4 *item_spec;
947 const struct rte_flow_item_ipv4 *item_mask;
949 item_spec = (const struct rte_flow_item_ipv4 *)spec;
950 item_mask = (const struct rte_flow_item_ipv4 *)mask;
952 pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
953 pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
958 return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
959 ctx_mae->match_spec_action, error);
962 static const struct sfc_mae_field_locator flocs_ipv6[] = {
964 EFX_MAE_FIELD_SRC_IP6_BE,
965 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
966 offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
969 EFX_MAE_FIELD_DST_IP6_BE,
970 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
971 offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
975 * This locator is used only for building supported fields mask.
976 * The field is handled by sfc_mae_rule_process_pattern_data().
978 SFC_MAE_FIELD_HANDLING_DEFERRED,
979 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
980 offsetof(struct rte_flow_item_ipv6, hdr.proto),
983 EFX_MAE_FIELD_IP_TTL,
984 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
985 offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
990 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
991 struct sfc_flow_parse_ctx *ctx,
992 struct rte_flow_error *error)
994 rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
995 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
996 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
997 struct rte_flow_item_ipv6 supp_mask;
998 const uint8_t *spec = NULL;
999 const uint8_t *mask = NULL;
1000 rte_be32_t vtc_flow_be;
1006 sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
1007 &supp_mask, sizeof(supp_mask));
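/*
* Additionally permit matching on the Traffic Class bits: vtc_flow is
* the first 32-bit word of struct rte_ipv6_hdr (and "hdr" is the first
* member of the item), so the memcpy() below lands in the right place.
*/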
1009 vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
1010 memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
1012 rc = sfc_flow_parse_init(item,
1013 (const void **)&spec, (const void **)&mask,
1014 (const void *)&supp_mask,
1015 &rte_flow_item_ipv6_mask,
1016 sizeof(struct rte_flow_item_ipv6), error);
1020 pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
1021 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1024 const struct rte_flow_item_ipv6 *item_spec;
1025 const struct rte_flow_item_ipv6 *item_mask;
1027 item_spec = (const struct rte_flow_item_ipv6 *)spec;
1028 item_mask = (const struct rte_flow_item_ipv6 *)mask;
1030 pdata->l3_next_proto_value = item_spec->hdr.proto;
1031 pdata->l3_next_proto_mask = item_mask->hdr.proto;
1036 rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
1037 ctx_mae->match_spec_action, error);
1041 memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
1042 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1043 tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1045 memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
1046 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1047 tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1049 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec_action,
1050 EFX_MAE_FIELD_IP_TOS,
1051 sizeof(tc_value), &tc_value,
1052 sizeof(tc_mask), &tc_mask);
1054 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1055 NULL, "Failed to process item fields");
1061 static const struct sfc_mae_field_locator flocs_tcp[] = {
1063 EFX_MAE_FIELD_L4_SPORT_BE,
1064 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
1065 offsetof(struct rte_flow_item_tcp, hdr.src_port),
1068 EFX_MAE_FIELD_L4_DPORT_BE,
1069 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
1070 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
1073 EFX_MAE_FIELD_TCP_FLAGS_BE,
1075 * The values have been picked intentionally since the
1076 * target MAE field is oversize (16 bit). This mapping
1077 * relies on the fact that the MAE field is big-endian.
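*
* In struct rte_tcp_hdr, the one-byte "data_off" is immediately
* followed by the one-byte "tcp_flags", so matching on the two of
* them as a single 16-bit value puts the flags in the low octet,
* as the big-endian MAE field expects.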
1079 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
1080 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
1081 offsetof(struct rte_flow_item_tcp, hdr.data_off),
1086 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
1087 struct sfc_flow_parse_ctx *ctx,
1088 struct rte_flow_error *error)
1090 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1091 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1092 struct rte_flow_item_tcp supp_mask;
1093 const uint8_t *spec = NULL;
1094 const uint8_t *mask = NULL;
1097 sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
1098 &supp_mask, sizeof(supp_mask));
1100 rc = sfc_flow_parse_init(item,
1101 (const void **)&spec, (const void **)&mask,
1102 (const void *)&supp_mask,
1103 &rte_flow_item_tcp_mask,
1104 sizeof(struct rte_flow_item_tcp), error);
1108 pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
1109 pdata->l3_next_proto_restriction_mask = 0xff;
1114 return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
1115 ctx_mae->match_spec_action, error);
1118 static const struct sfc_flow_item sfc_flow_items[] = {
1120 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
1122 * In terms of RTE flow, this item is a META one,
1123 * and its position in the pattern is don't care.
1125 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1126 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1127 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1128 .parse = sfc_mae_rule_parse_item_port_id,
1131 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
1133 * In terms of RTE flow, this item is a META one,
1134 * and its position in the pattern is don't care.
1136 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1137 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1138 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1139 .parse = sfc_mae_rule_parse_item_phy_port,
1142 .type = RTE_FLOW_ITEM_TYPE_PF,
1144 * In terms of RTE flow, this item is a META one,
1145 * and its position in the pattern is don't care.
1147 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1148 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1149 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1150 .parse = sfc_mae_rule_parse_item_pf,
1153 .type = RTE_FLOW_ITEM_TYPE_VF,
1155 * In terms of RTE flow, this item is a META one,
1156 * and its position in the pattern is don't care.
1158 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1159 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1160 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1161 .parse = sfc_mae_rule_parse_item_vf,
1164 .type = RTE_FLOW_ITEM_TYPE_ETH,
1165 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1166 .layer = SFC_FLOW_ITEM_L2,
1167 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1168 .parse = sfc_mae_rule_parse_item_eth,
1171 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1172 .prev_layer = SFC_FLOW_ITEM_L2,
1173 .layer = SFC_FLOW_ITEM_L2,
1174 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1175 .parse = sfc_mae_rule_parse_item_vlan,
1178 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1179 .prev_layer = SFC_FLOW_ITEM_L2,
1180 .layer = SFC_FLOW_ITEM_L3,
1181 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1182 .parse = sfc_mae_rule_parse_item_ipv4,
1185 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1186 .prev_layer = SFC_FLOW_ITEM_L2,
1187 .layer = SFC_FLOW_ITEM_L3,
1188 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1189 .parse = sfc_mae_rule_parse_item_ipv6,
1192 .type = RTE_FLOW_ITEM_TYPE_TCP,
1193 .prev_layer = SFC_FLOW_ITEM_L3,
1194 .layer = SFC_FLOW_ITEM_L4,
1195 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1196 .parse = sfc_mae_rule_parse_item_tcp,
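/*
* Taken together, the table above means that, for instance, a rule
* matching "PORT_ID / ETH / VLAN / IPV4 / TCP" can be parsed by this
* backend; TPID / EtherType / IP protocol consistency across the items
* is enforced later by sfc_mae_rule_process_pattern_data().
*/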
1201 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
1202 const struct rte_flow_item pattern[],
1203 struct sfc_flow_spec_mae *spec,
1204 struct rte_flow_error *error)
1206 struct sfc_mae_parse_ctx ctx_mae;
1207 struct sfc_flow_parse_ctx ctx;
1210 memset(&ctx_mae, 0, sizeof(ctx_mae));
1213 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
1215 &ctx_mae.match_spec_action);
1217 rc = rte_flow_error_set(error, rc,
1218 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1219 "Failed to initialise action rule match specification");
1220 goto fail_init_match_spec_action;
1223 ctx.type = SFC_FLOW_PARSE_CTX_MAE;
1226 rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
1227 pattern, &ctx, error);
1229 goto fail_parse_pattern;
1231 rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
1233 goto fail_process_pattern_data;
1235 if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
1236 rc = rte_flow_error_set(error, ENOTSUP,
1237 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1238 "Inconsistent pattern");
1239 goto fail_validate_match_spec_action;
1242 spec->match_spec = ctx_mae.match_spec_action;
1246 fail_validate_match_spec_action:
1247 fail_process_pattern_data:
1249 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
1251 fail_init_match_spec_action:
1256 * An action supported by MAE may correspond to a bundle of RTE flow actions,
1257 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
1258 * That is, related RTE flow actions need to be tracked as parts of a whole
1259 * so that they can be combined into a single action and submitted to the MAE
1260 * representation of a given rule's action set.
1262 * Each RTE flow action provided by an application gets classified as
1263 * one belonging to some bundle type. If an action is not supposed to
1264 * belong to any bundle, or if this action is END, it is described as
1265 * one belonging to a dummy bundle of type EMPTY.
1267 * A currently tracked bundle will be submitted if a repeating
1268 * action or an action of different bundle type follows.
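*
* For example (an illustrative list, not taken from this file), with
* "OF_POP_VLAN / OF_PUSH_VLAN / OF_SET_VLAN_VID / OF_SET_VLAN_PCP /
* PORT_ID / END" the three VLAN push related actions are accumulated
* in a single VLAN_PUSH bundle, which is submitted as one MAE action
* when PORT_ID is encountered.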
1271 enum sfc_mae_actions_bundle_type {
1272 SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
1273 SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
1276 struct sfc_mae_actions_bundle {
1277 enum sfc_mae_actions_bundle_type type;
1279 /* Indicates actions already tracked by the current bundle */
1280 uint64_t actions_mask;
1282 /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
1283 rte_be16_t vlan_push_tpid;
1284 rte_be16_t vlan_push_tci;
1288 * Combine configuration of RTE flow actions tracked by the bundle into a
1289 * single action and submit the result to MAE action set specification.
1290 * Do nothing in the case of dummy action bundle.
1293 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
1294 efx_mae_actions_t *spec)
1298 switch (bundle->type) {
1299 case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
1301 case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
1302 rc = efx_mae_action_set_populate_vlan_push(
1303 spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
1306 SFC_ASSERT(B_FALSE);
1314 * Given the type of the next RTE flow action in the line, decide
1315 * whether a new bundle is about to start, and, if this is the case,
1316 * submit and reset the current bundle.
1319 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
1320 struct sfc_mae_actions_bundle *bundle,
1321 efx_mae_actions_t *spec,
1322 struct rte_flow_error *error)
1324 enum sfc_mae_actions_bundle_type bundle_type_new;
1327 switch (action->type) {
1328 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
1329 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
1330 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
1331 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
1335 * Self-sufficient actions, including END, are handled in this
1336 * case. No checks for unsupported actions are needed here
1337 * because parsing doesn't occur at this point.
1339 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
1343 if (bundle_type_new != bundle->type ||
1344 (bundle->actions_mask & (1ULL << action->type)) != 0) {
1345 rc = sfc_mae_actions_bundle_submit(bundle, spec);
1349 memset(bundle, 0, sizeof(*bundle));
1352 bundle->type = bundle_type_new;
1357 return rte_flow_error_set(error, rc,
1358 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1359 "Failed to request the (group of) action(s)");
1363 sfc_mae_rule_parse_action_of_push_vlan(
1364 const struct rte_flow_action_of_push_vlan *conf,
1365 struct sfc_mae_actions_bundle *bundle)
1367 bundle->vlan_push_tpid = conf->ethertype;
1371 sfc_mae_rule_parse_action_of_set_vlan_vid(
1372 const struct rte_flow_action_of_set_vlan_vid *conf,
1373 struct sfc_mae_actions_bundle *bundle)
1375 bundle->vlan_push_tci |= (conf->vlan_vid &
1376 rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
1380 sfc_mae_rule_parse_action_of_set_vlan_pcp(
1381 const struct rte_flow_action_of_set_vlan_pcp *conf,
1382 struct sfc_mae_actions_bundle *bundle)
1384 uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
1385 RTE_LEN2MASK(3, uint8_t)) << 13;
1387 bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
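/*
* A note on the 802.1Q TCI layout assumed above: PCP occupies bits
* 15..13, DEI bit 12 and VID bits 11..0, hence the RTE_LEN2MASK(12, ...)
* mask for the VID and the shift by 13 for the PCP.
*/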
1391 sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
1392 efx_mae_actions_t *spec)
1394 return efx_mae_action_set_populate_mark(spec, conf->id);
1398 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
1399 const struct rte_flow_action_phy_port *conf,
1400 efx_mae_actions_t *spec)
1402 efx_mport_sel_t mport;
1406 if (conf->original != 0)
1407 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
1409 phy_port = conf->index;
1411 rc = efx_mae_mport_by_phy_port(phy_port, &mport);
1415 return efx_mae_action_set_populate_deliver(spec, &mport);
1419 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
1420 const struct rte_flow_action_vf *vf_conf,
1421 efx_mae_actions_t *spec)
1423 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1424 efx_mport_sel_t mport;
1428 if (vf_conf == NULL)
1429 vf = EFX_PCI_VF_INVALID;
1430 else if (vf_conf->original != 0)
1435 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
1439 return efx_mae_action_set_populate_deliver(spec, &mport);
1443 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
1444 const struct rte_flow_action_port_id *conf,
1445 efx_mae_actions_t *spec)
1447 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1448 struct sfc_mae *mae = &sa->mae;
1449 efx_mport_sel_t mport;
1453 port_id = (conf->original != 0) ? sas->port_id : conf->id;
1455 rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
1460 return efx_mae_action_set_populate_deliver(spec, &mport);
1464 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
1465 const struct rte_flow_action *action,
1466 struct sfc_mae_actions_bundle *bundle,
1467 efx_mae_actions_t *spec,
1468 struct rte_flow_error *error)
1472 switch (action->type) {
1473 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
1474 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
1475 bundle->actions_mask);
1476 rc = efx_mae_action_set_populate_vlan_pop(spec);
1478 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
1479 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
1480 bundle->actions_mask);
1481 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
1483 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
1484 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
1485 bundle->actions_mask);
1486 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
1488 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
1489 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
1490 bundle->actions_mask);
1491 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
1493 case RTE_FLOW_ACTION_TYPE_FLAG:
1494 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1495 bundle->actions_mask);
1496 rc = efx_mae_action_set_populate_flag(spec);
1498 case RTE_FLOW_ACTION_TYPE_MARK:
1499 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1500 bundle->actions_mask);
1501 rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
1503 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
1504 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
1505 bundle->actions_mask);
1506 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
1508 case RTE_FLOW_ACTION_TYPE_PF:
1509 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
1510 bundle->actions_mask);
1511 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
1513 case RTE_FLOW_ACTION_TYPE_VF:
1514 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
1515 bundle->actions_mask);
1516 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
1518 case RTE_FLOW_ACTION_TYPE_PORT_ID:
1519 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
1520 bundle->actions_mask);
1521 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
1523 case RTE_FLOW_ACTION_TYPE_DROP:
1524 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1525 bundle->actions_mask);
1526 rc = efx_mae_action_set_populate_drop(spec);
1529 return rte_flow_error_set(error, ENOTSUP,
1530 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1531 "Unsupported action");
1535 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
1536 NULL, "Failed to request the action");
1538 bundle->actions_mask |= (1ULL << action->type);
1545 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
1546 const struct rte_flow_action actions[],
1547 struct sfc_mae_action_set **action_setp,
1548 struct rte_flow_error *error)
1550 struct sfc_mae_actions_bundle bundle = {0};
1551 const struct rte_flow_action *action;
1552 efx_mae_actions_t *spec;
1555 if (actions == NULL) {
1556 return rte_flow_error_set(error, EINVAL,
1557 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1561 rc = efx_mae_action_set_spec_init(sa->nic, &spec);
1563 goto fail_action_set_spec_init;
1565 for (action = actions;
1566 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
1567 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
1569 goto fail_rule_parse_action;
1571 rc = sfc_mae_rule_parse_action(sa, action, &bundle, spec,
1574 goto fail_rule_parse_action;
1577 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
1579 goto fail_rule_parse_action;
1581 *action_setp = sfc_mae_action_set_attach(sa, spec);
1582 if (*action_setp != NULL) {
1583 efx_mae_action_set_spec_fini(sa->nic, spec);
1587 rc = sfc_mae_action_set_add(sa, spec, action_setp);
1589 goto fail_action_set_add;
1593 fail_action_set_add:
1594 fail_rule_parse_action:
1595 efx_mae_action_set_spec_fini(sa->nic, spec);
1597 fail_action_set_spec_init:
1599 rc = rte_flow_error_set(error, rc,
1600 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1601 NULL, "Failed to process the action");
1607 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
1608 const efx_mae_match_spec_t *left,
1609 const efx_mae_match_spec_t *right)
1611 bool have_same_class;
1614 rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
1617 return (rc == 0) ? have_same_class : false;
1621 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
1622 struct sfc_flow_spec_mae *spec)
1624 const struct rte_flow *entry;
1626 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
1627 const struct sfc_flow_spec *entry_spec = &entry->spec;
1628 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
1629 const efx_mae_match_spec_t *left = es_mae->match_spec;
1630 const efx_mae_match_spec_t *right = spec->match_spec;
1632 switch (entry_spec->type) {
1633 case SFC_FLOW_SPEC_FILTER:
1634 /* Ignore VNIC-level flows */
1636 case SFC_FLOW_SPEC_MAE:
1637 if (sfc_mae_rules_class_cmp(sa, left, right))
1645 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
1646 "support for inner frame pattern items is not guaranteed; "
1647 "other than that, the items are valid from SW standpoint");
1652 * Confirm that a given flow can be accepted by the FW.
1653 *
1654 * @param sa
1655 * Software adapter context
1656 * @param flow
1657 * Flow to be verified
1658 * @return
1659 * Zero on success and non-zero in the case of error.
1660 * A special value of EAGAIN indicates that the adapter is
1661 * not in started state. This state is compulsory because
1662 * it only makes sense to compare the rule class of the flow
1663 * being validated with classes of the active rules.
1664 * Such classes are known to be supported by the FW.
1667 sfc_mae_flow_verify(struct sfc_adapter *sa,
1668 struct rte_flow *flow)
1670 struct sfc_flow_spec *spec = &flow->spec;
1671 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
1673 SFC_ASSERT(sfc_adapter_is_locked(sa));
1675 if (sa->state != SFC_ADAPTER_STARTED)
1678 return sfc_mae_action_rule_class_verify(sa, spec_mae);
1682 sfc_mae_flow_insert(struct sfc_adapter *sa,
1683 struct rte_flow *flow)
1685 struct sfc_flow_spec *spec = &flow->spec;
1686 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
1687 struct sfc_mae_action_set *action_set = spec_mae->action_set;
1688 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
1691 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
1692 SFC_ASSERT(action_set != NULL);
1694 rc = sfc_mae_action_set_enable(sa, action_set);
1696 goto fail_action_set_enable;
1698 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
1699 NULL, &fw_rsrc->aset_id,
1700 &spec_mae->rule_id);
1702 goto fail_action_rule_insert;
1706 fail_action_rule_insert:
1707 (void)sfc_mae_action_set_disable(sa, action_set);
1709 fail_action_set_enable:
1714 sfc_mae_flow_remove(struct sfc_adapter *sa,
1715 struct rte_flow *flow)
1717 struct sfc_flow_spec *spec = &flow->spec;
1718 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
1719 struct sfc_mae_action_set *action_set = spec_mae->action_set;
1722 SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
1723 SFC_ASSERT(action_set != NULL);
1725 rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
1729 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
1731 return sfc_mae_action_set_disable(sa, action_set);