1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
12 #include <rte_bitops.h>
13 #include <rte_common.h>
14 #include <rte_vxlan.h>
20 #include "sfc_switch.h"
23 sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
24 efx_mport_sel_t *mportp)
26 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
28 return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
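/*
 * Attach the MAE: check FW support, initialise the MAE via libefx,
 * query its limits, register the entity m-port and an RTE switch
 * domain/port, and allocate the encap. header bounce buffer.
 */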
33 sfc_mae_attach(struct sfc_adapter *sa)
35 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
36 struct sfc_mae_switch_port_request switch_port_request = {0};
37 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
38 efx_mport_sel_t entity_mport;
39 struct sfc_mae *mae = &sa->mae;
40 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
41 efx_mae_limits_t limits;
44 sfc_log_init(sa, "entry");
46 if (!encp->enc_mae_supported) {
47 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
51 sfc_log_init(sa, "init MAE");
52 rc = efx_mae_init(sa->nic);
56 sfc_log_init(sa, "get MAE limits");
57 rc = efx_mae_get_limits(sa->nic, &limits);
59 goto fail_mae_get_limits;
61 sfc_log_init(sa, "assign entity MPORT");
62 rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
64 goto fail_mae_assign_entity_mport;
66 sfc_log_init(sa, "assign RTE switch domain");
67 rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
69 goto fail_mae_assign_switch_domain;
71 sfc_log_init(sa, "assign RTE switch port");
72 switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
73 switch_port_request.entity_mportp = &entity_mport;
75 * As of now, the driver does not support representors, so
76 * RTE ethdev MPORT simply matches that of the entity.
78 switch_port_request.ethdev_mportp = &entity_mport;
79 switch_port_request.ethdev_port_id = sas->port_id;
80 rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
82 &mae->switch_port_id);
84 goto fail_mae_assign_switch_port;
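/*
 * The bounce buffer is sized to the FW encap. header limit; it is
 * where sfc_mae_rule_parse_action_vxlan_encap() assembles an encap.
 * header before the result is compared with / copied into an encap.
 * header entry (see sfc_mae_encap_header_attach() / _add()).
 */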
86 sfc_log_init(sa, "allocate encap. header bounce buffer");
87 bounce_eh->buf_size = limits.eml_encap_header_size_limit;
88 bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
89 bounce_eh->buf_size, 0);
90 if (bounce_eh->buf == NULL)
91 goto fail_mae_alloc_bounce_eh;
93 mae->status = SFC_MAE_STATUS_SUPPORTED;
94 mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
95 mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
96 mae->encap_types_supported = limits.eml_encap_types_supported;
97 TAILQ_INIT(&mae->outer_rules);
98 TAILQ_INIT(&mae->encap_headers);
99 TAILQ_INIT(&mae->action_sets);
101 sfc_log_init(sa, "done");
105 fail_mae_alloc_bounce_eh:
106 fail_mae_assign_switch_port:
107 fail_mae_assign_switch_domain:
108 fail_mae_assign_entity_mport:
110 efx_mae_fini(sa->nic);
113 sfc_log_init(sa, "failed %d", rc);
119 sfc_mae_detach(struct sfc_adapter *sa)
121 struct sfc_mae *mae = &sa->mae;
122 enum sfc_mae_status status_prev = mae->status;
124 sfc_log_init(sa, "entry");
126 mae->nb_action_rule_prios_max = 0;
127 mae->status = SFC_MAE_STATUS_UNKNOWN;
129 if (status_prev != SFC_MAE_STATUS_SUPPORTED)
132 rte_free(mae->bounce_eh.buf);
134 efx_mae_fini(sa->nic);
136 sfc_log_init(sa, "done");
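/*
 * Outer rules, encap. headers and action sets below share a common
 * lifecycle: _attach() reuses an identical existing entry and bumps
 * its driver-level refcnt, _add() creates a new entry, _del() drops
 * the refcnt and frees the entry on last release, while _enable() and
 * _disable() manage the underlying FW resource via fw_rsrc.refcnt.
 */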
139 static struct sfc_mae_outer_rule *
140 sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
141 const efx_mae_match_spec_t *match_spec,
142 efx_tunnel_protocol_t encap_type)
144 struct sfc_mae_outer_rule *rule;
145 struct sfc_mae *mae = &sa->mae;
147 SFC_ASSERT(sfc_adapter_is_locked(sa));
149 TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
150 if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
151 rule->encap_type == encap_type) {
152 sfc_dbg(sa, "attaching to outer_rule=%p", rule);
162 sfc_mae_outer_rule_add(struct sfc_adapter *sa,
163 efx_mae_match_spec_t *match_spec,
164 efx_tunnel_protocol_t encap_type,
165 struct sfc_mae_outer_rule **rulep)
167 struct sfc_mae_outer_rule *rule;
168 struct sfc_mae *mae = &sa->mae;
170 SFC_ASSERT(sfc_adapter_is_locked(sa));
172 rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
177 rule->match_spec = match_spec;
178 rule->encap_type = encap_type;
180 rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
182 TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);
186 sfc_dbg(sa, "added outer_rule=%p", rule);
192 sfc_mae_outer_rule_del(struct sfc_adapter *sa,
193 struct sfc_mae_outer_rule *rule)
195 struct sfc_mae *mae = &sa->mae;
197 SFC_ASSERT(sfc_adapter_is_locked(sa));
198 SFC_ASSERT(rule->refcnt != 0);
202 if (rule->refcnt != 0)
205 if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
206 rule->fw_rsrc.refcnt != 0) {
207 sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
208 rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
211 efx_mae_match_spec_fini(sa->nic, rule->match_spec);
213 TAILQ_REMOVE(&mae->outer_rules, rule, entries);
216 sfc_dbg(sa, "deleted outer_rule=%p", rule);
220 sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
221 struct sfc_mae_outer_rule *rule,
222 efx_mae_match_spec_t *match_spec_action)
224 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
227 SFC_ASSERT(sfc_adapter_is_locked(sa));
229 if (fw_rsrc->refcnt == 0) {
230 SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
231 SFC_ASSERT(rule->match_spec != NULL);
233 rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
237 sfc_err(sa, "failed to enable outer_rule=%p: %s",
243 rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
246 if (fw_rsrc->refcnt == 0) {
247 (void)efx_mae_outer_rule_remove(sa->nic,
249 fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
252 sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));
257 if (fw_rsrc->refcnt == 0) {
258 sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
259 rule, fw_rsrc->rule_id.id);
268 sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
269 struct sfc_mae_outer_rule *rule)
271 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
274 SFC_ASSERT(sfc_adapter_is_locked(sa));
276 if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
277 fw_rsrc->refcnt == 0) {
278 sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
279 rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
283 if (fw_rsrc->refcnt == 1) {
284 rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
286 sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
287 rule, fw_rsrc->rule_id.id);
289 sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
290 rule, fw_rsrc->rule_id.id, strerror(rc));
292 fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
298 static struct sfc_mae_encap_header *
299 sfc_mae_encap_header_attach(struct sfc_adapter *sa,
300 const struct sfc_mae_bounce_eh *bounce_eh)
302 struct sfc_mae_encap_header *encap_header;
303 struct sfc_mae *mae = &sa->mae;
305 SFC_ASSERT(sfc_adapter_is_locked(sa));
307 TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
308 if (encap_header->size == bounce_eh->size &&
309 memcmp(encap_header->buf, bounce_eh->buf,
310 bounce_eh->size) == 0) {
311 sfc_dbg(sa, "attaching to encap_header=%p",
313 ++(encap_header->refcnt);
322 sfc_mae_encap_header_add(struct sfc_adapter *sa,
323 const struct sfc_mae_bounce_eh *bounce_eh,
324 struct sfc_mae_encap_header **encap_headerp)
326 struct sfc_mae_encap_header *encap_header;
327 struct sfc_mae *mae = &sa->mae;
329 SFC_ASSERT(sfc_adapter_is_locked(sa));
331 encap_header = rte_zmalloc("sfc_mae_encap_header",
332 sizeof(*encap_header), 0);
333 if (encap_header == NULL)
336 encap_header->size = bounce_eh->size;
338 encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
339 encap_header->size, 0);
340 if (encap_header->buf == NULL) {
341 rte_free(encap_header);
345 rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);
347 encap_header->refcnt = 1;
348 encap_header->type = bounce_eh->type;
349 encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;
351 TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);
353 *encap_headerp = encap_header;
355 sfc_dbg(sa, "added encap_header=%p", encap_header);
361 sfc_mae_encap_header_del(struct sfc_adapter *sa,
362 struct sfc_mae_encap_header *encap_header)
364 struct sfc_mae *mae = &sa->mae;
366 if (encap_header == NULL)
369 SFC_ASSERT(sfc_adapter_is_locked(sa));
370 SFC_ASSERT(encap_header->refcnt != 0);
372 --(encap_header->refcnt);
374 if (encap_header->refcnt != 0)
377 if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
378 encap_header->fw_rsrc.refcnt != 0) {
379 sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
380 encap_header, encap_header->fw_rsrc.eh_id.id,
381 encap_header->fw_rsrc.refcnt);
384 TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
385 rte_free(encap_header->buf);
386 rte_free(encap_header);
388 sfc_dbg(sa, "deleted encap_header=%p", encap_header);
392 sfc_mae_encap_header_enable(struct sfc_adapter *sa,
393 struct sfc_mae_encap_header *encap_header,
394 efx_mae_actions_t *action_set_spec)
396 struct sfc_mae_fw_rsrc *fw_rsrc;
399 if (encap_header == NULL)
402 SFC_ASSERT(sfc_adapter_is_locked(sa));
404 fw_rsrc = &encap_header->fw_rsrc;
406 if (fw_rsrc->refcnt == 0) {
407 SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
408 SFC_ASSERT(encap_header->buf != NULL);
409 SFC_ASSERT(encap_header->size != 0);
411 rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
416 sfc_err(sa, "failed to enable encap_header=%p: %s",
417 encap_header, strerror(rc));
422 rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
425 if (fw_rsrc->refcnt == 0) {
426 (void)efx_mae_encap_header_free(sa->nic,
428 fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
431 sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));
436 if (fw_rsrc->refcnt == 0) {
437 sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
438 encap_header, fw_rsrc->eh_id.id);
447 sfc_mae_encap_header_disable(struct sfc_adapter *sa,
448 struct sfc_mae_encap_header *encap_header)
450 struct sfc_mae_fw_rsrc *fw_rsrc;
453 if (encap_header == NULL)
456 SFC_ASSERT(sfc_adapter_is_locked(sa));
458 fw_rsrc = &encap_header->fw_rsrc;
460 if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
461 fw_rsrc->refcnt == 0) {
462 sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
463 encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
467 if (fw_rsrc->refcnt == 1) {
468 rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
470 sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
471 encap_header, fw_rsrc->eh_id.id);
473 sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
474 encap_header, fw_rsrc->eh_id.id, strerror(rc));
476 fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
482 static struct sfc_mae_action_set *
483 sfc_mae_action_set_attach(struct sfc_adapter *sa,
484 const struct sfc_mae_encap_header *encap_header,
485 const efx_mae_actions_t *spec)
487 struct sfc_mae_action_set *action_set;
488 struct sfc_mae *mae = &sa->mae;
490 SFC_ASSERT(sfc_adapter_is_locked(sa));
492 TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
493 if (action_set->encap_header == encap_header &&
494 efx_mae_action_set_specs_equal(action_set->spec, spec)) {
495 sfc_dbg(sa, "attaching to action_set=%p", action_set);
496 ++(action_set->refcnt);
505 sfc_mae_action_set_add(struct sfc_adapter *sa,
506 efx_mae_actions_t *spec,
507 struct sfc_mae_encap_header *encap_header,
508 struct sfc_mae_action_set **action_setp)
510 struct sfc_mae_action_set *action_set;
511 struct sfc_mae *mae = &sa->mae;
513 SFC_ASSERT(sfc_adapter_is_locked(sa));
515 action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
516 if (action_set == NULL)
519 action_set->refcnt = 1;
520 action_set->spec = spec;
521 action_set->encap_header = encap_header;
523 action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
525 TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
527 *action_setp = action_set;
529 sfc_dbg(sa, "added action_set=%p", action_set);
535 sfc_mae_action_set_del(struct sfc_adapter *sa,
536 struct sfc_mae_action_set *action_set)
538 struct sfc_mae *mae = &sa->mae;
540 SFC_ASSERT(sfc_adapter_is_locked(sa));
541 SFC_ASSERT(action_set->refcnt != 0);
543 --(action_set->refcnt);
545 if (action_set->refcnt != 0)
548 if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
549 action_set->fw_rsrc.refcnt != 0) {
550 sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
551 action_set, action_set->fw_rsrc.aset_id.id,
552 action_set->fw_rsrc.refcnt);
555 efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
556 sfc_mae_encap_header_del(sa, action_set->encap_header);
557 TAILQ_REMOVE(&mae->action_sets, action_set, entries);
558 rte_free(action_set);
560 sfc_dbg(sa, "deleted action_set=%p", action_set);
564 sfc_mae_action_set_enable(struct sfc_adapter *sa,
565 struct sfc_mae_action_set *action_set)
567 struct sfc_mae_encap_header *encap_header = action_set->encap_header;
568 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
571 SFC_ASSERT(sfc_adapter_is_locked(sa));
573 if (fw_rsrc->refcnt == 0) {
574 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
575 SFC_ASSERT(action_set->spec != NULL);
577 rc = sfc_mae_encap_header_enable(sa, encap_header,
582 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
585 sfc_mae_encap_header_disable(sa, encap_header);
587 sfc_err(sa, "failed to enable action_set=%p: %s",
588 action_set, strerror(rc));
593 sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
594 action_set, fw_rsrc->aset_id.id);
603 sfc_mae_action_set_disable(struct sfc_adapter *sa,
604 struct sfc_mae_action_set *action_set)
606 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
609 SFC_ASSERT(sfc_adapter_is_locked(sa));
611 if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
612 fw_rsrc->refcnt == 0) {
613 sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
614 action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
618 if (fw_rsrc->refcnt == 1) {
619 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
621 sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
622 action_set, fw_rsrc->aset_id.id);
624 sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
625 action_set, fw_rsrc->aset_id.id, strerror(rc));
627 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
629 sfc_mae_encap_header_disable(sa, action_set->encap_header);
636 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
637 struct rte_flow *flow)
639 struct sfc_flow_spec *spec;
640 struct sfc_flow_spec_mae *spec_mae;
650 spec_mae = &spec->mae;
652 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
654 if (spec_mae->outer_rule != NULL)
655 sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);
657 if (spec_mae->action_set != NULL)
658 sfc_mae_action_set_del(sa, spec_mae->action_set);
660 if (spec_mae->match_spec != NULL)
661 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
665 sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
667 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
668 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
669 const efx_mae_field_id_t field_ids[] = {
670 EFX_MAE_FIELD_VLAN0_PROTO_BE,
671 EFX_MAE_FIELD_VLAN1_PROTO_BE,
673 const struct sfc_mae_ethertype *et;
678 * In accordance with RTE flow API convention, the innermost L2
679 * item's "type" ("inner_type") is an L3 EtherType. If there is
680 * no L3 item, it's 0x0000/0x0000.
682 et = &pdata->ethertypes[pdata->nb_vlan_tags];
683 rc = efx_mae_match_spec_field_set(ctx->match_spec,
684 fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
686 (const uint8_t *)&et->value,
688 (const uint8_t *)&et->mask);
693 * sfc_mae_rule_parse_item_vlan() has already made sure
694 * that pdata->nb_vlan_tags does not exceed this figure.
696 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
698 for (i = 0; i < pdata->nb_vlan_tags; ++i) {
699 et = &pdata->ethertypes[i];
701 rc = efx_mae_match_spec_field_set(ctx->match_spec,
702 fremap[field_ids[i]],
704 (const uint8_t *)&et->value,
706 (const uint8_t *)&et->mask);
715 sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
716 struct rte_flow_error *error)
718 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
719 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
720 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
721 const rte_be16_t supported_tpids[] = {
722 /* VLAN standard TPID (always the first element) */
723 RTE_BE16(RTE_ETHER_TYPE_VLAN),
725 /* Double-tagging TPIDs */
726 RTE_BE16(RTE_ETHER_TYPE_QINQ),
727 RTE_BE16(RTE_ETHER_TYPE_QINQ1),
728 RTE_BE16(RTE_ETHER_TYPE_QINQ2),
729 RTE_BE16(RTE_ETHER_TYPE_QINQ3),
731 unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
732 unsigned int ethertype_idx;
733 const uint8_t *valuep;
734 const uint8_t *maskp;
737 if (pdata->innermost_ethertype_restriction.mask != 0 &&
738 pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
740 * If a single item VLAN is followed by a L3 item, value
741 * of "type" in item ETH can't be a double-tagging TPID.
743 nb_supported_tpids = 1;
747 * sfc_mae_rule_parse_item_vlan() has already made sure
748 * that pdata->nb_vlan_tags does not exceed this figure.
750 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
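/*
 * Validate the TPIDs collected so far: with two VLAN items, the
 * outermost tag's TPID (the ETH item's "type") must be one of the
 * double-tagging TPIDs, and the inner tag's TPID must be the standard
 * one; with a single tag, any supported TPID is accepted unless an L3
 * item follows (handled above by trimming nb_supported_tpids).
 */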
752 for (ethertype_idx = 0;
753 ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
754 unsigned int tpid_idx;
756 /* Exact match is supported only. */
757 if (ethertypes[ethertype_idx].mask != RTE_BE16(0xffff)) {
762 for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
763 tpid_idx < nb_supported_tpids; ++tpid_idx) {
764 if (ethertypes[ethertype_idx].value ==
765 supported_tpids[tpid_idx])
769 if (tpid_idx == nb_supported_tpids) {
774 nb_supported_tpids = 1;
777 if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
778 struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
781 et->mask = RTE_BE16(0xffff);
783 pdata->innermost_ethertype_restriction.value;
784 } else if (et->mask != RTE_BE16(0xffff) ||
786 pdata->innermost_ethertype_restriction.value) {
793 * Now, when the number of VLAN tags is known, set fields
794 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
795 * one is either a valid L3 EtherType (or 0x0000/0x0000),
796 * and the last two are valid TPIDs (or 0x0000/0x0000).
798 rc = sfc_mae_set_ethertypes(ctx);
802 if (pdata->l3_next_proto_restriction_mask == 0xff) {
803 if (pdata->l3_next_proto_mask == 0) {
804 pdata->l3_next_proto_mask = 0xff;
805 pdata->l3_next_proto_value =
806 pdata->l3_next_proto_restriction_value;
807 } else if (pdata->l3_next_proto_mask != 0xff ||
808 pdata->l3_next_proto_value !=
809 pdata->l3_next_proto_restriction_value) {
815 valuep = (const uint8_t *)&pdata->l3_next_proto_value;
816 maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
817 rc = efx_mae_match_spec_field_set(ctx->match_spec,
818 fremap[EFX_MAE_FIELD_IP_PROTO],
819 sizeof(pdata->l3_next_proto_value),
821 sizeof(pdata->l3_next_proto_mask),
829 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
830 "Failed to process pattern data");
834 sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
835 struct sfc_flow_parse_ctx *ctx,
836 struct rte_flow_error *error)
838 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
839 const struct rte_flow_item_port_id supp_mask = {
842 const void *def_mask = &rte_flow_item_port_id_mask;
843 const struct rte_flow_item_port_id *spec = NULL;
844 const struct rte_flow_item_port_id *mask = NULL;
845 efx_mport_sel_t mport_sel;
848 if (ctx_mae->match_mport_set) {
849 return rte_flow_error_set(error, ENOTSUP,
850 RTE_FLOW_ERROR_TYPE_ITEM, item,
851 "Can't handle multiple traffic source items");
854 rc = sfc_flow_parse_init(item,
855 (const void **)&spec, (const void **)&mask,
856 (const void *)&supp_mask, def_mask,
857 sizeof(struct rte_flow_item_port_id), error);
861 if (mask->id != supp_mask.id) {
862 return rte_flow_error_set(error, EINVAL,
863 RTE_FLOW_ERROR_TYPE_ITEM, item,
864 "Bad mask in the PORT_ID pattern item");
867 /* If "spec" is not set, could be any port ID */
871 if (spec->id > UINT16_MAX) {
872 return rte_flow_error_set(error, EOVERFLOW,
873 RTE_FLOW_ERROR_TYPE_ITEM, item,
874 "The port ID is too large");
877 rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
878 spec->id, &mport_sel);
880 return rte_flow_error_set(error, rc,
881 RTE_FLOW_ERROR_TYPE_ITEM, item,
882 "Can't find RTE ethdev by the port ID");
885 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
888 return rte_flow_error_set(error, rc,
889 RTE_FLOW_ERROR_TYPE_ITEM, item,
890 "Failed to set MPORT for the port ID");
893 ctx_mae->match_mport_set = B_TRUE;
899 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
900 struct sfc_flow_parse_ctx *ctx,
901 struct rte_flow_error *error)
903 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
904 const struct rte_flow_item_phy_port supp_mask = {
907 const void *def_mask = &rte_flow_item_phy_port_mask;
908 const struct rte_flow_item_phy_port *spec = NULL;
909 const struct rte_flow_item_phy_port *mask = NULL;
910 efx_mport_sel_t mport_v;
913 if (ctx_mae->match_mport_set) {
914 return rte_flow_error_set(error, ENOTSUP,
915 RTE_FLOW_ERROR_TYPE_ITEM, item,
916 "Can't handle multiple traffic source items");
919 rc = sfc_flow_parse_init(item,
920 (const void **)&spec, (const void **)&mask,
921 (const void *)&supp_mask, def_mask,
922 sizeof(struct rte_flow_item_phy_port), error);
926 if (mask->index != supp_mask.index) {
927 return rte_flow_error_set(error, EINVAL,
928 RTE_FLOW_ERROR_TYPE_ITEM, item,
929 "Bad mask in the PHY_PORT pattern item");
932 /* If "spec" is not set, could be any physical port */
936 rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
938 return rte_flow_error_set(error, rc,
939 RTE_FLOW_ERROR_TYPE_ITEM, item,
940 "Failed to convert the PHY_PORT index");
943 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
945 return rte_flow_error_set(error, rc,
946 RTE_FLOW_ERROR_TYPE_ITEM, item,
947 "Failed to set MPORT for the PHY_PORT");
950 ctx_mae->match_mport_set = B_TRUE;
956 sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
957 struct sfc_flow_parse_ctx *ctx,
958 struct rte_flow_error *error)
960 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
961 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
962 efx_mport_sel_t mport_v;
965 if (ctx_mae->match_mport_set) {
966 return rte_flow_error_set(error, ENOTSUP,
967 RTE_FLOW_ERROR_TYPE_ITEM, item,
968 "Can't handle multiple traffic source items");
971 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
974 return rte_flow_error_set(error, rc,
975 RTE_FLOW_ERROR_TYPE_ITEM, item,
976 "Failed to convert the PF ID");
979 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
981 return rte_flow_error_set(error, rc,
982 RTE_FLOW_ERROR_TYPE_ITEM, item,
983 "Failed to set MPORT for the PF");
986 ctx_mae->match_mport_set = B_TRUE;
992 sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
993 struct sfc_flow_parse_ctx *ctx,
994 struct rte_flow_error *error)
996 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
997 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
998 const struct rte_flow_item_vf supp_mask = {
1001 const void *def_mask = &rte_flow_item_vf_mask;
1002 const struct rte_flow_item_vf *spec = NULL;
1003 const struct rte_flow_item_vf *mask = NULL;
1004 efx_mport_sel_t mport_v;
1007 if (ctx_mae->match_mport_set) {
1008 return rte_flow_error_set(error, ENOTSUP,
1009 RTE_FLOW_ERROR_TYPE_ITEM, item,
1010 "Can't handle multiple traffic source items");
1013 rc = sfc_flow_parse_init(item,
1014 (const void **)&spec, (const void **)&mask,
1015 (const void *)&supp_mask, def_mask,
1016 sizeof(struct rte_flow_item_vf), error);
1020 if (mask->id != supp_mask.id) {
1021 return rte_flow_error_set(error, EINVAL,
1022 RTE_FLOW_ERROR_TYPE_ITEM, item,
1023 "Bad mask in the VF pattern item");
1027 * If "spec" is not set, the item requests any VF related to the
1028 * PF of the current DPDK port (but not the PF itself).
1029 * Reject this match criterion as unsupported.
1032 return rte_flow_error_set(error, EINVAL,
1033 RTE_FLOW_ERROR_TYPE_ITEM, item,
1034 "Bad spec in the VF pattern item");
1037 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
1039 return rte_flow_error_set(error, rc,
1040 RTE_FLOW_ERROR_TYPE_ITEM, item,
1041 "Failed to convert the PF + VF IDs");
1044 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1046 return rte_flow_error_set(error, rc,
1047 RTE_FLOW_ERROR_TYPE_ITEM, item,
1048 "Failed to set MPORT for the PF + VF");
1051 ctx_mae->match_mport_set = B_TRUE;
1057 * Having this field ID in a field locator means that this
1058 * locator cannot be used to actually set the field at the
1059 * time when the corresponding item gets encountered. Such
1060 * fields get stashed in the parsing context instead. This
1061 * is required to resolve dependencies between the stashed
1062 * fields. See sfc_mae_rule_process_pattern_data().
1064 #define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS
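/* EFX_MAE_FIELD_NIDS is not a valid field ID, hence its use as a sentinel. */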
1066 struct sfc_mae_field_locator {
1067 efx_mae_field_id_t field_id;
1069 /* Field offset in the corresponding rte_flow_item_ struct */
1074 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
1075 unsigned int nb_field_locators, void *mask_ptr,
1080 memset(mask_ptr, 0, mask_size);
1082 for (i = 0; i < nb_field_locators; ++i) {
1083 const struct sfc_mae_field_locator *fl = &field_locators[i];
1085 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
1086 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
1091 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
1092 unsigned int nb_field_locators, const uint8_t *spec,
1093 const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
1094 struct rte_flow_error *error)
1096 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1100 for (i = 0; i < nb_field_locators; ++i) {
1101 const struct sfc_mae_field_locator *fl = &field_locators[i];
1103 if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
1106 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1107 fremap[fl->field_id],
1108 fl->size, spec + fl->ofst,
1109 fl->size, mask + fl->ofst);
1115 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1116 NULL, "Failed to process item fields");
1122 static const struct sfc_mae_field_locator flocs_eth[] = {
1125 * This locator is used only for building supported fields mask.
1126 * The field is handled by sfc_mae_rule_process_pattern_data().
1128 SFC_MAE_FIELD_HANDLING_DEFERRED,
1129 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
1130 offsetof(struct rte_flow_item_eth, type),
1133 EFX_MAE_FIELD_ETH_DADDR_BE,
1134 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
1135 offsetof(struct rte_flow_item_eth, dst),
1138 EFX_MAE_FIELD_ETH_SADDR_BE,
1139 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
1140 offsetof(struct rte_flow_item_eth, src),
1145 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
1146 struct sfc_flow_parse_ctx *ctx,
1147 struct rte_flow_error *error)
1149 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1150 struct rte_flow_item_eth supp_mask;
1151 const uint8_t *spec = NULL;
1152 const uint8_t *mask = NULL;
1155 sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
1156 &supp_mask, sizeof(supp_mask));
1158 rc = sfc_flow_parse_init(item,
1159 (const void **)&spec, (const void **)&mask,
1160 (const void *)&supp_mask,
1161 &rte_flow_item_eth_mask,
1162 sizeof(struct rte_flow_item_eth), error);
1167 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1168 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1169 const struct rte_flow_item_eth *item_spec;
1170 const struct rte_flow_item_eth *item_mask;
1172 item_spec = (const struct rte_flow_item_eth *)spec;
1173 item_mask = (const struct rte_flow_item_eth *)mask;
1175 ethertypes[0].value = item_spec->type;
1176 ethertypes[0].mask = item_mask->type;
1179 * The specification is empty. This is wrong in the case
1180 * when there are more network patterns in line. Other
1181 * than that, any Ethernet can match. All of that is
1182 * checked at the end of parsing.
1187 return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
1191 static const struct sfc_mae_field_locator flocs_vlan[] = {
1194 EFX_MAE_FIELD_VLAN0_TCI_BE,
1195 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1196 offsetof(struct rte_flow_item_vlan, tci),
1200 * This locator is used only for building supported fields mask.
1201 * The field is handled by sfc_mae_rule_process_pattern_data().
1203 SFC_MAE_FIELD_HANDLING_DEFERRED,
1204 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1205 offsetof(struct rte_flow_item_vlan, inner_type),
1210 EFX_MAE_FIELD_VLAN1_TCI_BE,
1211 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1212 offsetof(struct rte_flow_item_vlan, tci),
1216 * This locator is used only for building supported fields mask.
1217 * The field is handled by sfc_mae_rule_process_pattern_data().
1219 SFC_MAE_FIELD_HANDLING_DEFERRED,
1220 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1221 offsetof(struct rte_flow_item_vlan, inner_type),
1226 sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
1227 struct sfc_flow_parse_ctx *ctx,
1228 struct rte_flow_error *error)
1230 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1231 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1232 const struct sfc_mae_field_locator *flocs;
1233 struct rte_flow_item_vlan supp_mask;
1234 const uint8_t *spec = NULL;
1235 const uint8_t *mask = NULL;
1236 unsigned int nb_flocs;
1239 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1241 if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1242 return rte_flow_error_set(error, ENOTSUP,
1243 RTE_FLOW_ERROR_TYPE_ITEM, item,
1244 "Can't match that many VLAN tags");
1247 nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
1248 flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
1250 /* If parsing fails, this can remain incremented. */
1251 ++pdata->nb_vlan_tags;
1253 sfc_mae_item_build_supp_mask(flocs, nb_flocs,
1254 &supp_mask, sizeof(supp_mask));
1256 rc = sfc_flow_parse_init(item,
1257 (const void **)&spec, (const void **)&mask,
1258 (const void *)&supp_mask,
1259 &rte_flow_item_vlan_mask,
1260 sizeof(struct rte_flow_item_vlan), error);
1265 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1266 const struct rte_flow_item_vlan *item_spec;
1267 const struct rte_flow_item_vlan *item_mask;
1269 item_spec = (const struct rte_flow_item_vlan *)spec;
1270 item_mask = (const struct rte_flow_item_vlan *)mask;
1272 ethertypes[pdata->nb_vlan_tags].value = item_spec->inner_type;
1273 ethertypes[pdata->nb_vlan_tags].mask = item_mask->inner_type;
1276 * The specification is empty. This is wrong in the case
1277 * when there are more network patterns in line. Other
1278 * than that, any Ethernet can match. All of that is
1279 * checked at the end of parsing.
1284 return sfc_mae_parse_item(flocs, nb_flocs, spec, mask, ctx_mae, error);
1287 static const struct sfc_mae_field_locator flocs_ipv4[] = {
1289 EFX_MAE_FIELD_SRC_IP4_BE,
1290 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
1291 offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
1294 EFX_MAE_FIELD_DST_IP4_BE,
1295 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
1296 offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
1300 * This locator is used only for building supported fields mask.
1301 * The field is handled by sfc_mae_rule_process_pattern_data().
1303 SFC_MAE_FIELD_HANDLING_DEFERRED,
1304 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
1305 offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
1308 EFX_MAE_FIELD_IP_TOS,
1309 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
1310 hdr.type_of_service),
1311 offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
1314 EFX_MAE_FIELD_IP_TTL,
1315 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
1316 offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
1321 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
1322 struct sfc_flow_parse_ctx *ctx,
1323 struct rte_flow_error *error)
1325 rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1326 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1327 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1328 struct rte_flow_item_ipv4 supp_mask;
1329 const uint8_t *spec = NULL;
1330 const uint8_t *mask = NULL;
1333 sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
1334 &supp_mask, sizeof(supp_mask));
1336 rc = sfc_flow_parse_init(item,
1337 (const void **)&spec, (const void **)&mask,
1338 (const void *)&supp_mask,
1339 &rte_flow_item_ipv4_mask,
1340 sizeof(struct rte_flow_item_ipv4), error);
1344 pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
1345 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1348 const struct rte_flow_item_ipv4 *item_spec;
1349 const struct rte_flow_item_ipv4 *item_mask;
1351 item_spec = (const struct rte_flow_item_ipv4 *)spec;
1352 item_mask = (const struct rte_flow_item_ipv4 *)mask;
1354 pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
1355 pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
1360 return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
1364 static const struct sfc_mae_field_locator flocs_ipv6[] = {
1366 EFX_MAE_FIELD_SRC_IP6_BE,
1367 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
1368 offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
1371 EFX_MAE_FIELD_DST_IP6_BE,
1372 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
1373 offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
1377 * This locator is used only for building supported fields mask.
1378 * The field is handled by sfc_mae_rule_process_pattern_data().
1380 SFC_MAE_FIELD_HANDLING_DEFERRED,
1381 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
1382 offsetof(struct rte_flow_item_ipv6, hdr.proto),
1385 EFX_MAE_FIELD_IP_TTL,
1386 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
1387 offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
1392 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
1393 struct sfc_flow_parse_ctx *ctx,
1394 struct rte_flow_error *error)
1396 rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1397 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1398 const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
1399 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1400 struct rte_flow_item_ipv6 supp_mask;
1401 const uint8_t *spec = NULL;
1402 const uint8_t *mask = NULL;
1403 rte_be32_t vtc_flow_be;
1409 sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
1410 &supp_mask, sizeof(supp_mask));
1412 vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
1413 memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
1415 rc = sfc_flow_parse_init(item,
1416 (const void **)&spec, (const void **)&mask,
1417 (const void *)&supp_mask,
1418 &rte_flow_item_ipv6_mask,
1419 sizeof(struct rte_flow_item_ipv6), error);
1423 pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
1424 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1427 const struct rte_flow_item_ipv6 *item_spec;
1428 const struct rte_flow_item_ipv6 *item_mask;
1430 item_spec = (const struct rte_flow_item_ipv6 *)spec;
1431 item_mask = (const struct rte_flow_item_ipv6 *)mask;
1433 pdata->l3_next_proto_value = item_spec->hdr.proto;
1434 pdata->l3_next_proto_mask = item_mask->hdr.proto;
1439 rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
1444 memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
1445 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1446 tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1448 memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
1449 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1450 tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1452 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1453 fremap[EFX_MAE_FIELD_IP_TOS],
1454 sizeof(tc_value), &tc_value,
1455 sizeof(tc_mask), &tc_mask);
1457 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1458 NULL, "Failed to process item fields");
1464 static const struct sfc_mae_field_locator flocs_tcp[] = {
1466 EFX_MAE_FIELD_L4_SPORT_BE,
1467 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
1468 offsetof(struct rte_flow_item_tcp, hdr.src_port),
1471 EFX_MAE_FIELD_L4_DPORT_BE,
1472 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
1473 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
1476 EFX_MAE_FIELD_TCP_FLAGS_BE,
1478 * The values have been picked intentionally since the
1479 * target MAE field is oversize (16 bit). This mapping
1480 * relies on the fact that the MAE field is big-endian.
1482 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
1483 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
1484 offsetof(struct rte_flow_item_tcp, hdr.data_off),
1489 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
1490 struct sfc_flow_parse_ctx *ctx,
1491 struct rte_flow_error *error)
1493 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1494 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1495 struct rte_flow_item_tcp supp_mask;
1496 const uint8_t *spec = NULL;
1497 const uint8_t *mask = NULL;
1501 * When encountered among outermost items, item TCP is invalid.
1502 * Check which match specification is being constructed now.
1504 if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
1505 return rte_flow_error_set(error, EINVAL,
1506 RTE_FLOW_ERROR_TYPE_ITEM, item,
1507 "TCP in outer frame is invalid");
1510 sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
1511 &supp_mask, sizeof(supp_mask));
1513 rc = sfc_flow_parse_init(item,
1514 (const void **)&spec, (const void **)&mask,
1515 (const void *)&supp_mask,
1516 &rte_flow_item_tcp_mask,
1517 sizeof(struct rte_flow_item_tcp), error);
1521 pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
1522 pdata->l3_next_proto_restriction_mask = 0xff;
1527 return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
1531 static const struct sfc_mae_field_locator flocs_udp[] = {
1533 EFX_MAE_FIELD_L4_SPORT_BE,
1534 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
1535 offsetof(struct rte_flow_item_udp, hdr.src_port),
1538 EFX_MAE_FIELD_L4_DPORT_BE,
1539 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
1540 offsetof(struct rte_flow_item_udp, hdr.dst_port),
1545 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
1546 struct sfc_flow_parse_ctx *ctx,
1547 struct rte_flow_error *error)
1549 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1550 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1551 struct rte_flow_item_udp supp_mask;
1552 const uint8_t *spec = NULL;
1553 const uint8_t *mask = NULL;
1556 sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
1557 &supp_mask, sizeof(supp_mask));
1559 rc = sfc_flow_parse_init(item,
1560 (const void **)&spec, (const void **)&mask,
1561 (const void *)&supp_mask,
1562 &rte_flow_item_udp_mask,
1563 sizeof(struct rte_flow_item_udp), error);
1567 pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
1568 pdata->l3_next_proto_restriction_mask = 0xff;
1573 return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
1577 static const struct sfc_mae_field_locator flocs_tunnel[] = {
1580 * The size and offset values are relevant
1581 * for Geneve and NVGRE, too.
1583 .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
1584 .ofst = offsetof(struct rte_flow_item_vxlan, vni),
1589 * An auxiliary registry which allows using non-encap. field IDs
1590 * directly when building a match specification of type ACTION.
1592 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
1594 static const efx_mae_field_id_t field_ids_no_remap[] = {
1595 #define FIELD_ID_NO_REMAP(_field) \
1596 [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
1598 FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
1599 FIELD_ID_NO_REMAP(ETH_SADDR_BE),
1600 FIELD_ID_NO_REMAP(ETH_DADDR_BE),
1601 FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
1602 FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
1603 FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
1604 FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
1605 FIELD_ID_NO_REMAP(SRC_IP4_BE),
1606 FIELD_ID_NO_REMAP(DST_IP4_BE),
1607 FIELD_ID_NO_REMAP(IP_PROTO),
1608 FIELD_ID_NO_REMAP(IP_TOS),
1609 FIELD_ID_NO_REMAP(IP_TTL),
1610 FIELD_ID_NO_REMAP(SRC_IP6_BE),
1611 FIELD_ID_NO_REMAP(DST_IP6_BE),
1612 FIELD_ID_NO_REMAP(L4_SPORT_BE),
1613 FIELD_ID_NO_REMAP(L4_DPORT_BE),
1614 FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
1616 #undef FIELD_ID_NO_REMAP
1620 * An auxiliary registry which allows using "ENC" field IDs
1621 * when building a match specification of type OUTER.
1623 * See sfc_mae_rule_encap_parse_init().
1625 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
1626 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
1627 [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
1629 FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
1630 FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
1631 FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
1632 FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
1633 FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
1634 FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
1635 FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
1636 FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
1637 FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
1638 FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
1639 FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
1640 FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
1641 FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
1642 FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
1643 FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
1644 FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
1646 #undef FIELD_ID_REMAP_TO_ENCAP
1650 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
1651 struct sfc_flow_parse_ctx *ctx,
1652 struct rte_flow_error *error)
1654 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1655 uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
1656 uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
1657 const struct rte_flow_item_vxlan *vxp;
1658 uint8_t supp_mask[sizeof(uint64_t)];
1659 const uint8_t *spec = NULL;
1660 const uint8_t *mask = NULL;
1664 * We're about to start processing inner frame items.
1665 * Process pattern data that has been deferred so far
1666 * and reset pattern data storage.
1668 rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
1672 memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
1674 sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
1675 &supp_mask, sizeof(supp_mask));
1678 * This tunnel item was preliminarily detected by
1679 * sfc_mae_rule_encap_parse_init(). Default mask
1680 * was also picked by that helper. Use it here.
1682 rc = sfc_flow_parse_init(item,
1683 (const void **)&spec, (const void **)&mask,
1684 (const void *)&supp_mask,
1685 ctx_mae->tunnel_def_mask,
1686 ctx_mae->tunnel_def_mask_size, error);
1691 * This item and later ones comprise a
1692 * match specification of type ACTION.
1694 ctx_mae->match_spec = ctx_mae->match_spec_action;
1696 /* This item and later ones use non-encap. EFX MAE field IDs. */
1697 ctx_mae->field_ids_remap = field_ids_no_remap;
1703 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
1704 * Copy 24-bit VNI, which is BE, at offset 1 in it.
1705 * The extra byte is 0 both in the mask and in the value.
1707 vxp = (const struct rte_flow_item_vxlan *)spec;
1708 memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
1710 vxp = (const struct rte_flow_item_vxlan *)mask;
1711 memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
1713 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1714 EFX_MAE_FIELD_ENC_VNET_ID_BE,
1715 sizeof(vnet_id_v), vnet_id_v,
1716 sizeof(vnet_id_m), vnet_id_m);
1718 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1719 item, "Failed to set VXLAN VNI");
1725 static const struct sfc_flow_item sfc_flow_items[] = {
1727 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
1729 * In terms of RTE flow, this item is a META one,
1730 * and its position in the pattern is don't care.
1732 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1733 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1734 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1735 .parse = sfc_mae_rule_parse_item_port_id,
1738 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
1740 * In terms of RTE flow, this item is a META one,
1741 * and its position in the pattern is don't care.
1743 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1744 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1745 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1746 .parse = sfc_mae_rule_parse_item_phy_port,
1749 .type = RTE_FLOW_ITEM_TYPE_PF,
1751 * In terms of RTE flow, this item is a META one,
1752 * and its position in the pattern is don't care.
1754 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1755 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1756 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1757 .parse = sfc_mae_rule_parse_item_pf,
1760 .type = RTE_FLOW_ITEM_TYPE_VF,
1762 * In terms of RTE flow, this item is a META one,
1763 * and its position in the pattern is don't care.
1765 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1766 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1767 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1768 .parse = sfc_mae_rule_parse_item_vf,
1771 .type = RTE_FLOW_ITEM_TYPE_ETH,
1772 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1773 .layer = SFC_FLOW_ITEM_L2,
1774 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1775 .parse = sfc_mae_rule_parse_item_eth,
1778 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1779 .prev_layer = SFC_FLOW_ITEM_L2,
1780 .layer = SFC_FLOW_ITEM_L2,
1781 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1782 .parse = sfc_mae_rule_parse_item_vlan,
1785 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1786 .prev_layer = SFC_FLOW_ITEM_L2,
1787 .layer = SFC_FLOW_ITEM_L3,
1788 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1789 .parse = sfc_mae_rule_parse_item_ipv4,
1792 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1793 .prev_layer = SFC_FLOW_ITEM_L2,
1794 .layer = SFC_FLOW_ITEM_L3,
1795 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1796 .parse = sfc_mae_rule_parse_item_ipv6,
1799 .type = RTE_FLOW_ITEM_TYPE_TCP,
1800 .prev_layer = SFC_FLOW_ITEM_L3,
1801 .layer = SFC_FLOW_ITEM_L4,
1802 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1803 .parse = sfc_mae_rule_parse_item_tcp,
1806 .type = RTE_FLOW_ITEM_TYPE_UDP,
1807 .prev_layer = SFC_FLOW_ITEM_L3,
1808 .layer = SFC_FLOW_ITEM_L4,
1809 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1810 .parse = sfc_mae_rule_parse_item_udp,
1813 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
1814 .prev_layer = SFC_FLOW_ITEM_L4,
1815 .layer = SFC_FLOW_ITEM_START_LAYER,
1816 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1817 .parse = sfc_mae_rule_parse_item_tunnel,
1820 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
1821 .prev_layer = SFC_FLOW_ITEM_L4,
1822 .layer = SFC_FLOW_ITEM_START_LAYER,
1823 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1824 .parse = sfc_mae_rule_parse_item_tunnel,
1827 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
1828 .prev_layer = SFC_FLOW_ITEM_L3,
1829 .layer = SFC_FLOW_ITEM_START_LAYER,
1830 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1831 .parse = sfc_mae_rule_parse_item_tunnel,
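/*
 * Note that the tunnel items (VXLAN, GENEVE, NVGRE) reset the layer to
 * START_LAYER: items following them describe the inner frame and are
 * parsed into the action rule match specification.
 */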
1836 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
1837 struct sfc_mae_parse_ctx *ctx,
1838 struct sfc_mae_outer_rule **rulep,
1839 struct rte_flow_error *error)
1841 struct sfc_mae_outer_rule *rule;
1844 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
1849 SFC_ASSERT(ctx->match_spec_outer != NULL);
1851 if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
1852 return rte_flow_error_set(error, ENOTSUP,
1853 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1854 "Inconsistent pattern (outer)");
1857 *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
1859 if (*rulep != NULL) {
1860 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
1862 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
1863 ctx->encap_type, rulep);
1865 return rte_flow_error_set(error, rc,
1866 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1867 "Failed to process the pattern");
1871 /* The spec has now been tracked by the outer rule entry. */
1872 ctx->match_spec_outer = NULL;
1875 * Depending on whether we reuse an existing outer rule or create a
1876 * new one (see above), outer rule ID is either a valid value or
1877 * EFX_MAE_RSRC_ID_INVALID. Set it in the action rule match
1878 * specification (and the full mask, too) in order to have correct
1879 * class comparisons of the new rule with existing ones.
1880 * Also, action rule match specification will be validated shortly,
1881 * and having the full mask set for outer rule ID indicates that we
1882 * will use this field, and support for this field has to be checked.
1885 rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
1886 &rule->fw_rsrc.rule_id);
1888 sfc_mae_outer_rule_del(sa, *rulep);
1891 return rte_flow_error_set(error, rc,
1892 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1893 "Failed to process the pattern");
1900 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
1901 const struct rte_flow_item pattern[],
1902 struct sfc_mae_parse_ctx *ctx,
1903 struct rte_flow_error *error)
1905 struct sfc_mae *mae = &sa->mae;
1908 if (pattern == NULL) {
1909 rte_flow_error_set(error, EINVAL,
1910 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1916 switch (pattern->type) {
1917 case RTE_FLOW_ITEM_TYPE_VXLAN:
1918 ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
1919 ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
1920 ctx->tunnel_def_mask_size =
1921 sizeof(rte_flow_item_vxlan_mask);
1923 case RTE_FLOW_ITEM_TYPE_GENEVE:
1924 ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
1925 ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
1926 ctx->tunnel_def_mask_size =
1927 sizeof(rte_flow_item_geneve_mask);
1929 case RTE_FLOW_ITEM_TYPE_NVGRE:
1930 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1931 ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
1932 ctx->tunnel_def_mask_size =
1933 sizeof(rte_flow_item_nvgre_mask);
1935 case RTE_FLOW_ITEM_TYPE_END:
1945 if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
1948 if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
1949 return rte_flow_error_set(error, ENOTSUP,
1950 RTE_FLOW_ERROR_TYPE_ITEM,
1951 pattern, "Unsupported tunnel item");
1954 if (ctx->priority >= mae->nb_outer_rule_prios_max) {
1955 return rte_flow_error_set(error, ENOTSUP,
1956 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1957 NULL, "Unsupported priority level");
1960 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
1961 &ctx->match_spec_outer);
1963 return rte_flow_error_set(error, rc,
1964 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1965 "Failed to initialise outer rule match specification");
1968 /* Outermost items comprise a match specification of type OUTER. */
1969 ctx->match_spec = ctx->match_spec_outer;
1971 /* Outermost items use "ENC" EFX MAE field IDs. */
1972 ctx->field_ids_remap = field_ids_remap_to_encap;
1978 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
1979 struct sfc_mae_parse_ctx *ctx)
1981 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1984 if (ctx->match_spec_outer != NULL)
1985 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
1989 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
1990 const struct rte_flow_item pattern[],
1991 struct sfc_flow_spec_mae *spec,
1992 struct rte_flow_error *error)
1994 struct sfc_mae_parse_ctx ctx_mae;
1995 struct sfc_flow_parse_ctx ctx;
1998 memset(&ctx_mae, 0, sizeof(ctx_mae));
1999 ctx_mae.priority = spec->priority;
2002 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
2004 &ctx_mae.match_spec_action);
2006 rc = rte_flow_error_set(error, rc,
2007 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2008 "Failed to initialise action rule match specification");
2009 goto fail_init_match_spec_action;
2013 * As a preliminary setting, assume that there is no encapsulation
2014 * in the pattern. That is, pattern items are about to comprise a
2015 * match specification of type ACTION and use non-encap. field IDs.
2017 * sfc_mae_rule_encap_parse_init() below may override this.
2019 ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2020 ctx_mae.match_spec = ctx_mae.match_spec_action;
2021 ctx_mae.field_ids_remap = field_ids_no_remap;
2023 ctx.type = SFC_FLOW_PARSE_CTX_MAE;
2026 rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
2028 goto fail_encap_parse_init;
2030 rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
2031 pattern, &ctx, error);
2033 goto fail_parse_pattern;
2035 rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
2037 goto fail_process_pattern_data;
2039 rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
2041 goto fail_process_outer;
2043 if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
2044 rc = rte_flow_error_set(error, ENOTSUP,
2045 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2046 "Inconsistent pattern");
2047 goto fail_validate_match_spec_action;
2050 spec->match_spec = ctx_mae.match_spec_action;
2054 fail_validate_match_spec_action:
2056 fail_process_pattern_data:
2058 sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
2060 fail_encap_parse_init:
2061 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
2063 fail_init_match_spec_action:
2068 * An action supported by MAE may correspond to a bundle of RTE flow actions,
2069 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
2070 * That is, related RTE flow actions need to be tracked as parts of a whole
2071 * so that they can be combined into a single action and submitted to MAE
2072 * representation of a given rule's action set.
2074 * Each RTE flow action provided by an application gets classified as
2075 * one belonging to some bundle type. If an action is not supposed to
2076 * belong to any bundle, or if this action is END, it is described as
2077 * one belonging to a dummy bundle of type EMPTY.
2079 * A currently tracked bundle will be submitted if a repeating
2080 * action or an action of different bundle type follows.
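 *
 * For instance, OF_PUSH_VLAN, OF_SET_VLAN_VID and OF_SET_VLAN_PCP
 * followed by END are tracked as a single VLAN_PUSH bundle and are
 * submitted as one MAE VLAN push action.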
2083 enum sfc_mae_actions_bundle_type {
2084 SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
2085 SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
2088 struct sfc_mae_actions_bundle {
2089 enum sfc_mae_actions_bundle_type type;
2091 /* Indicates actions already tracked by the current bundle */
2092 uint64_t actions_mask;
2094 /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
2095 rte_be16_t vlan_push_tpid;
2096 rte_be16_t vlan_push_tci;
2100 * Combine configuration of RTE flow actions tracked by the bundle into a
2101 * single action and submit the result to MAE action set specification.
2102 * Do nothing in the case of dummy action bundle.
2105 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
2106 efx_mae_actions_t *spec)
2110 switch (bundle->type) {
2111 case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
2113 case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
2114 rc = efx_mae_action_set_populate_vlan_push(
2115 spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
2118 SFC_ASSERT(B_FALSE);
2126 * Given the type of the next RTE flow action in the line, decide
2127 * whether a new bundle is about to start, and, if this is the case,
2128 * submit and reset the current bundle.
2131 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
2132 struct sfc_mae_actions_bundle *bundle,
2133 efx_mae_actions_t *spec,
2134 struct rte_flow_error *error)
2136 enum sfc_mae_actions_bundle_type bundle_type_new;
2139 switch (action->type) {
2140 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2141 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2142 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2143 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
2147 * Self-sufficient actions, including END, are handled in this
2148 * case. No checks for unsupported actions are needed here
2149 * because parsing doesn't occur at this point.
2151 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
2155 if (bundle_type_new != bundle->type ||
2156 (bundle->actions_mask & (1ULL << action->type)) != 0) {
2157 rc = sfc_mae_actions_bundle_submit(bundle, spec);
2161 memset(bundle, 0, sizeof(*bundle));
2164 bundle->type = bundle_type_new;
2169 return rte_flow_error_set(error, rc,
2170 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2171 "Failed to request the (group of) action(s)");
2175 sfc_mae_rule_parse_action_of_push_vlan(
2176 const struct rte_flow_action_of_push_vlan *conf,
2177 struct sfc_mae_actions_bundle *bundle)
2179 bundle->vlan_push_tpid = conf->ethertype;
2183 sfc_mae_rule_parse_action_of_set_vlan_vid(
2184 const struct rte_flow_action_of_set_vlan_vid *conf,
2185 struct sfc_mae_actions_bundle *bundle)
2187 bundle->vlan_push_tci |= (conf->vlan_vid &
2188 rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
2192 sfc_mae_rule_parse_action_of_set_vlan_pcp(
2193 const struct rte_flow_action_of_set_vlan_pcp *conf,
2194 struct sfc_mae_actions_bundle *bundle)
2196 uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
2197 RTE_LEN2MASK(3, uint8_t)) << 13;
2199 bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
2202 struct sfc_mae_parsed_item {
2203 const struct rte_flow_item *item;
2204 size_t proto_header_ofst;
        size_t proto_header_size;
};

/*
2209 * For each 16-bit word of the given header, override
2210 * bits enforced by the corresponding 16-bit mask.
 */
static void
sfc_mae_header_force_item_masks(uint8_t *header_buf,
2214 const struct sfc_mae_parsed_item *parsed_items,
                                unsigned int nb_parsed_items)
{
        unsigned int item_idx;

2219 for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
2220 const struct sfc_mae_parsed_item *parsed_item;
2221 const struct rte_flow_item *item;
                size_t proto_header_size;
                size_t ofst;

                parsed_item = &parsed_items[item_idx];
2226 proto_header_size = parsed_item->proto_header_size;
2227 item = parsed_item->item;
                for (ofst = 0; ofst < proto_header_size;
                     ofst += sizeof(rte_be16_t)) {
                        rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
                        const rte_be16_t *w_maskp;
                        const rte_be16_t *w_specp;

                        w_maskp = RTE_PTR_ADD(item->mask, ofst);
                        w_specp = RTE_PTR_ADD(item->spec, ofst);

                        *wp &= ~(*w_maskp);
                        *wp |= (*w_specp & *w_maskp);
                }

                header_buf += proto_header_size;
        }
}
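
/*
 * Worked example (illustrative): if a 16-bit header word currently holds
 * 0xABCD while the item provides spec word 0x1234 under mask word 0xFF00,
 * the word becomes (0xABCD & ~0xFF00) | (0x1234 & 0xFF00) = 0x12CD, i.e.
 * only the mask-covered bits are forced to the values from the spec.
 */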
2246 #define SFC_IPV4_TTL_DEF 0x40
2247 #define SFC_IPV6_VTC_FLOW_DEF 0x60000000
2248 #define SFC_IPV6_HOP_LIMITS_DEF 0xff
2249 #define SFC_VXLAN_FLAGS_DEF 0x08000000
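
/*
 * Informal notes on the defaults above: 0x40 is a TTL of 64; 0x60000000
 * puts version 6 in the topmost nibble of the IPv6 VTC/flow word; 0xff is
 * the maximum hop limit; 0x08000000, once written as a big-endian 32-bit
 * word, sets the VXLAN "I" flag (VNI valid) in the first header byte.
 */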

static int
sfc_mae_rule_parse_action_vxlan_encap(
2253 struct sfc_mae *mae,
2254 const struct rte_flow_action_vxlan_encap *conf,
2255 efx_mae_actions_t *spec,
                            struct rte_flow_error *error)
{
2258 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
2259 struct rte_flow_item *pattern = conf->definition;
2260 uint8_t *buf = bounce_eh->buf;
2262 /* This array will keep track of non-VOID pattern items. */
        struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
                                                2 /* VLAN tags */ +
                                                1 /* IPv4 or IPv6 */ +
                                                1 /* UDP */ +
                                                1 /* VXLAN */];
        unsigned int nb_parsed_items = 0;
2270 size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
2271 uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
2272 sizeof(struct rte_ipv6_hdr))];
2273 struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
2274 struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
2275 struct rte_vxlan_hdr *vxlan = NULL;
2276 struct rte_udp_hdr *udp = NULL;
2277 unsigned int nb_vlan_tags = 0;
2278 size_t next_proto_ofst = 0;
        size_t ethertype_ofst = 0;
        uint64_t exp_items;

2282 if (pattern == NULL) {
2283 return rte_flow_error_set(error, EINVAL,
2284 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                "The encap. header definition is NULL");
        }

2288 bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
2289 bounce_eh->size = 0;

        /*
         * Process pattern items and remember non-VOID ones.
2293 * Defer applying masks until after the complete header
2294 * has been built from the pattern items.
         */
        exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);

2298 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
2299 struct sfc_mae_parsed_item *parsed_item;
2300 const uint64_t exp_items_extra_vlan[] = {
                        RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
                };
2303 size_t proto_header_size;
2304 rte_be16_t *ethertypep;
                uint8_t *next_protop;
                uint8_t *buf_cur;

2308 if (pattern->spec == NULL) {
2309 return rte_flow_error_set(error, EINVAL,
2310 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                        "NULL item spec in the encap. header");
                }

2314 if (pattern->mask == NULL) {
2315 return rte_flow_error_set(error, EINVAL,
2316 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                        "NULL item mask in the encap. header");
                }

2320 if (pattern->last != NULL) {
2321 /* This is not a match pattern, so disallow range. */
2322 return rte_flow_error_set(error, EINVAL,
2323 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                        "Range item in the encap. header");
                }

2327 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
                        /* Handle VOID separately, for clarity. */
                        continue;
                }

2332 if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
2333 return rte_flow_error_set(error, ENOTSUP,
2334 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                        "Unexpected item in the encap. header");
                }

2338 parsed_item = &parsed_items[nb_parsed_items];
2339 buf_cur = buf + bounce_eh->size;
2341 switch (pattern->type) {
2342 case RTE_FLOW_ITEM_TYPE_ETH:
2343 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
2345 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
2348 proto_header_size = sizeof(struct rte_ether_hdr);
2350 ethertype_ofst = eth_ethertype_ofst;
2352 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
2353 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
                                    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
                        break;
2356 case RTE_FLOW_ITEM_TYPE_VLAN:
2357 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
2359 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
2362 proto_header_size = sizeof(struct rte_vlan_hdr);
2364 ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
2365 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
2367 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2368 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
                        ethertype_ofst = bounce_eh->size +
                                         offsetof(struct rte_vlan_hdr, eth_proto);

2374 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2375 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
                        exp_items |= exp_items_extra_vlan[nb_vlan_tags];

                        ++nb_vlan_tags;
                        break;
2380 case RTE_FLOW_ITEM_TYPE_IPV4:
2381 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
2383 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
2386 proto_header_size = sizeof(struct rte_ipv4_hdr);
2388 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2389 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
                        next_proto_ofst = bounce_eh->size +
                                          offsetof(struct rte_ipv4_hdr, next_proto_id);

2395 ipv4 = (struct rte_ipv4_hdr *)buf_cur;
                        exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
                        break;
2399 case RTE_FLOW_ITEM_TYPE_IPV6:
2400 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
2402 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
2405 proto_header_size = sizeof(struct rte_ipv6_hdr);
2407 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2408 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2410 next_proto_ofst = bounce_eh->size +
2411 offsetof(struct rte_ipv6_hdr, proto);
2413 ipv6 = (struct rte_ipv6_hdr *)buf_cur;
                        exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
                        break;
2417 case RTE_FLOW_ITEM_TYPE_UDP:
2418 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
2420 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
2423 proto_header_size = sizeof(struct rte_udp_hdr);
2425 next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
2426 *next_protop = IPPROTO_UDP;
2428 udp = (struct rte_udp_hdr *)buf_cur;
                        exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
                        break;
2432 case RTE_FLOW_ITEM_TYPE_VXLAN:
2433 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
2435 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
2438 proto_header_size = sizeof(struct rte_vxlan_hdr);
2440 vxlan = (struct rte_vxlan_hdr *)buf_cur;
2442 udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
                        udp->dgram_len = RTE_BE16(sizeof(*udp) +
                                                  sizeof(*vxlan));
                        udp->dgram_cksum = 0;

                        exp_items = 0;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                        "Unknown item in the encap. header");
                }

2455 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
2456 return rte_flow_error_set(error, E2BIG,
2457 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                        "The encap. header is too big");
                }

2461 if ((proto_header_size & 1) != 0) {
2462 return rte_flow_error_set(error, EINVAL,
2463 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                        "Odd layer size in the encap. header");
                }

2467 rte_memcpy(buf_cur, pattern->spec, proto_header_size);
2468 bounce_eh->size += proto_header_size;
2470 parsed_item->item = pattern;
                parsed_item->proto_header_size = proto_header_size;
                ++nb_parsed_items;
        }

2475 if (exp_items != 0) {
2476 /* Parsing item VXLAN would have reset exp_items to 0. */
2477 return rte_flow_error_set(error, ENOTSUP,
2478 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                "No item VXLAN in the encap. header");
        }

2482 /* One of the pointers (ipv4, ipv6) refers to a dummy area. */
2483 ipv4->version_ihl = RTE_IPV4_VHL_DEF;
2484 ipv4->time_to_live = SFC_IPV4_TTL_DEF;
        ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
                                      sizeof(*vxlan));
2487 /* The HW cannot compute this checksum. */
2488 ipv4->hdr_checksum = 0;
2489 ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
2491 ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
2492 ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
2493 ipv6->payload_len = udp->dgram_len;
2495 vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
2497 /* Take care of the masks. */
2498 sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
        return (spec != NULL) ? efx_mae_action_set_populate_encap(spec) : 0;
}
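
/*
 * Illustrative sketch (not from the original sources): an encap. header
 * definition which the parser above accepts could be built as follows.
 * All names below are local to the example:
 *
 *     struct rte_flow_item_eth eth_spec = { .. outer MAC addresses .. };
 *     struct rte_flow_item_ipv4 ipv4_spec = { .. outer IP addresses .. };
 *     struct rte_flow_item_udp udp_spec = { .. outer source port .. };
 *     struct rte_flow_item_vxlan vxlan_spec = { .. VNI .. };
 *
 *     struct rte_flow_item defn[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *           .spec = &eth_spec, .mask = &rte_flow_item_eth_mask },
 *         { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *           .spec = &ipv4_spec, .mask = &rte_flow_item_ipv4_mask },
 *         { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *           .spec = &udp_spec, .mask = &rte_flow_item_udp_mask },
 *         { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *           .spec = &vxlan_spec, .mask = &rte_flow_item_vxlan_mask },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *
 *     struct rte_flow_action_vxlan_encap conf = { .definition = defn };
 *
 * Every non-END item must carry both spec and mask, must not use "last",
 * and the items must follow the ETH [/ VLAN [/ VLAN]] / IPv4 or IPv6 /
 * UDP / VXLAN order enforced through exp_items above.
 */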

static int
sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
                               efx_mae_actions_t *spec)
{
        return efx_mae_action_set_populate_mark(spec, conf->id);
}

static int
sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
                                   const struct rte_flow_action_phy_port *conf,
                                   efx_mae_actions_t *spec)
{
        efx_mport_sel_t mport;
        uint32_t phy_port;
        int rc;

        if (conf->original != 0)
                phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
        else
                phy_port = conf->index;

        rc = efx_mae_mport_by_phy_port(phy_port, &mport);
        if (rc != 0)
                return rc;

        return efx_mae_action_set_populate_deliver(spec, &mport);
}

static int
2532 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
2533 const struct rte_flow_action_vf *vf_conf,
                                efx_mae_actions_t *spec)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        efx_mport_sel_t mport;
        uint32_t vf;
        int rc;

        if (vf_conf == NULL)
                vf = EFX_PCI_VF_INVALID;
        else if (vf_conf->original != 0)
                vf = encp->enc_vf;
        else
                vf = vf_conf->id;

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
        if (rc != 0)
                return rc;

        return efx_mae_action_set_populate_deliver(spec, &mport);
}
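
/*
 * Informal example: RTE_FLOW_ACTION_TYPE_PF (vf_conf == NULL) requests
 * delivery to the m-port of the PF itself, whereas RTE_FLOW_ACTION_TYPE_VF
 * with conf->original == 0 and conf->id == 2 requests delivery to VF #2
 * of the same PF.
 */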

static int
sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
2557 const struct rte_flow_action_port_id *conf,
                                  efx_mae_actions_t *spec)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_mae *mae = &sa->mae;
        efx_mport_sel_t mport;
        uint16_t port_id;
        int rc;

        port_id = (conf->original != 0) ? sas->port_id : conf->id;

        rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
                                           port_id, &mport);
        if (rc != 0)
                return rc;

        return efx_mae_action_set_populate_deliver(spec, &mport);
}
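
/*
 * Informal note: for RTE_FLOW_ACTION_TYPE_PORT_ID, conf->id is an RTE
 * ethdev port ID (the flow's own port is used when conf->original is set);
 * sfc_mae_switch_port_by_ethdev() translates it to the corresponding MAE
 * m-port within the adapter's switch domain before the DELIVER action is
 * populated.
 */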

static int
sfc_mae_rule_parse_action(struct sfc_adapter *sa,
2578 const struct rte_flow_action *action,
2579 const struct sfc_mae_outer_rule *outer_rule,
2580 struct sfc_mae_actions_bundle *bundle,
2581 efx_mae_actions_t *spec,
                          struct rte_flow_error *error)
{
        bool custom_error = B_FALSE;
        int rc = 0;

2587 switch (action->type) {
2588 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2589 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
2590 bundle->actions_mask);
                if (outer_rule == NULL ||
                    outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
                        rc = EINVAL;
                else
                        rc = efx_mae_action_set_populate_decap(spec);
                break;
2597 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2598 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
2599 bundle->actions_mask);
                rc = efx_mae_action_set_populate_vlan_pop(spec);
                break;
2602 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2603 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
2604 bundle->actions_mask);
                sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
                break;
2607 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2608 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
2609 bundle->actions_mask);
                sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
                break;
2612 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2613 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
2614 bundle->actions_mask);
                sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
                break;
2617 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2618 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
2619 bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
                                                           action->conf,
                                                           spec, error);
                custom_error = B_TRUE;
                break;
2625 case RTE_FLOW_ACTION_TYPE_FLAG:
2626 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
2627 bundle->actions_mask);
                rc = efx_mae_action_set_populate_flag(spec);
                break;
2630 case RTE_FLOW_ACTION_TYPE_MARK:
2631 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
2632 bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
                break;
2635 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
2636 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
2637 bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
                break;
2640 case RTE_FLOW_ACTION_TYPE_PF:
2641 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
2642 bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
                break;
2645 case RTE_FLOW_ACTION_TYPE_VF:
2646 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
2647 bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
                break;
2650 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2651 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
2652 bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
                break;
2655 case RTE_FLOW_ACTION_TYPE_DROP:
2656 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
2657 bundle->actions_mask);
                rc = efx_mae_action_set_populate_drop(spec);
                break;
        default:
2661 return rte_flow_error_set(error, ENOTSUP,
2662 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                "Unsupported action");
        }

        if (rc == 0) {
2667 bundle->actions_mask |= (1ULL << action->type);
2668 } else if (!custom_error) {
2669 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "Failed to request the action");
        }

        return rc;
}

static void
sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
{
        bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
}

static int
2683 sfc_mae_process_encap_header(struct sfc_adapter *sa,
2684 const struct sfc_mae_bounce_eh *bounce_eh,
                             struct sfc_mae_encap_header **encap_headerp)
{
        if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
                *encap_headerp = NULL;
                return 0;
        }

        *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
        if (*encap_headerp != NULL)
                return 0;

        return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
}

static int
2700 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
2701 const struct rte_flow_action actions[],
2702 struct sfc_flow_spec_mae *spec_mae,
                           struct rte_flow_error *error)
{
2705 struct sfc_mae_encap_header *encap_header = NULL;
2706 struct sfc_mae_actions_bundle bundle = {0};
2707 const struct rte_flow_action *action;
2708 struct sfc_mae *mae = &sa->mae;
        efx_mae_actions_t *spec;
        int rc;

        if (actions == NULL) {
2715 return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
                                "NULL actions");
        }

        rc = efx_mae_action_set_spec_init(sa->nic, &spec);
        if (rc != 0)
                goto fail_action_set_spec_init;

2724 /* Cleanup after previous encap. header bounce buffer usage. */
2725 sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
2727 for (action = actions;
2728 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
                rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
                if (rc != 0)
                        goto fail_rule_parse_action;

                rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule,
                                               &bundle, spec, error);
                if (rc != 0)
                        goto fail_rule_parse_action;
        }

        rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
        if (rc != 0)
                goto fail_rule_parse_action;

        rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
        if (rc != 0)
                goto fail_process_encap_header;

        spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
                                                         spec);
        if (spec_mae->action_set != NULL) {
                sfc_mae_encap_header_del(sa, encap_header);
                efx_mae_action_set_spec_fini(sa->nic, spec);
                return 0;
        }

        rc = sfc_mae_action_set_add(sa, spec, encap_header,
                                    &spec_mae->action_set);
        if (rc != 0)
                goto fail_action_set_add;

        return 0;

2762 fail_action_set_add:
2763 sfc_mae_encap_header_del(sa, encap_header);
2765 fail_process_encap_header:
2766 fail_rule_parse_action:
2767 efx_mae_action_set_spec_fini(sa->nic, spec);
2769 fail_action_set_spec_init:
2770 if (rc > 0 && rte_errno == 0) {
2771 rc = rte_flow_error_set(error, rc,
2772 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                NULL, "Failed to process the action");
        }

        return rc;
}
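
/*
 * Illustrative example (not from the original sources): for a transfer rule
 * with actions { VXLAN_DECAP, MARK { .id = 42 }, PORT_ID { .id = 1 }, END },
 * the loop above parses three EMPTY-bundle actions: decap and mark are
 * populated straight into the MAE action set specification, and PORT_ID is
 * resolved to an m-port and populated as a DELIVER action; the final bundle
 * sync is then a no-op.
 */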

static bool
sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
2780 const efx_mae_match_spec_t *left,
                        const efx_mae_match_spec_t *right)
{
        bool have_same_class;
        int rc;

        rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
                                           &have_same_class);

        return (rc == 0) ? have_same_class : false;
}
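
/*
 * Note (informal): two match specifications are of the same class when they
 * engage the same set of match fields and masks and differ only in the
 * match values; comparing against already active rules of the same class is
 * what makes the verification below meaningful without FW-level validation.
 */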

static int
sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
                                struct sfc_mae_outer_rule *rule)
{
2796 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
2797 struct sfc_mae_outer_rule *entry;
        struct sfc_mae *mae = &sa->mae;

        if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
                /* An active rule is reused. Its class is wittingly valid. */
                return 0;
        }

        TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
                              sfc_mae_outer_rules, entries) {
                const efx_mae_match_spec_t *left = entry->match_spec;
                const efx_mae_match_spec_t *right = rule->match_spec;

                if (entry == rule)
                        continue;

                if (sfc_mae_rules_class_cmp(sa, left, right))
                        return 0;
        }

2817 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2818 "support for outer frame pattern items is not guaranteed; "
                 "other than that, the items are valid from SW standpoint");
        return 0;
}

static int
2824 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
                                 struct sfc_flow_spec_mae *spec)
{
        const struct rte_flow *entry;

2829 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
2830 const struct sfc_flow_spec *entry_spec = &entry->spec;
2831 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
2832 const efx_mae_match_spec_t *left = es_mae->match_spec;
2833 const efx_mae_match_spec_t *right = spec->match_spec;
                switch (entry_spec->type) {
                case SFC_FLOW_SPEC_FILTER:
                        /* Ignore VNIC-level flows */
                        break;
                case SFC_FLOW_SPEC_MAE:
                        if (sfc_mae_rules_class_cmp(sa, left, right))
                                return 0;
                        break;
                default:
                        SFC_ASSERT(B_FALSE);
                }
        }

2848 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2849 "support for inner frame pattern items is not guaranteed; "
                 "other than that, the items are valid from SW standpoint");
        return 0;
}

/**
 * Confirm that a given flow can be accepted by the FW.
 *
 * @param sa
 *    Software adapter context
 * @param flow
 *    Flow to be verified
 * @return
 *    Zero on success and non-zero in the case of error.
2863 * A special value of EAGAIN indicates that the adapter is
2864 * not in started state. This state is compulsory because
2865 * it only makes sense to compare the rule class of the flow
2866 * being validated with classes of the active rules.
2867 * Such classes are wittingly supported by the FW.
 */
int
sfc_mae_flow_verify(struct sfc_adapter *sa,
                    struct rte_flow *flow)
{
2873 struct sfc_flow_spec *spec = &flow->spec;
2874 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
        struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (sa->state != SFC_ADAPTER_STARTED)
                return EAGAIN;

        if (outer_rule != NULL) {
                rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
                if (rc != 0)
                        return rc;
        }

        return sfc_mae_action_rule_class_verify(sa, spec_mae);
}

int
sfc_mae_flow_insert(struct sfc_adapter *sa,
                    struct rte_flow *flow)
{
2896 struct sfc_flow_spec *spec = &flow->spec;
2897 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2898 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2899 struct sfc_mae_action_set *action_set = spec_mae->action_set;
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
2904 SFC_ASSERT(action_set != NULL);
2906 if (outer_rule != NULL) {
2907 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
                                               spec_mae->match_spec);
                if (rc != 0)
                        goto fail_outer_rule_enable;
        }

        rc = sfc_mae_action_set_enable(sa, action_set);
        if (rc != 0)
                goto fail_action_set_enable;

2917 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
2918 NULL, &fw_rsrc->aset_id,
                                        &spec_mae->rule_id);
        if (rc != 0)
                goto fail_action_rule_insert;

        sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
                flow, spec_mae->rule_id.id);

        return 0;

2928 fail_action_rule_insert:
2929 sfc_mae_action_set_disable(sa, action_set);
2931 fail_action_set_enable:
2932 if (outer_rule != NULL)
2933 sfc_mae_outer_rule_disable(sa, outer_rule);
fail_outer_rule_enable:
        return rc;
}

int
2940 sfc_mae_flow_remove(struct sfc_adapter *sa,
                    struct rte_flow *flow)
{
2943 struct sfc_flow_spec *spec = &flow->spec;
2944 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2945 struct sfc_mae_action_set *action_set = spec_mae->action_set;
        struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
        int rc;

        SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
2950 SFC_ASSERT(action_set != NULL);
        rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
        if (rc != 0) {
                sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
                        flow, spec_mae->rule_id.id, strerror(rc));
        } else {
                sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
                        flow, spec_mae->rule_id.id);
        }
2959 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
2961 sfc_mae_action_set_disable(sa, action_set);
2963 if (outer_rule != NULL)
                sfc_mae_outer_rule_disable(sa, outer_rule);

        return 0;
}