/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_vxlan.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_mae_counter.h"
#include "sfc_log.h"
#include "sfc_switch.h"
#include "sfc_service.h"
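/*
 * The "entity" m-port is the traffic endpoint of this ethdev in the
 * match-action engine (MAE) switch. It is derived from the PF/VF
 * numbers of the underlying PCIe function below.
 */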
static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
			    efx_mport_sel_t *mportp)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
					      mportp);
}
static int
sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
			      uint32_t nb_counters_max)
{
	return sfc_mae_counters_init(&registry->counters, nb_counters_max);
}
static void
sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
{
	sfc_mae_counters_fini(&registry->counters);
}
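/*
 * Bring up MAE support: initialise the engine via libefx, query its
 * limits, set up the counter registry, register the port in the RTE
 * switch infrastructure and allocate the bounce buffer used to
 * assemble encapsulation headers.
 */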
int
sfc_mae_attach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_mae_switch_port_request switch_port_request = {0};
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_mport_sel_t entity_mport;
	struct sfc_mae *mae = &sa->mae;
	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
	efx_mae_limits_t limits;
	int rc;

	sfc_log_init(sa, "entry");

	if (!encp->enc_mae_supported) {
		mae->status = SFC_MAE_STATUS_UNSUPPORTED;
		return 0;
	}

	sfc_log_init(sa, "init MAE");
	rc = efx_mae_init(sa->nic);
	if (rc != 0)
		goto fail_mae_init;

	sfc_log_init(sa, "get MAE limits");
	rc = efx_mae_get_limits(sa->nic, &limits);
	if (rc != 0)
		goto fail_mae_get_limits;

	sfc_log_init(sa, "init MAE counter registry");
	rc = sfc_mae_counter_registry_init(&mae->counter_registry,
					   limits.eml_max_n_counters);
	if (rc != 0) {
		sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
			limits.eml_max_n_counters, rte_strerror(rc));
		goto fail_counter_registry_init;
	}

	sfc_log_init(sa, "assign entity MPORT");
	rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
	if (rc != 0)
		goto fail_mae_assign_entity_mport;

	sfc_log_init(sa, "assign RTE switch domain");
	rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
	if (rc != 0)
		goto fail_mae_assign_switch_domain;

	sfc_log_init(sa, "assign RTE switch port");
	switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
	switch_port_request.entity_mportp = &entity_mport;
	/*
	 * As of now, the driver does not support representors, so
	 * RTE ethdev MPORT simply matches that of the entity.
	 */
	switch_port_request.ethdev_mportp = &entity_mport;
	switch_port_request.ethdev_port_id = sas->port_id;
	rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
					&switch_port_request,
					&mae->switch_port_id);
	if (rc != 0)
		goto fail_mae_assign_switch_port;

	sfc_log_init(sa, "allocate encap. header bounce buffer");
	bounce_eh->buf_size = limits.eml_encap_header_size_limit;
	bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
				    bounce_eh->buf_size, 0);
	if (bounce_eh->buf == NULL) {
		rc = ENOMEM;
		goto fail_mae_alloc_bounce_eh;
	}

	mae->status = SFC_MAE_STATUS_SUPPORTED;
	mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
	mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
	mae->encap_types_supported = limits.eml_encap_types_supported;
	TAILQ_INIT(&mae->outer_rules);
	TAILQ_INIT(&mae->encap_headers);
	TAILQ_INIT(&mae->action_sets);

	sfc_log_init(sa, "done");

	return 0;

fail_mae_alloc_bounce_eh:
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
	sfc_mae_counter_registry_fini(&mae->counter_registry);

fail_counter_registry_init:
fail_mae_get_limits:
	efx_mae_fini(sa->nic);

fail_mae_init:
	sfc_log_init(sa, "failed %d", rc);

	return rc;
}
void
sfc_mae_detach(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;
	enum sfc_mae_status status_prev = mae->status;

	sfc_log_init(sa, "entry");

	mae->nb_action_rule_prios_max = 0;
	mae->status = SFC_MAE_STATUS_UNKNOWN;

	if (status_prev != SFC_MAE_STATUS_SUPPORTED)
		return;

	rte_free(mae->bounce_eh.buf);
	sfc_mae_counter_registry_fini(&mae->counter_registry);

	efx_mae_fini(sa->nic);

	sfc_log_init(sa, "done");
}
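/*
 * Outer rules, encap. headers and action sets below share one lifecycle
 * pattern. Each entry has a driver-level refcnt (attach/add vs. del)
 * which tracks users of the list entry, plus an fw_rsrc refcnt
 * (enable/disable) which tracks users of the FW resource: the latter
 * is allocated on the first enable and freed on the last disable.
 */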
static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
			  const efx_mae_match_spec_t *match_spec,
			  efx_tunnel_protocol_t encap_type)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
		if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
		    rule->encap_type == encap_type) {
			sfc_dbg(sa, "attaching to outer_rule=%p", rule);
			++(rule->refcnt);
			return rule;
		}
	}

	return NULL;
}
static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
		       efx_mae_match_spec_t *match_spec,
		       efx_tunnel_protocol_t encap_type,
		       struct sfc_mae_outer_rule **rulep)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
	if (rule == NULL)
		return ENOMEM;

	rule->refcnt = 1;
	rule->match_spec = match_spec;
	rule->encap_type = encap_type;

	rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

	*rulep = rule;

	sfc_dbg(sa, "added outer_rule=%p", rule);

	return 0;
}
static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
		       struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(rule->refcnt != 0);

	--(rule->refcnt);

	if (rule->refcnt != 0)
		return;

	if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    rule->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
			rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
	}

	efx_mae_match_spec_fini(sa->nic, rule->match_spec);

	TAILQ_REMOVE(&mae->outer_rules, rule, entries);
	rte_free(rule);

	sfc_dbg(sa, "deleted outer_rule=%p", rule);
}
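/*
 * On the first enable, insert the rule in FW; in addition, fill the
 * resulting OR_ID into the caller's action rule match specification
 * so that the action rule only matches packets which hit this outer
 * rule first.
 */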
static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
			  struct sfc_mae_outer_rule *rule,
			  efx_mae_match_spec_t *match_spec_action)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(rule->match_spec != NULL);

		rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
					       rule->encap_type,
					       &fw_rsrc->rule_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable outer_rule=%p: %s",
				rule, strerror(rc));
			return rc;
		}
	}

	rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
						  &fw_rsrc->rule_id);
	if (rc != 0) {
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_outer_rule_remove(sa->nic,
							&fw_rsrc->rule_id);
			fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
		}

		sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));

		return rc;
	}

	if (fw_rsrc->refcnt == 0) {
		sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
			rule, fw_rsrc->rule_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}
static void
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
			   struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
			rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
				rule, fw_rsrc->rule_id.id);
		} else {
			sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
				rule, fw_rsrc->rule_id.id, strerror(rc));
		}
		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);
}
static struct sfc_mae_encap_header *
sfc_mae_encap_header_attach(struct sfc_adapter *sa,
			    const struct sfc_mae_bounce_eh *bounce_eh)
{
	struct sfc_mae_encap_header *encap_header;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
		if (encap_header->size == bounce_eh->size &&
		    memcmp(encap_header->buf, bounce_eh->buf,
			   bounce_eh->size) == 0) {
			sfc_dbg(sa, "attaching to encap_header=%p",
				encap_header);
			++(encap_header->refcnt);
			return encap_header;
		}
	}

	return NULL;
}
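/*
 * The bounce buffer is reused for every flow being parsed, so the
 * entry added below keeps a private copy of its contents, which is
 * what sfc_mae_encap_header_attach() compares against.
 */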
static int
sfc_mae_encap_header_add(struct sfc_adapter *sa,
			 const struct sfc_mae_bounce_eh *bounce_eh,
			 struct sfc_mae_encap_header **encap_headerp)
{
	struct sfc_mae_encap_header *encap_header;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	encap_header = rte_zmalloc("sfc_mae_encap_header",
				   sizeof(*encap_header), 0);
	if (encap_header == NULL)
		return ENOMEM;

	encap_header->size = bounce_eh->size;

	encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
				       encap_header->size, 0);
	if (encap_header->buf == NULL) {
		rte_free(encap_header);
		return ENOMEM;
	}

	rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);

	encap_header->refcnt = 1;
	encap_header->type = bounce_eh->type;
	encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);

	*encap_headerp = encap_header;

	sfc_dbg(sa, "added encap_header=%p", encap_header);

	return 0;
}
static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
			 struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae *mae = &sa->mae;

	if (encap_header == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(encap_header->refcnt != 0);

	--(encap_header->refcnt);

	if (encap_header->refcnt != 0)
		return;

	if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    encap_header->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
			encap_header, encap_header->fw_rsrc.eh_id.id,
			encap_header->fw_rsrc.refcnt);
	}

	TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
	rte_free(encap_header->buf);
	rte_free(encap_header);

	sfc_dbg(sa, "deleted encap_header=%p", encap_header);
}
static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
			    struct sfc_mae_encap_header *encap_header,
			    efx_mae_actions_t *action_set_spec)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(encap_header->buf != NULL);
		SFC_ASSERT(encap_header->size != 0);

		rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
						encap_header->buf,
						encap_header->size,
						&fw_rsrc->eh_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable encap_header=%p: %s",
				encap_header, strerror(rc));
			return rc;
		}
	}

	rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
					      &fw_rsrc->eh_id);
	if (rc != 0) {
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_encap_header_free(sa->nic,
							&fw_rsrc->eh_id);
			fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
		}

		sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));

		return rc;
	}

	if (fw_rsrc->refcnt == 0) {
		sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
			encap_header, fw_rsrc->eh_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}
static void
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
			     struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
			encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
				encap_header, fw_rsrc->eh_id.id);
		} else {
			sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
				encap_header, fw_rsrc->eh_id.id, strerror(rc));
		}
		fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);
}
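/*
 * Only a single COUNT action per action set is supported at this
 * point, hence the n_counters == 1 assertions below.
 */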
static int
sfc_mae_counters_enable(struct sfc_adapter *sa,
			struct sfc_mae_counter_id *counters,
			unsigned int n_counters,
			efx_mae_actions_t *action_set_spec)
{
	int rc;

	sfc_log_init(sa, "entry");

	if (n_counters == 0) {
		sfc_log_init(sa, "no counters - skip");
		return 0;
	}

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(n_counters == 1);

	rc = sfc_mae_counter_enable(sa, &counters[0]);
	if (rc != 0) {
		sfc_err(sa, "failed to enable MAE counter %u: %s",
			counters[0].mae_id.id, rte_strerror(rc));
		goto fail_counter_add;
	}

	rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
						   &counters[0].mae_id);
	if (rc != 0) {
		sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
			counters[0].mae_id.id, rte_strerror(rc));
		goto fail_fill_in_id;
	}

	return 0;

fail_fill_in_id:
	(void)sfc_mae_counter_disable(sa, &counters[0]);

fail_counter_add:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}
static int
sfc_mae_counters_disable(struct sfc_adapter *sa,
			 struct sfc_mae_counter_id *counters,
			 unsigned int n_counters)
{
	if (n_counters == 0)
		return 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(n_counters == 1);

	if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
		sfc_err(sa, "failed to disable: already disabled");
		return EALREADY;
	}

	return sfc_mae_counter_disable(sa, &counters[0]);
}
static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
			  const struct sfc_mae_encap_header *encap_header,
			  unsigned int n_count,
			  const efx_mae_actions_t *spec)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
		/*
		 * Shared counters are not supported, hence action sets with
		 * COUNT are not attachable.
		 */
		if (action_set->encap_header == encap_header &&
		    n_count == 0 &&
		    efx_mae_action_set_specs_equal(action_set->spec, spec)) {
			sfc_dbg(sa, "attaching to action_set=%p", action_set);
			++(action_set->refcnt);
			return action_set;
		}
	}

	return NULL;
}
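/*
 * The counter ID array below is sized by the number of COUNT actions
 * in the flow; MAE counter IDs start out invalid and only get
 * allocated when the action set is enabled.
 */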
static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       efx_mae_actions_t *spec,
		       struct sfc_mae_encap_header *encap_header,
		       unsigned int n_counters,
		       struct sfc_mae_action_set **action_setp)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;
	unsigned int i;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
	if (action_set == NULL) {
		sfc_err(sa, "failed to alloc action set");
		return ENOMEM;
	}

	if (n_counters > 0) {
		const struct rte_flow_action *action;

		action_set->counters = rte_malloc("sfc_mae_counter_ids",
			sizeof(action_set->counters[0]) * n_counters, 0);
		if (action_set->counters == NULL) {
			rte_free(action_set);
			sfc_err(sa, "failed to alloc counters");
			return ENOMEM;
		}

		for (action = actions, i = 0;
		     action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
		     ++action) {
			const struct rte_flow_action_count *conf;

			if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
				continue;

			conf = action->conf;

			action_set->counters[i].mae_id.id =
				EFX_MAE_RSRC_ID_INVALID;
			action_set->counters[i].rte_id = conf->id;
			i++;
		}
		action_set->n_counters = n_counters;
	}

	action_set->refcnt = 1;
	action_set->spec = spec;
	action_set->encap_header = encap_header;

	action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

	*action_setp = action_set;

	sfc_dbg(sa, "added action_set=%p", action_set);

	return 0;
}
static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
		       struct sfc_mae_action_set *action_set)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(action_set->refcnt != 0);

	--(action_set->refcnt);

	if (action_set->refcnt != 0)
		return;

	if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    action_set->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
			action_set, action_set->fw_rsrc.aset_id.id,
			action_set->fw_rsrc.refcnt);
	}

	efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
	sfc_mae_encap_header_del(sa, action_set->encap_header);
	if (action_set->n_counters > 0) {
		SFC_ASSERT(action_set->n_counters == 1);
		SFC_ASSERT(action_set->counters[0].mae_id.id ==
			   EFX_MAE_RSRC_ID_INVALID);
		rte_free(action_set->counters);
	}
	TAILQ_REMOVE(&mae->action_sets, action_set, entries);
	rte_free(action_set);

	sfc_dbg(sa, "deleted action_set=%p", action_set);
}
static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
			  struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_encap_header *encap_header = action_set->encap_header;
	struct sfc_mae_counter_id *counters = action_set->counters;
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(action_set->spec != NULL);

		rc = sfc_mae_encap_header_enable(sa, encap_header,
						 action_set->spec);
		if (rc != 0)
			return rc;

		rc = sfc_mae_counters_enable(sa, counters,
					     action_set->n_counters,
					     action_set->spec);
		if (rc != 0) {
			sfc_err(sa, "failed to enable %u MAE counters: %s",
				action_set->n_counters, rte_strerror(rc));

			sfc_mae_encap_header_disable(sa, encap_header);
			return rc;
		}

		rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
					      &fw_rsrc->aset_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable action_set=%p: %s",
				action_set, strerror(rc));

			(void)sfc_mae_counters_disable(sa, counters,
						       action_set->n_counters);
			sfc_mae_encap_header_disable(sa, encap_header);
			return rc;
		}

		sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
			action_set, fw_rsrc->aset_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}
static void
sfc_mae_action_set_disable(struct sfc_adapter *sa,
			   struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
			action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
				action_set, fw_rsrc->aset_id.id);
		} else {
			sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
				action_set, fw_rsrc->aset_id.id, strerror(rc));
		}
		fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;

		rc = sfc_mae_counters_disable(sa, action_set->counters,
					      action_set->n_counters);
		if (rc != 0) {
			sfc_err(sa, "failed to disable %u MAE counters: %s",
				action_set->n_counters, rte_strerror(rc));
		}

		sfc_mae_encap_header_disable(sa, action_set->encap_header);
	}

	--(fw_rsrc->refcnt);
}
void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec;
	struct sfc_flow_spec_mae *spec_mae;

	if (flow == NULL)
		return;

	spec = &flow->spec;
	if (spec == NULL)
		return;

	spec_mae = &spec->mae;

	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

	if (spec_mae->outer_rule != NULL)
		sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

	if (spec_mae->action_set != NULL)
		sfc_mae_action_set_del(sa, spec_mae->action_set);

	if (spec_mae->match_spec != NULL)
		efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}
static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	const efx_mae_field_id_t field_ids[] = {
		EFX_MAE_FIELD_VLAN0_PROTO_BE,
		EFX_MAE_FIELD_VLAN1_PROTO_BE,
	};
	const struct sfc_mae_ethertype *et;
	unsigned int i;
	int rc;

	/*
	 * In accordance with RTE flow API convention, the innermost L2
	 * item's "type" ("inner_type") is a L3 EtherType. If there is
	 * no L3 item, it's 0x0000/0x0000.
	 */
	et = &pdata->ethertypes[pdata->nb_vlan_tags];
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
					  sizeof(et->value),
					  (const uint8_t *)&et->value,
					  sizeof(et->mask),
					  (const uint8_t *)&et->mask);
	if (rc != 0)
		return rc;

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (i = 0; i < pdata->nb_vlan_tags; ++i) {
		et = &pdata->ethertypes[i];

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[field_ids[i]],
						  sizeof(et->value),
						  (const uint8_t *)&et->value,
						  sizeof(et->mask),
						  (const uint8_t *)&et->mask);
		if (rc != 0)
			return rc;
	}

	return 0;
}
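/*
 * Post-process match criteria stashed by the item parsers. For
 * example, for a pattern ETH / VLAN / VLAN / IPV4, ETHER_TYPE ends up
 * with the IPv4 EtherType taken from the innermost "type" field,
 * whilst VLAN0_PROTO and VLAN1_PROTO carry validated TPIDs; VLAN tag
 * presence bits are set where the pattern implies them.
 */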
static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
				  struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
	const rte_be16_t supported_tpids[] = {
		/* VLAN standard TPID (always the first element) */
		RTE_BE16(RTE_ETHER_TYPE_VLAN),

		/* Double-tagging TPIDs */
		RTE_BE16(RTE_ETHER_TYPE_QINQ),
		RTE_BE16(RTE_ETHER_TYPE_QINQ1),
		RTE_BE16(RTE_ETHER_TYPE_QINQ2),
		RTE_BE16(RTE_ETHER_TYPE_QINQ3),
	};
	bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
	unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
	unsigned int ethertype_idx;
	const uint8_t *valuep;
	const uint8_t *maskp;
	int rc;

	if (pdata->innermost_ethertype_restriction.mask != 0 &&
	    pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		/*
		 * If a single item VLAN is followed by a L3 item, value
		 * of "type" in item ETH can't be a double-tagging TPID.
		 */
		nb_supported_tpids = 1;
	}

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (ethertype_idx = 0;
	     ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
		rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
		rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
		unsigned int tpid_idx;

		/*
		 * This loop can have only two iterations. On the second one,
		 * drop outer tag presence enforcement bit because the inner
		 * tag presence automatically assumes that for the outer tag.
		 */
		enforce_tag_presence[0] = B_FALSE;

		if (tpid_m == RTE_BE16(0)) {
			if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
				enforce_tag_presence[ethertype_idx] = B_TRUE;

			/* No match on this field, and no value check. */
			nb_supported_tpids = 1;
			continue;
		}

		/* Exact match is supported only. */
		if (tpid_m != RTE_BE16(0xffff)) {
			sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
				rte_be_to_cpu_16(tpid_m));
			rc = EINVAL;
			goto fail;
		}

		for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
		     tpid_idx < nb_supported_tpids; ++tpid_idx) {
			if (tpid_v == supported_tpids[tpid_idx])
				break;
		}

		if (tpid_idx == nb_supported_tpids) {
			sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
				rte_be_to_cpu_16(tpid_v));
			rc = EINVAL;
			goto fail;
		}

		nb_supported_tpids = 1;
	}

	if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
		struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
		rte_be16_t enforced_et;

		enforced_et = pdata->innermost_ethertype_restriction.value;

		if (et->mask == 0) {
			et->mask = RTE_BE16(0xffff);
			et->value = enforced_et;
		} else if (et->mask != RTE_BE16(0xffff) ||
			   et->value != enforced_et) {
			sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
				rte_be_to_cpu_16(enforced_et),
				rte_be_to_cpu_16(et->value),
				rte_be_to_cpu_16(et->mask));
			rc = EINVAL;
			goto fail;
		}
	}

	/*
	 * Now, when the number of VLAN tags is known, set fields
	 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
	 * one is either a valid L3 EtherType (or 0x0000/0x0000),
	 * and the last two are valid TPIDs (or 0x0000/0x0000).
	 */
	rc = sfc_mae_set_ethertypes(ctx);
	if (rc != 0)
		goto fail;

	if (pdata->l3_next_proto_restriction_mask == 0xff) {
		if (pdata->l3_next_proto_mask == 0) {
			pdata->l3_next_proto_mask = 0xff;
			pdata->l3_next_proto_value =
				pdata->l3_next_proto_restriction_value;
		} else if (pdata->l3_next_proto_mask != 0xff ||
			   pdata->l3_next_proto_value !=
			   pdata->l3_next_proto_restriction_value) {
			sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
				pdata->l3_next_proto_restriction_value,
				pdata->l3_next_proto_value,
				pdata->l3_next_proto_mask);
			rc = EINVAL;
			goto fail;
		}
	}

	if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
						fremap[EFX_MAE_FIELD_HAS_OVLAN],
						enforce_tag_presence[0] ||
						pdata->has_ovlan_value);
		if (rc != 0)
			goto fail;
	}

	if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
						fremap[EFX_MAE_FIELD_HAS_IVLAN],
						enforce_tag_presence[1] ||
						pdata->has_ivlan_value);
		if (rc != 0)
			goto fail;
	}

	valuep = (const uint8_t *)&pdata->l3_next_proto_value;
	maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_IP_PROTO],
					  sizeof(pdata->l3_next_proto_value),
					  valuep,
					  sizeof(pdata->l3_next_proto_mask),
					  maskp);
	if (rc != 0)
		goto fail;

	return 0;

fail:
	return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				  "Failed to process pattern data");
}
static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
				struct sfc_flow_parse_ctx *ctx,
				struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_port_id supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_port_id_mask;
	const struct rte_flow_item_port_id *spec = NULL;
	const struct rte_flow_item_port_id *mask = NULL;
	efx_mport_sel_t mport_sel;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_port_id), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PORT_ID pattern item");
	}

	/* If "spec" is not set, could be any port ID */
	if (spec == NULL)
		return 0;

	if (spec->id > UINT16_MAX) {
		return rte_flow_error_set(error, EOVERFLOW,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "The port ID is too large");
	}

	rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
					   spec->id, &mport_sel);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't find RTE ethdev by the port ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
					  &mport_sel, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the port ID");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}
static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
				 struct sfc_flow_parse_ctx *ctx,
				 struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_phy_port supp_mask = {
		.index = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_phy_port_mask;
	const struct rte_flow_item_phy_port *spec = NULL;
	const struct rte_flow_item_phy_port *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_phy_port), error);
	if (rc != 0)
		return rc;

	if (mask->index != supp_mask.index) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PHY_PORT pattern item");
	}

	/* If "spec" is not set, could be any physical port */
	if (spec == NULL)
		return 0;

	rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PHY_PORT index");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PHY_PORT");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}
static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
					    &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}
static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	const struct rte_flow_item_vf supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_vf_mask;
	const struct rte_flow_item_vf *spec = NULL;
	const struct rte_flow_item_vf *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_vf), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the VF pattern item");
	}

	/*
	 * If "spec" is not set, the item requests any VF related to the
	 * PF of the current DPDK port (but not the PF itself).
	 * Reject this match criterion as unsupported.
	 */
	if (spec == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad spec in the VF pattern item");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF + VF IDs");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF + VF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}
/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED	EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
	efx_mae_field_id_t		field_id;
	size_t				size;
	/* Field offset in the corresponding rte_flow_item_ struct */
	size_t				ofst;
};
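/*
 * Build the mask of item fields supported by the driver: every byte
 * covered by some field locator is set to 0xff, so that user masks
 * going beyond these bytes get rejected by sfc_flow_parse_init().
 */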
static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
			     unsigned int nb_field_locators, void *mask_ptr,
			     size_t mask_size)
{
	unsigned int i;

	memset(mask_ptr, 0, mask_size);

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		SFC_ASSERT(fl->ofst + fl->size <= mask_size);
		memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
	}
}
static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
		   unsigned int nb_field_locators, const uint8_t *spec,
		   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
		   struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
			continue;

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[fl->field_id],
						  fl->size, spec + fl->ofst,
						  fl->size, mask + fl->ofst);
		if (rc != 0)
			break;
	}

	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return rc;
}
static const struct sfc_mae_field_locator flocs_eth[] = {
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
		offsetof(struct rte_flow_item_eth, type),
	},
	{
		EFX_MAE_FIELD_ETH_DADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
		offsetof(struct rte_flow_item_eth, dst),
	},
	{
		EFX_MAE_FIELD_ETH_SADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
		offsetof(struct rte_flow_item_eth, src),
	},
};
static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct rte_flow_item_eth supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
				     &supp_mask, sizeof(supp_mask));
	supp_mask.has_vlan = 1;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
		struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
		const struct rte_flow_item_eth *item_spec;
		const struct rte_flow_item_eth *item_mask;

		item_spec = (const struct rte_flow_item_eth *)spec;
		item_mask = (const struct rte_flow_item_eth *)mask;

		/*
		 * Remember various match criteria in the parsing context.
		 * sfc_mae_rule_process_pattern_data() will consider them
		 * altogether when the rest of the items have been parsed.
		 */
		ethertypes[0].value = item_spec->type;
		ethertypes[0].mask = item_mask->type;
		if (item_mask->has_vlan) {
			pdata->has_ovlan_mask = B_TRUE;
			if (item_spec->has_vlan)
				pdata->has_ovlan_value = B_TRUE;
		}
	} else {
		/*
		 * The specification is empty. The overall pattern
		 * validity will be enforced at the end of parsing.
		 * See sfc_mae_rule_process_pattern_data().
		 */
		return 0;
	}

	return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
				  ctx_mae, error);
}
static const struct sfc_mae_field_locator flocs_vlan[] = {
	{
		EFX_MAE_FIELD_VLAN0_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},
	{
		EFX_MAE_FIELD_VLAN1_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},
};
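/*
 * flocs_vlan is split into two equal per-tag halves; the half used
 * for a given item VLAN is selected by the number of tags seen so
 * far (see the nb_flocs arithmetic below).
 */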
static int
sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
		&pdata->has_ovlan_mask,
		&pdata->has_ivlan_mask,
	};
	boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
		&pdata->has_ovlan_value,
		&pdata->has_ivlan_value,
	};
	boolean_t *cur_tag_presence_bit_mp;
	boolean_t *cur_tag_presence_bit_vp;
	const struct sfc_mae_field_locator *flocs;
	struct rte_flow_item_vlan supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	unsigned int nb_flocs;
	int rc;

	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't match that many VLAN tags");
	}

	cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
	cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];

	if (*cur_tag_presence_bit_mp == B_TRUE &&
	    *cur_tag_presence_bit_vp == B_FALSE) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"The previous item enforces no (more) VLAN, "
				"so the current item (VLAN) must not exist");
	}

	nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
	flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;

	sfc_mae_item_build_supp_mask(flocs, nb_flocs,
				     &supp_mask, sizeof(supp_mask));
	/*
	 * This only means that the field is supported by the driver and libefx.
	 * Support on NIC level will be checked when all items have been parsed.
	 */
	supp_mask.has_more_vlan = 1;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_vlan_mask,
				 sizeof(struct rte_flow_item_vlan), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_ethertype *et = pdata->ethertypes;
		const struct rte_flow_item_vlan *item_spec;
		const struct rte_flow_item_vlan *item_mask;

		item_spec = (const struct rte_flow_item_vlan *)spec;
		item_mask = (const struct rte_flow_item_vlan *)mask;

		/*
		 * Remember various match criteria in the parsing context.
		 * sfc_mae_rule_process_pattern_data() will consider them
		 * altogether when the rest of the items have been parsed.
		 */
		et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
		et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
		pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
		if (item_mask->has_more_vlan) {
			if (pdata->nb_vlan_tags ==
			    SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
				return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Can't use 'has_more_vlan' in "
					"the second item VLAN");
			}
			pdata->has_ivlan_mask = B_TRUE;
			if (item_spec->has_more_vlan)
				pdata->has_ivlan_value = B_TRUE;
		}

		/* Convert TCI to MAE representation right now. */
		rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
					ctx_mae, error);
		if (rc != 0)
			return rc;
	}

	++(pdata->nb_vlan_tags);

	return 0;
}
static const struct sfc_mae_field_locator flocs_ipv4[] = {
	{
		EFX_MAE_FIELD_SRC_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
		offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
	},
	{
		EFX_MAE_FIELD_IP_TOS,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
				 hdr.type_of_service),
		offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
		offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
	},
};
static int
sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv4 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv4 *item_spec;
		const struct rte_flow_item_ipv4 *item_mask;

		item_spec = (const struct rte_flow_item_ipv4 *)spec;
		item_mask = (const struct rte_flow_item_ipv4 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
		pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
	} else {
		return 0;
	}

	return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
				  ctx_mae, error);
}
static const struct sfc_mae_field_locator flocs_ipv6[] = {
	{
		EFX_MAE_FIELD_SRC_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
		offsetof(struct rte_flow_item_ipv6, hdr.proto),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
		offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
	},
};
static int
sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv6 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	rte_be32_t vtc_flow_be;
	uint32_t vtc_flow;
	uint8_t tc_value;
	uint8_t tc_mask;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
				     &supp_mask, sizeof(supp_mask));

	vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
	memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv6 *item_spec;
		const struct rte_flow_item_ipv6 *item_mask;

		item_spec = (const struct rte_flow_item_ipv6 *)spec;
		item_mask = (const struct rte_flow_item_ipv6 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.proto;
		pdata->l3_next_proto_mask = item_mask->hdr.proto;
	} else {
		return 0;
	}

	rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
				ctx_mae, error);
	if (rc != 0)
		return rc;

	memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  fremap[EFX_MAE_FIELD_IP_TOS],
					  sizeof(tc_value), &tc_value,
					  sizeof(tc_mask), &tc_mask);
	if (rc != 0) {
		return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return 0;
}
static const struct sfc_mae_field_locator flocs_tcp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
		offsetof(struct rte_flow_item_tcp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
		offsetof(struct rte_flow_item_tcp, hdr.dst_port),
	},
	{
		EFX_MAE_FIELD_TCP_FLAGS_BE,
		/*
		 * The values have been picked intentionally since the
		 * target MAE field is oversize (16 bit). This mapping
		 * relies on the fact that the MAE field is big-endian.
		 */
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
		offsetof(struct rte_flow_item_tcp, hdr.data_off),
	},
};
static int
sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_tcp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	/*
	 * When encountered among outermost items, item TCP is invalid.
	 * Check which match specification is being constructed now.
	 */
	if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"TCP in outer frame is invalid");
	}

	sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
				  ctx_mae, error);
}
static const struct sfc_mae_field_locator flocs_udp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
		offsetof(struct rte_flow_item_udp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
		offsetof(struct rte_flow_item_udp, hdr.dst_port),
	},
};
static int
sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_udp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
				  ctx_mae, error);
}
static const struct sfc_mae_field_locator flocs_tunnel[] = {
	{
		/*
		 * The size and offset values are relevant
		 * for Geneve and NVGRE, too.
		 */
		.size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
		.ofst = offsetof(struct rte_flow_item_vxlan, vni),
	},
};
/*
 * An auxiliary registry which allows using non-encap. field IDs
 * directly when building a match specification of type ACTION.
 *
 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
 */
static const efx_mae_field_id_t field_ids_no_remap[] = {
#define FIELD_ID_NO_REMAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field

	FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
	FIELD_ID_NO_REMAP(ETH_SADDR_BE),
	FIELD_ID_NO_REMAP(ETH_DADDR_BE),
	FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
	FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
	FIELD_ID_NO_REMAP(SRC_IP4_BE),
	FIELD_ID_NO_REMAP(DST_IP4_BE),
	FIELD_ID_NO_REMAP(IP_PROTO),
	FIELD_ID_NO_REMAP(IP_TOS),
	FIELD_ID_NO_REMAP(IP_TTL),
	FIELD_ID_NO_REMAP(SRC_IP6_BE),
	FIELD_ID_NO_REMAP(DST_IP6_BE),
	FIELD_ID_NO_REMAP(L4_SPORT_BE),
	FIELD_ID_NO_REMAP(L4_DPORT_BE),
	FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
	FIELD_ID_NO_REMAP(HAS_OVLAN),
	FIELD_ID_NO_REMAP(HAS_IVLAN),

#undef FIELD_ID_NO_REMAP
};
/*
 * An auxiliary registry which allows using "ENC" field IDs
 * when building a match specification of type OUTER.
 *
 * See sfc_mae_rule_encap_parse_init().
 */
static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
#define FIELD_ID_REMAP_TO_ENCAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field

	FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
	FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
	FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
	FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),

#undef FIELD_ID_REMAP_TO_ENCAP
};
static int
sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
			       struct sfc_flow_parse_ctx *ctx,
			       struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
	uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
	const struct rte_flow_item_vxlan *vxp;
	uint8_t supp_mask[sizeof(uint64_t)];
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	/*
	 * We're about to start processing inner frame items.
	 * Process pattern data that has been deferred so far
	 * and reset pattern data storage.
	 */
	rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
	if (rc != 0)
		return rc;

	memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));

	sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
				     &supp_mask, sizeof(supp_mask));

	/*
	 * This tunnel item was preliminarily detected by
	 * sfc_mae_rule_encap_parse_init(). Default mask
	 * was also picked by that helper. Use it here.
	 */
	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 ctx_mae->tunnel_def_mask,
				 ctx_mae->tunnel_def_mask_size, error);
	if (rc != 0)
		return rc;

	/*
	 * This item and later ones comprise a
	 * match specification of type ACTION.
	 */
	ctx_mae->match_spec = ctx_mae->match_spec_action;

	/* This item and later ones use non-encap. EFX MAE field IDs. */
	ctx_mae->field_ids_remap = field_ids_no_remap;

	if (spec == NULL)
		return 0;

	/*
	 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
	 * Copy 24-bit VNI, which is BE, at offset 1 in it.
	 * The extra byte is 0 both in the mask and in the value.
	 */
	vxp = (const struct rte_flow_item_vxlan *)spec;
	memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));

	vxp = (const struct rte_flow_item_vxlan *)mask;
	memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  EFX_MAE_FIELD_ENC_VNET_ID_BE,
					  sizeof(vnet_id_v), vnet_id_v,
					  sizeof(vnet_id_m), vnet_id_m);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Failed to set VXLAN VNI");
	}

	return rc;
}
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_port_id,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_phy_port,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PF,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_pf,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VF,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_vf,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
};
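/*
 * Finalise the match specification of type OUTER (if any): attach to
 * an existing outer rule entry or add a new one, and make the ACTION
 * specification match on the outer rule ID (or on an outer rule miss
 * when the flow has no outer rule).
 */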
static int
sfc_mae_rule_process_outer(struct sfc_adapter *sa,
			   struct sfc_mae_parse_ctx *ctx,
			   struct sfc_mae_outer_rule **rulep,
			   struct rte_flow_error *error)
{
	efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
	int rc;

	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
		*rulep = NULL;
		goto no_or_id;
	}

	SFC_ASSERT(ctx->match_spec_outer != NULL);

	if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "Inconsistent pattern (outer)");
	}

	*rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
					   ctx->encap_type);
	if (*rulep != NULL) {
		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
	} else {
		rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
					    ctx->encap_type, rulep);
		if (rc != 0) {
			return rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					"Failed to process the pattern");
		}
	}

	/* The spec has now been tracked by the outer rule entry. */
	ctx->match_spec_outer = NULL;

no_or_id:
	/*
	 * In MAE, lookup sequence comprises outer parse, outer rule lookup,
	 * inner parse (when some outer rule is hit) and action rule lookup.
	 * If the currently processed flow does not come with an outer rule,
	 * its action rule must be available only for packets which miss in
	 * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
	 * in the action rule specification; this ensures correct behaviour.
	 *
	 * If, on the other hand, this flow does have an outer rule, its ID
	 * may be unknown at the moment (not yet allocated), but OR_ID mask
	 * has to be set to 0xffffffff anyway for correct class comparisons.
	 * When the outer rule has been allocated, this match field will be
	 * overridden by sfc_mae_outer_rule_enable() to use the right value.
	 */
	rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
						  &invalid_rule_id);
	if (rc != 0) {
		if (*rulep != NULL)
			sfc_mae_outer_rule_del(sa, *rulep);

		*rulep = NULL;

		return rte_flow_error_set(error, rc,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "Failed to process the pattern");
	}

	return 0;
}
2179 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
2180 const struct rte_flow_item pattern[],
2181 struct sfc_mae_parse_ctx *ctx,
2182 struct rte_flow_error *error)
2184 struct sfc_mae *mae = &sa->mae;
2187 if (pattern == NULL) {
2188 rte_flow_error_set(error, EINVAL,
2189 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
			ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
			ctx->tunnel_def_mask_size =
				sizeof(rte_flow_item_vxlan_mask);
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
			ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
			ctx->tunnel_def_mask_size =
				sizeof(rte_flow_item_geneve_mask);
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
			ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
			ctx->tunnel_def_mask_size =
				sizeof(rte_flow_item_nvgre_mask);
			break;
		case RTE_FLOW_ITEM_TYPE_END:
			break;
		default:
			++pattern;
			continue;
		}

		break;
	}

	if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
		return 0;
	if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  pattern, "Unsupported tunnel item");
	}

	if (ctx->priority >= mae->nb_outer_rule_prios_max) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "Unsupported priority level");
	}

	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
				     &ctx->match_spec_outer);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_ITEM, pattern,
			"Failed to initialise outer rule match specification");
	}
	/* Outermost items comprise a match specification of type OUTER. */
	ctx->match_spec = ctx->match_spec_outer;

	/* Outermost items use "ENC" EFX MAE field IDs. */
	ctx->field_ids_remap = field_ids_remap_to_encap;

	return 0;
}
static void
sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
			      struct sfc_mae_parse_ctx *ctx)
{
	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return;

	if (ctx->match_spec_outer != NULL)
		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
}
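
/*
 * Parse the complete item pattern into an action rule match
 * specification and, if tunnel items are present, an outer rule.
 */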
int
sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
			   const struct rte_flow_item pattern[],
			   struct sfc_flow_spec_mae *spec,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx ctx_mae;
	struct sfc_flow_parse_ctx ctx;
	int rc;

	memset(&ctx_mae, 0, sizeof(ctx_mae));
	ctx_mae.priority = spec->priority;

	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
				     spec->priority,
				     &ctx_mae.match_spec_action);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Failed to initialise action rule match specification");
		goto fail_init_match_spec_action;
	}
	/*
	 * As a preliminary setting, assume that there is no encapsulation
	 * in the pattern. That is, pattern items are about to comprise a
	 * match specification of type ACTION and use non-encap. field IDs.
	 *
	 * sfc_mae_rule_encap_parse_init() below may override this.
	 */
	ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
	ctx_mae.match_spec = ctx_mae.match_spec_action;
	ctx_mae.field_ids_remap = field_ids_no_remap;

	ctx.type = SFC_FLOW_PARSE_CTX_MAE;
	ctx.mae = &ctx_mae;

	rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
	if (rc != 0)
		goto fail_encap_parse_init;
	rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
				    pattern, &ctx, error);
	if (rc != 0)
		goto fail_parse_pattern;

	rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
	if (rc != 0)
		goto fail_process_pattern_data;

	rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
	if (rc != 0)
		goto fail_process_outer;

	if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
		rc = rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					"Inconsistent pattern");
		goto fail_validate_match_spec_action;
	}

	spec->match_spec = ctx_mae.match_spec_action;

	return 0;

fail_validate_match_spec_action:
fail_process_outer:
fail_process_pattern_data:
fail_parse_pattern:
	sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);

fail_encap_parse_init:
	efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);

fail_init_match_spec_action:
	return rc;
}
/*
 * An action supported by MAE may correspond to a bundle of RTE flow actions,
 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
 * That is, related RTE flow actions need to be tracked as parts of a whole
 * so that they can be combined into a single action and submitted to the
 * MAE representation of a given rule's action set.
 *
 * Each RTE flow action provided by an application gets classified as
 * one belonging to some bundle type. If an action is not supposed to
 * belong to any bundle, or if this action is END, it is described as
 * one belonging to a dummy bundle of type EMPTY.
 *
 * A currently tracked bundle will be submitted if a repeating
 * action or an action of different bundle type follows.
 */
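
/*
 * For instance (illustrative), the action sequence
 *     OF_PUSH_VLAN (ethertype 0x8100) /
 *     OF_SET_VLAN_VID (vid 5) /
 *     OF_SET_VLAN_PCP (pcp 3)
 * is tracked as a single VLAN_PUSH bundle and is submitted to MAE as
 * one VLAN push action with TPID 0x8100 and TCI (3 << 13) | 5.
 */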
enum sfc_mae_actions_bundle_type {
	SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
	SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
};

struct sfc_mae_actions_bundle {
	enum sfc_mae_actions_bundle_type	type;

	/* Indicates actions already tracked by the current bundle */
	uint64_t				actions_mask;

	/* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
	rte_be16_t				vlan_push_tpid;
	rte_be16_t				vlan_push_tci;
};
/*
 * Combine configuration of RTE flow actions tracked by the bundle into a
 * single action and submit the result to the MAE action set specification.
 * Do nothing in the case of a dummy action bundle.
 */
static int
sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
			      efx_mae_actions_t *spec)
{
	int rc = 0;

	switch (bundle->type) {
	case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
		break;
	case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
		rc = efx_mae_action_set_populate_vlan_push(
			spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
		break;
	default:
		SFC_ASSERT(B_FALSE);
		break;
	}

	return rc;
}
/*
 * Given the type of the next RTE flow action in the line, decide
 * whether a new bundle is about to start, and, if this is the case,
 * submit and reset the current bundle.
 */
static int
sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
			    struct sfc_mae_actions_bundle *bundle,
			    efx_mae_actions_t *spec,
			    struct rte_flow_error *error)
{
	enum sfc_mae_actions_bundle_type bundle_type_new;
	int rc;
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
		break;
	default:
		/*
		 * Self-sufficient actions, including END, are handled in this
		 * case. No checks for unsupported actions are needed here
		 * because parsing doesn't occur at this point.
		 */
		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
		break;
	}
	if (bundle_type_new != bundle->type ||
	    (bundle->actions_mask & (1ULL << action->type)) != 0) {
		rc = sfc_mae_actions_bundle_submit(bundle, spec);
		if (rc != 0)
			goto fail_submit;

		memset(bundle, 0, sizeof(*bundle));
	}

	bundle->type = bundle_type_new;

	return 0;

fail_submit:
	return rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Failed to request the (group of) action(s)");
}
static void
sfc_mae_rule_parse_action_of_push_vlan(
			    const struct rte_flow_action_of_push_vlan *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	bundle->vlan_push_tpid = conf->ethertype;
}

static void
sfc_mae_rule_parse_action_of_set_vlan_vid(
			    const struct rte_flow_action_of_set_vlan_vid *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	bundle->vlan_push_tci |= (conf->vlan_vid &
				  rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
}

static void
sfc_mae_rule_parse_action_of_set_vlan_pcp(
			    const struct rte_flow_action_of_set_vlan_pcp *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
					   RTE_LEN2MASK(3, uint8_t)) << 13;

	bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
}
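
/*
 * In the 802.1Q tag control information (TCI), PCP occupies bits 15:13,
 * DEI bit 12 and VID bits 11:0; hence the "pcp << 13" shift and the
 * 12-bit VID mask in the helpers above.
 */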
struct sfc_mae_parsed_item {
	const struct rte_flow_item	*item;
	size_t				proto_header_ofst;
	size_t				proto_header_size;
};
/*
 * For each 16-bit word of the given header, override
 * bits enforced by the corresponding 16-bit mask.
 */
static void
sfc_mae_header_force_item_masks(uint8_t *header_buf,
				const struct sfc_mae_parsed_item *parsed_items,
				unsigned int nb_parsed_items)
{
	unsigned int item_idx;

	for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
		const struct sfc_mae_parsed_item *parsed_item;
		const struct rte_flow_item *item;
		size_t proto_header_size;
		size_t ofst;

		parsed_item = &parsed_items[item_idx];
		proto_header_size = parsed_item->proto_header_size;
		item = parsed_item->item;

		for (ofst = 0; ofst < proto_header_size;
		     ofst += sizeof(rte_be16_t)) {
			rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
			const rte_be16_t *w_maskp;
			const rte_be16_t *w_specp;

			w_maskp = RTE_PTR_ADD(item->mask, ofst);
			w_specp = RTE_PTR_ADD(item->spec, ofst);

			/* Clear the masked bits, then set them from the spec. */
			*wp &= ~(*w_maskp);
			*wp |= (*w_specp & *w_maskp);
		}

		header_buf += proto_header_size;
	}
}
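
/*
 * For example, given a spec word of 0x1234 and a mask word of 0xff00,
 * the resulting header word keeps its low byte (possibly filled in by
 * the defaults below) and takes 0x12 as its high byte.
 */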
#define SFC_IPV4_TTL_DEF	0x40
#define SFC_IPV6_VTC_FLOW_DEF	0x60000000
#define SFC_IPV6_HOP_LIMITS_DEF	0xff
#define SFC_VXLAN_FLAGS_DEF	0x08000000
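
/*
 * The defaults above fill header fields which an encap. header
 * definition cannot be expected to set: the IPv4 TTL, the IPv6
 * version/traffic class/flow label word, the IPv6 hop limit and the
 * VXLAN flags word with the "I" (valid VNI) bit set.
 */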
static int
sfc_mae_rule_parse_action_vxlan_encap(
			    struct sfc_mae *mae,
			    const struct rte_flow_action_vxlan_encap *conf,
			    efx_mae_actions_t *spec,
			    struct rte_flow_error *error)
{
	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
	struct rte_flow_item *pattern = conf->definition;
	uint8_t *buf = bounce_eh->buf;

	/* This array will keep track of non-VOID pattern items. */
	struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
						2 /* VLAN tags */ +
						1 /* IPv4 or IPv6 */ +
						1 /* UDP */ +
						1 /* VXLAN */];
	unsigned int nb_parsed_items = 0;

	size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
	uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
				  sizeof(struct rte_ipv6_hdr))];
	struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
	struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_udp_hdr *udp = NULL;
	unsigned int nb_vlan_tags = 0;
	size_t next_proto_ofst = 0;
	size_t ethertype_ofst = 0;
	uint64_t exp_items;
	int rc;
	if (pattern == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
				"The encap. header definition is NULL");
	}

	bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
	bounce_eh->size = 0;

	/*
	 * Process pattern items and remember non-VOID ones.
	 * Defer applying masks until after the complete header
	 * has been built from the pattern items.
	 */
	exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
		struct sfc_mae_parsed_item *parsed_item;
		const uint64_t exp_items_extra_vlan[] = {
			RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
		};
		size_t proto_header_size;
		rte_be16_t *ethertypep;
		uint8_t *next_protop;
		uint8_t *buf_cur;
		if (pattern->spec == NULL) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"NULL item spec in the encap. header");
		}

		if (pattern->mask == NULL) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"NULL item mask in the encap. header");
		}

		if (pattern->last != NULL) {
			/* This is not a match pattern, so disallow range. */
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Range item in the encap. header");
		}

		if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
			/* Handle VOID separately, for clarity. */
			continue;
		}

		if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Unexpected item in the encap. header");
		}

		parsed_item = &parsed_items[nb_parsed_items];
		buf_cur = buf + bounce_eh->size;
		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ether_hdr);

			ethertype_ofst = eth_ethertype_ofst;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_vlan_hdr);

			ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);

			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);

			ethertype_ofst =
			    bounce_eh->size +
			    offsetof(struct rte_vlan_hdr, eth_proto);

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
			exp_items |= exp_items_extra_vlan[nb_vlan_tags];

			++nb_vlan_tags;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ipv4_hdr);

			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);

			next_proto_ofst =
			    bounce_eh->size +
			    offsetof(struct rte_ipv4_hdr, next_proto_id);

			ipv4 = (struct rte_ipv4_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
			break;
2679 case RTE_FLOW_ITEM_TYPE_IPV6:
2680 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
2682 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
2685 proto_header_size = sizeof(struct rte_ipv6_hdr);
2687 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2688 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2690 next_proto_ofst = bounce_eh->size +
2691 offsetof(struct rte_ipv6_hdr, proto);
2693 ipv6 = (struct rte_ipv6_hdr *)buf_cur;
2695 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2697 case RTE_FLOW_ITEM_TYPE_UDP:
2698 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
2700 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
2703 proto_header_size = sizeof(struct rte_udp_hdr);
2705 next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
2706 *next_protop = IPPROTO_UDP;
2708 udp = (struct rte_udp_hdr *)buf_cur;
2710 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
						  flags) != 0);

			proto_header_size = sizeof(struct rte_vxlan_hdr);

			vxlan = (struct rte_vxlan_hdr *)buf_cur;

			udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
			udp->dgram_len = RTE_BE16(sizeof(*udp) +
						  sizeof(*vxlan));
			udp->dgram_cksum = 0;

			exp_items = 0;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Unknown item in the encap. header");
		}
		if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
			return rte_flow_error_set(error, E2BIG,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"The encap. header is too big");
		}

		if ((proto_header_size & 1) != 0) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Odd layer size in the encap. header");
		}

		rte_memcpy(buf_cur, pattern->spec, proto_header_size);
		bounce_eh->size += proto_header_size;

		parsed_item->item = pattern;
		parsed_item->proto_header_size = proto_header_size;
		++nb_parsed_items;
	}
	if (exp_items != 0) {
		/* Parsing item VXLAN would have reset exp_items to 0. */
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
				"No item VXLAN in the encap. header");
	}

	/* One of the pointers (ipv4, ipv6) refers to a dummy area. */
	ipv4->version_ihl = RTE_IPV4_VHL_DEF;
	ipv4->time_to_live = SFC_IPV4_TTL_DEF;
	ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
				      sizeof(*vxlan));
	/* The HW cannot compute this checksum. */
	ipv4->hdr_checksum = 0;
	ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);

	ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
	ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
	ipv6->payload_len = udp->dgram_len;

	vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);

	/* Take care of the masks. */
	sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);

	rc = efx_mae_action_set_populate_encap(spec);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "failed to request action ENCAP");
	}

	return rc;
}
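
/*
 * A typical VXLAN_ENCAP definition accepted by the parser above is the
 * item sequence ETH / IPV4 (or IPV6) / UDP / VXLAN / END, optionally
 * with up to two VLAN tags after ETH. Fields left unset by the items
 * (TTL, UDP length and checksum, VXLAN flags, etc.) are filled with
 * the defaults before the item masks are enforced.
 */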
static int
sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
			       const struct rte_flow_action_mark *conf,
			       efx_mae_actions_t *spec)
{
	int rc;

	rc = efx_mae_action_set_populate_mark(spec, conf->id);
	if (rc != 0)
		sfc_err(sa, "failed to request action MARK: %s", strerror(rc));

	return rc;
}
static int
sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
				const struct rte_flow_action_count *conf,
				efx_mae_actions_t *spec)
{
	int rc;

	if (conf->shared) {
		rc = ENOTSUP;
		goto fail_counter_shared;
	}

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
		sfc_err(sa,
			"counter queue is not configured for COUNT action");
		rc = EINVAL;
		goto fail_counter_queue_uninit;
	}

	if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
		rc = ENOTSUP;
		goto fail_no_service_core;
	}

	rc = efx_mae_action_set_populate_count(spec);
	if (rc != 0) {
		sfc_err(sa,
			"failed to populate counters in MAE action set: %s",
			rte_strerror(rc));
		goto fail_populate_count;
	}

	return 0;

fail_populate_count:
fail_no_service_core:
fail_counter_queue_uninit:
fail_counter_shared:
	return rc;
}
static int
sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
				   const struct rte_flow_action_phy_port *conf,
				   efx_mae_actions_t *spec)
{
	efx_mport_sel_t mport;
	uint32_t phy_port;
	int rc;

	if (conf->original != 0)
		phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
	else
		phy_port = conf->index;

	rc = efx_mae_mport_by_phy_port(phy_port, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s",
			phy_port, strerror(rc));
		return rc;
	}

	rc = efx_mae_action_set_populate_deliver(spec, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
			mport.sel, strerror(rc));
	}

	return rc;
}
static int
sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
				const struct rte_flow_action_vf *vf_conf,
				efx_mae_actions_t *spec)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_mport_sel_t mport;
	uint32_t vf;
	int rc;

	if (vf_conf == NULL)
		vf = EFX_PCI_VF_INVALID;
	else if (vf_conf->original != 0)
		vf = encp->enc_vf;
	else
		vf = vf_conf->id;

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s",
			encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1,
			strerror(rc));
		return rc;
	}

	rc = efx_mae_action_set_populate_deliver(spec, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
			mport.sel, strerror(rc));
	}

	return rc;
}
static int
sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
				  const struct rte_flow_action_port_id *conf,
				  efx_mae_actions_t *spec)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_mae *mae = &sa->mae;
	efx_mport_sel_t mport;
	uint16_t port_id;
	int rc;

	if (conf->id > UINT16_MAX)
		return EOVERFLOW;

	port_id = (conf->original != 0) ? sas->port_id : conf->id;

	rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
					   port_id, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to find MAE switch port SW entry for RTE ethdev port %u: %s",
			port_id, strerror(rc));
		return rc;
	}

	rc = efx_mae_action_set_populate_deliver(spec, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
			mport.sel, strerror(rc));
	}

	return rc;
}
static const char * const action_names[] = {
	[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
	[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN",
	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
	[RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
	[RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
	[RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
	[RTE_FLOW_ACTION_TYPE_PF] = "PF",
	[RTE_FLOW_ACTION_TYPE_VF] = "VF",
	[RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
	[RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
};
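
/*
 * Translate one RTE flow action into its MAE counterpart, tracking
 * multi-action groups (VLAN push) in the current bundle and reporting
 * rejected actions by name by means of the table above.
 */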
static int
sfc_mae_rule_parse_action(struct sfc_adapter *sa,
			  const struct rte_flow_action *action,
			  const struct sfc_mae_outer_rule *outer_rule,
			  struct sfc_mae_actions_bundle *bundle,
			  efx_mae_actions_t *spec,
			  struct rte_flow_error *error)
{
	bool custom_error = B_FALSE;
	int rc;
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
				       bundle->actions_mask);
		if (outer_rule == NULL ||
		    outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
			rc = EINVAL;
		else
			rc = efx_mae_action_set_populate_decap(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_vlan_pop(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
							   action->conf,
							   spec, error);
		custom_error = B_TRUE;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_FLAG:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_flag(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_mark(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PF:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PORT_ID:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_drop(spec);
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				"Unsupported action");
	}
	if (rc == 0) {
		bundle->actions_mask |= (1ULL << action->type);
	} else if (!custom_error) {
		if (action->type < RTE_DIM(action_names)) {
			const char *action_name = action_names[action->type];

			if (action_name != NULL) {
				sfc_err(sa, "action %s was rejected: %s",
					action_name, strerror(rc));
			}
		}
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "Failed to request the action");
	}

	return rc;
}
static void
sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
{
	bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
}
static int
sfc_mae_process_encap_header(struct sfc_adapter *sa,
			     const struct sfc_mae_bounce_eh *bounce_eh,
			     struct sfc_mae_encap_header **encap_headerp)
{
	if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
		/* No encap. header was parsed; nothing to attach or add. */
		*encap_headerp = NULL;
		return 0;
	}

	*encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
	if (*encap_headerp != NULL)
		return 0;

	return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
}
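
/*
 * The attach-or-add pattern above de-duplicates encap. headers across
 * flows: an existing entry with the same bounce buffer contents is
 * reused (its reference count is incremented), and a new entry backed
 * by a FW resource is added only when the lookup misses.
 */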
int
sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
			   const struct rte_flow_action actions[],
			   struct sfc_flow_spec_mae *spec_mae,
			   struct rte_flow_error *error)
{
	struct sfc_mae_encap_header *encap_header = NULL;
	struct sfc_mae_actions_bundle bundle = {0};
	const struct rte_flow_action *action;
	struct sfc_mae *mae = &sa->mae;
	efx_mae_actions_t *spec;
	unsigned int n_count;
	int rc;

	rte_errno = 0;

	if (actions == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				"NULL actions");
	}

	rc = efx_mae_action_set_spec_init(sa->nic, &spec);
	if (rc != 0)
		goto fail_action_set_spec_init;

	/* Cleanup after previous encap. header bounce buffer usage. */
	sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
	for (action = actions;
	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
		if (rc != 0)
			goto fail_rule_parse_action;

		rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule,
					       &bundle, spec, error);
		if (rc != 0)
			goto fail_rule_parse_action;
	}

	rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
	if (rc != 0)
		goto fail_rule_parse_action;

	rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
	if (rc != 0)
		goto fail_process_encap_header;

	n_count = efx_mae_action_set_get_nb_count(spec);
	if (n_count > 1) {
		rc = ENOTSUP;
		sfc_err(sa, "too many count actions requested: %u", n_count);
		goto fail_nb_count;
	}

	spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
							 n_count, spec);
	if (spec_mae->action_set != NULL) {
		sfc_mae_encap_header_del(sa, encap_header);
		efx_mae_action_set_spec_fini(sa->nic, spec);
		return 0;
	}

	rc = sfc_mae_action_set_add(sa, actions, spec, encap_header, n_count,
				    &spec_mae->action_set);
	if (rc != 0)
		goto fail_action_set_add;

	return 0;
fail_action_set_add:
fail_nb_count:
	sfc_mae_encap_header_del(sa, encap_header);

fail_process_encap_header:
fail_rule_parse_action:
	efx_mae_action_set_spec_fini(sa->nic, spec);

fail_action_set_spec_init:
	if (rc > 0 && rte_errno == 0) {
		rc = rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL, "Failed to process the action");
	}
	return rc;
}
static bool
sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
			const efx_mae_match_spec_t *left,
			const efx_mae_match_spec_t *right)
{
	bool have_same_class;
	int rc;

	rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
					   &have_same_class);

	return (rc == 0) ? have_same_class : false;
}
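
/*
 * Match specifications are considered to be of the same class when they
 * match on the same set of fields with the same masks. If a rule of the
 * given class is already active, the FW is known to accept the class;
 * this is what the verification helpers below rely upon.
 */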
static int
sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
				struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	struct sfc_mae_outer_rule *entry;
	struct sfc_mae *mae = &sa->mae;

	if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
		/* An active rule is reused. Its class is known to be valid. */
		return 0;
	}

	TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
			      sfc_mae_outer_rules, entries) {
		const efx_mae_match_spec_t *left = entry->match_spec;
		const efx_mae_match_spec_t *right = rule->match_spec;

		if (entry == rule)
			continue;

		if (sfc_mae_rules_class_cmp(sa, left, right))
			return 0;
	}

	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
		 "support for outer frame pattern items is not guaranteed; "
		 "other than that, the items are valid from SW standpoint");
	return 0;
}
static int
sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
				 struct sfc_flow_spec_mae *spec)
{
	const struct rte_flow *entry;

	TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
		const struct sfc_flow_spec *entry_spec = &entry->spec;
		const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
		const efx_mae_match_spec_t *left = es_mae->match_spec;
		const efx_mae_match_spec_t *right = spec->match_spec;

		switch (entry_spec->type) {
		case SFC_FLOW_SPEC_FILTER:
			/* Ignore VNIC-level flows */
			break;
		case SFC_FLOW_SPEC_MAE:
			if (sfc_mae_rules_class_cmp(sa, left, right))
				return 0;
			break;
		default:
			SFC_ASSERT(false);
		}
	}

	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
		 "support for inner frame pattern items is not guaranteed; "
		 "other than that, the items are valid from SW standpoint");
	return 0;
}
/**
 * Confirm that a given flow can be accepted by the FW.
 *
 * @param sa
 *   Software adapter context
 * @param flow
 *   Flow to be verified
 * @return
 *   Zero on success and non-zero in the case of error.
 *   A special value of EAGAIN indicates that the adapter is
 *   not in started state. This state is compulsory because
 *   it only makes sense to compare the rule class of the flow
 *   being validated with classes of the active rules.
 *   Such classes are known to be supported by the FW.
 */
static int
sfc_mae_flow_verify(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ADAPTER_STARTED)
		return EAGAIN;

	if (outer_rule != NULL) {
		rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
		if (rc != 0)
			return rc;
	}

	return sfc_mae_action_rule_class_verify(sa, spec_mae);
}
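
/*
 * Once verified, a flow is installed in stages: the optional outer
 * (tunnel) rule and the action set are enabled first, and only then
 * is the action rule itself inserted in the FW.
 */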
static int
sfc_mae_flow_insert(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	struct sfc_mae_action_set *action_set = spec_mae->action_set;
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(action_set != NULL);

	if (outer_rule != NULL) {
		rc = sfc_mae_outer_rule_enable(sa, outer_rule,
					       spec_mae->match_spec);
		if (rc != 0)
			goto fail_outer_rule_enable;
	}

	rc = sfc_mae_action_set_enable(sa, action_set);
	if (rc != 0)
		goto fail_action_set_enable;

	if (action_set->n_counters > 0) {
		rc = sfc_mae_counter_start(sa);
		if (rc != 0) {
			sfc_err(sa, "failed to start MAE counters support: %s",
				rte_strerror(rc));
			goto fail_mae_counter_start;
		}
	}

	rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
					NULL, &fw_rsrc->aset_id,
					&spec_mae->rule_id);
	if (rc != 0)
		goto fail_action_rule_insert;

	sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
		flow, spec_mae->rule_id.id);

	return 0;

fail_action_rule_insert:
fail_mae_counter_start:
	sfc_mae_action_set_disable(sa, action_set);

fail_action_set_enable:
	if (outer_rule != NULL)
		sfc_mae_outer_rule_disable(sa, outer_rule);

fail_outer_rule_enable:
	return rc;
}
static int
sfc_mae_flow_remove(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_action_set *action_set = spec_mae->action_set;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	int rc;

	SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(action_set != NULL);

	rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
	if (rc != 0) {
		sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
			flow, spec_mae->rule_id.id, strerror(rc));
	}
	sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
		flow, spec_mae->rule_id.id);
	spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;

	sfc_mae_action_set_disable(sa, action_set);

	if (outer_rule != NULL)
		sfc_mae_outer_rule_disable(sa, outer_rule);

	return 0;
}
static int
sfc_mae_query_counter(struct sfc_adapter *sa,
		      struct sfc_flow_spec_mae *spec,
		      const struct rte_flow_action *action,
		      struct rte_flow_query_count *data,
		      struct rte_flow_error *error)
{
	struct sfc_mae_action_set *action_set = spec->action_set;
	const struct rte_flow_action_count *conf = action->conf;
	unsigned int i;
	int rc;

	if (action_set->n_counters == 0) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, action,
			"Queried flow rule does not have count actions");
	}

	for (i = 0; i < action_set->n_counters; i++) {
		/*
		 * Get the first available counter of the flow rule if
		 * counter ID is not specified.
		 */
		if (conf != NULL && action_set->counters[i].rte_id != conf->id)
			continue;

		rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
					 &action_set->counters[i], data);
		if (rc != 0) {
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"Queried flow rule counter action is invalid");
		}

		return 0;
	}

	return rte_flow_error_set(error, ENOENT,
				  RTE_FLOW_ERROR_TYPE_ACTION, action,
				  "No such flow rule action count ID");
}
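
/*
 * Entry point for rte_flow_query() on MAE-backed flows; only the
 * COUNT action can be queried at the moment.
 */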
static int
sfc_mae_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *action,
		   void *data,
		   struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		return sfc_mae_query_counter(sa, spec_mae, action,
					     data, error);
	default:
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Query for action of this type is not supported");
	}
}