1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
12 #include <rte_bitops.h>
13 #include <rte_common.h>
14 #include <rte_vxlan.h>
19 #include "sfc_mae_counter.h"
21 #include "sfc_switch.h"
22 #include "sfc_service.h"
25 sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
26 efx_mport_sel_t *mportp)
28 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
30 return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
35 sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
36 uint32_t nb_counters_max)
38 return sfc_mae_counters_init(&registry->counters, nb_counters_max);
42 sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
44 sfc_mae_counters_fini(&registry->counters);
48 sfc_mae_attach(struct sfc_adapter *sa)
50 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
51 struct sfc_mae_switch_port_request switch_port_request = {0};
52 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
53 efx_mport_sel_t entity_mport;
54 struct sfc_mae *mae = &sa->mae;
55 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
56 efx_mae_limits_t limits;
59 sfc_log_init(sa, "entry");
61 if (!encp->enc_mae_supported) {
62 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
66 sfc_log_init(sa, "init MAE");
67 rc = efx_mae_init(sa->nic);
71 sfc_log_init(sa, "get MAE limits");
72 rc = efx_mae_get_limits(sa->nic, &limits);
74 goto fail_mae_get_limits;
76 sfc_log_init(sa, "init MAE counter registry");
77 rc = sfc_mae_counter_registry_init(&mae->counter_registry,
78 limits.eml_max_n_counters);
80 sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
81 limits.eml_max_n_counters, rte_strerror(rc));
82 goto fail_counter_registry_init;
85 sfc_log_init(sa, "assign entity MPORT");
86 rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
88 goto fail_mae_assign_entity_mport;
90 sfc_log_init(sa, "assign RTE switch domain");
91 rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
93 goto fail_mae_assign_switch_domain;
95 sfc_log_init(sa, "assign RTE switch port");
96 switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
97 switch_port_request.entity_mportp = &entity_mport;
99 * As of now, the driver does not support representors, so
100 * RTE ethdev MPORT simply matches that of the entity.
102 switch_port_request.ethdev_mportp = &entity_mport;
103 switch_port_request.ethdev_port_id = sas->port_id;
104 rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
105 &switch_port_request,
106 &mae->switch_port_id);
108 goto fail_mae_assign_switch_port;
110 sfc_log_init(sa, "allocate encap. header bounce buffer");
111 bounce_eh->buf_size = limits.eml_encap_header_size_limit;
112 bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
113 bounce_eh->buf_size, 0);
114 if (bounce_eh->buf == NULL) {
rc = ENOMEM;
115 goto fail_mae_alloc_bounce_eh;
}
117 mae->status = SFC_MAE_STATUS_SUPPORTED;
118 mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
119 mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
120 mae->encap_types_supported = limits.eml_encap_types_supported;
121 TAILQ_INIT(&mae->outer_rules);
122 TAILQ_INIT(&mae->encap_headers);
123 TAILQ_INIT(&mae->action_sets);
125 sfc_log_init(sa, "done");
129 fail_mae_alloc_bounce_eh:
130 fail_mae_assign_switch_port:
131 fail_mae_assign_switch_domain:
132 fail_mae_assign_entity_mport:
133 sfc_mae_counter_registry_fini(&mae->counter_registry);
135 fail_counter_registry_init:
137 efx_mae_fini(sa->nic);
140 sfc_log_init(sa, "failed %d", rc);
146 sfc_mae_detach(struct sfc_adapter *sa)
148 struct sfc_mae *mae = &sa->mae;
149 enum sfc_mae_status status_prev = mae->status;
151 sfc_log_init(sa, "entry");
153 mae->nb_action_rule_prios_max = 0;
154 mae->status = SFC_MAE_STATUS_UNKNOWN;
156 if (status_prev != SFC_MAE_STATUS_SUPPORTED)
159 rte_free(mae->bounce_eh.buf);
160 sfc_mae_counter_registry_fini(&mae->counter_registry);
162 efx_mae_fini(sa->nic);
164 sfc_log_init(sa, "done");
167 static struct sfc_mae_outer_rule *
168 sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
169 const efx_mae_match_spec_t *match_spec,
170 efx_tunnel_protocol_t encap_type)
172 struct sfc_mae_outer_rule *rule;
173 struct sfc_mae *mae = &sa->mae;
175 SFC_ASSERT(sfc_adapter_is_locked(sa));
177 TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
178 if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
179 rule->encap_type == encap_type) {
180 sfc_dbg(sa, "attaching to outer_rule=%p", rule);
190 sfc_mae_outer_rule_add(struct sfc_adapter *sa,
191 efx_mae_match_spec_t *match_spec,
192 efx_tunnel_protocol_t encap_type,
193 struct sfc_mae_outer_rule **rulep)
195 struct sfc_mae_outer_rule *rule;
196 struct sfc_mae *mae = &sa->mae;
198 SFC_ASSERT(sfc_adapter_is_locked(sa));
200 rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
205 rule->match_spec = match_spec;
206 rule->encap_type = encap_type;
208 rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
210 TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);
214 sfc_dbg(sa, "added outer_rule=%p", rule);
220 sfc_mae_outer_rule_del(struct sfc_adapter *sa,
221 struct sfc_mae_outer_rule *rule)
223 struct sfc_mae *mae = &sa->mae;
225 SFC_ASSERT(sfc_adapter_is_locked(sa));
226 SFC_ASSERT(rule->refcnt != 0);
230 if (rule->refcnt != 0)
233 if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
234 rule->fw_rsrc.refcnt != 0) {
235 sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
236 rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
239 efx_mae_match_spec_fini(sa->nic, rule->match_spec);
241 TAILQ_REMOVE(&mae->outer_rules, rule, entries);
244 sfc_dbg(sa, "deleted outer_rule=%p", rule);
248 sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
249 struct sfc_mae_outer_rule *rule,
250 efx_mae_match_spec_t *match_spec_action)
252 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
255 SFC_ASSERT(sfc_adapter_is_locked(sa));
257 if (fw_rsrc->refcnt == 0) {
258 SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
259 SFC_ASSERT(rule->match_spec != NULL);
261 rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
265 sfc_err(sa, "failed to enable outer_rule=%p: %s",
271 rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
274 if (fw_rsrc->refcnt == 0) {
275 (void)efx_mae_outer_rule_remove(sa->nic,
277 fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
280 sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));
285 if (fw_rsrc->refcnt == 0) {
286 sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
287 rule, fw_rsrc->rule_id.id);
296 sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
297 struct sfc_mae_outer_rule *rule)
299 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
302 SFC_ASSERT(sfc_adapter_is_locked(sa));
304 if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
305 fw_rsrc->refcnt == 0) {
306 sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
307 rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
311 if (fw_rsrc->refcnt == 1) {
312 rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
314 sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
315 rule, fw_rsrc->rule_id.id);
317 sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
318 rule, fw_rsrc->rule_id.id, strerror(rc));
320 fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
326 static struct sfc_mae_encap_header *
327 sfc_mae_encap_header_attach(struct sfc_adapter *sa,
328 const struct sfc_mae_bounce_eh *bounce_eh)
330 struct sfc_mae_encap_header *encap_header;
331 struct sfc_mae *mae = &sa->mae;
333 SFC_ASSERT(sfc_adapter_is_locked(sa));
335 TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
336 if (encap_header->size == bounce_eh->size &&
337 memcmp(encap_header->buf, bounce_eh->buf,
338 bounce_eh->size) == 0) {
339 sfc_dbg(sa, "attaching to encap_header=%p",
341 ++(encap_header->refcnt);
350 sfc_mae_encap_header_add(struct sfc_adapter *sa,
351 const struct sfc_mae_bounce_eh *bounce_eh,
352 struct sfc_mae_encap_header **encap_headerp)
354 struct sfc_mae_encap_header *encap_header;
355 struct sfc_mae *mae = &sa->mae;
357 SFC_ASSERT(sfc_adapter_is_locked(sa));
359 encap_header = rte_zmalloc("sfc_mae_encap_header",
360 sizeof(*encap_header), 0);
361 if (encap_header == NULL)
364 encap_header->size = bounce_eh->size;
366 encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
367 encap_header->size, 0);
368 if (encap_header->buf == NULL) {
369 rte_free(encap_header);
373 rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);
375 encap_header->refcnt = 1;
376 encap_header->type = bounce_eh->type;
377 encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;
379 TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);
381 *encap_headerp = encap_header;
383 sfc_dbg(sa, "added encap_header=%p", encap_header);
389 sfc_mae_encap_header_del(struct sfc_adapter *sa,
390 struct sfc_mae_encap_header *encap_header)
392 struct sfc_mae *mae = &sa->mae;
394 if (encap_header == NULL)
397 SFC_ASSERT(sfc_adapter_is_locked(sa));
398 SFC_ASSERT(encap_header->refcnt != 0);
400 --(encap_header->refcnt);
402 if (encap_header->refcnt != 0)
405 if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
406 encap_header->fw_rsrc.refcnt != 0) {
407 sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
408 encap_header, encap_header->fw_rsrc.eh_id.id,
409 encap_header->fw_rsrc.refcnt);
412 TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
413 rte_free(encap_header->buf);
414 rte_free(encap_header);
416 sfc_dbg(sa, "deleted encap_header=%p", encap_header);
420 sfc_mae_encap_header_enable(struct sfc_adapter *sa,
421 struct sfc_mae_encap_header *encap_header,
422 efx_mae_actions_t *action_set_spec)
424 struct sfc_mae_fw_rsrc *fw_rsrc;
427 if (encap_header == NULL)
430 SFC_ASSERT(sfc_adapter_is_locked(sa));
432 fw_rsrc = &encap_header->fw_rsrc;
434 if (fw_rsrc->refcnt == 0) {
435 SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
436 SFC_ASSERT(encap_header->buf != NULL);
437 SFC_ASSERT(encap_header->size != 0);
439 rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
444 sfc_err(sa, "failed to enable encap_header=%p: %s",
445 encap_header, strerror(rc));
450 rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
453 if (fw_rsrc->refcnt == 0) {
454 (void)efx_mae_encap_header_free(sa->nic,
456 fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
459 sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));
464 if (fw_rsrc->refcnt == 0) {
465 sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
466 encap_header, fw_rsrc->eh_id.id);
475 sfc_mae_encap_header_disable(struct sfc_adapter *sa,
476 struct sfc_mae_encap_header *encap_header)
478 struct sfc_mae_fw_rsrc *fw_rsrc;
481 if (encap_header == NULL)
484 SFC_ASSERT(sfc_adapter_is_locked(sa));
486 fw_rsrc = &encap_header->fw_rsrc;
488 if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
489 fw_rsrc->refcnt == 0) {
490 sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
491 encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
495 if (fw_rsrc->refcnt == 1) {
496 rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
498 sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
499 encap_header, fw_rsrc->eh_id.id);
501 sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
502 encap_header, fw_rsrc->eh_id.id, strerror(rc));
504 fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
511 sfc_mae_counters_enable(struct sfc_adapter *sa,
512 struct sfc_mae_counter_id *counters,
513 unsigned int n_counters,
514 efx_mae_actions_t *action_set_spec)
518 sfc_log_init(sa, "entry");
520 if (n_counters == 0) {
521 sfc_log_init(sa, "no counters - skip");
525 SFC_ASSERT(sfc_adapter_is_locked(sa));
526 SFC_ASSERT(n_counters == 1);
528 rc = sfc_mae_counter_enable(sa, &counters[0]);
530 sfc_err(sa, "failed to enable MAE counter %u: %s",
531 counters[0].mae_id.id, rte_strerror(rc));
532 goto fail_counter_add;
535 rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
536 &counters[0].mae_id);
538 sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
539 counters[0].mae_id.id, rte_strerror(rc));
540 goto fail_fill_in_id;
546 (void)sfc_mae_counter_disable(sa, &counters[0]);
549 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
554 sfc_mae_counters_disable(struct sfc_adapter *sa,
555 struct sfc_mae_counter_id *counters,
556 unsigned int n_counters)
561 SFC_ASSERT(sfc_adapter_is_locked(sa));
562 SFC_ASSERT(n_counters == 1);
564 if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
565 sfc_err(sa, "failed to disable: already disabled");
569 return sfc_mae_counter_disable(sa, &counters[0]);
572 static struct sfc_mae_action_set *
573 sfc_mae_action_set_attach(struct sfc_adapter *sa,
574 const struct sfc_mae_encap_header *encap_header,
575 unsigned int n_count,
576 const efx_mae_actions_t *spec)
578 struct sfc_mae_action_set *action_set;
579 struct sfc_mae *mae = &sa->mae;
581 SFC_ASSERT(sfc_adapter_is_locked(sa));
583 TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
585 * Shared counters are not supported, hence action sets with
586 * COUNT are not attachable.
588 if (action_set->encap_header == encap_header &&
590 efx_mae_action_set_specs_equal(action_set->spec, spec)) {
591 sfc_dbg(sa, "attaching to action_set=%p", action_set);
592 ++(action_set->refcnt);
601 sfc_mae_action_set_add(struct sfc_adapter *sa,
602 const struct rte_flow_action actions[],
603 efx_mae_actions_t *spec,
604 struct sfc_mae_encap_header *encap_header,
605 unsigned int n_counters,
606 struct sfc_mae_action_set **action_setp)
608 struct sfc_mae_action_set *action_set;
609 struct sfc_mae *mae = &sa->mae;
612 SFC_ASSERT(sfc_adapter_is_locked(sa));
614 action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
615 if (action_set == NULL) {
616 sfc_err(sa, "failed to alloc action set");
620 if (n_counters > 0) {
621 const struct rte_flow_action *action;
623 action_set->counters = rte_malloc("sfc_mae_counter_ids",
624 sizeof(action_set->counters[0]) * n_counters, 0);
625 if (action_set->counters == NULL) {
626 rte_free(action_set);
627 sfc_err(sa, "failed to alloc counters");
631 for (action = actions, i = 0;
632 action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
634 const struct rte_flow_action_count *conf;
636 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
641 action_set->counters[i].mae_id.id =
642 EFX_MAE_RSRC_ID_INVALID;
643 action_set->counters[i].rte_id = conf->id;
646 action_set->n_counters = n_counters;
649 action_set->refcnt = 1;
650 action_set->spec = spec;
651 action_set->encap_header = encap_header;
653 action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
655 TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
657 *action_setp = action_set;
659 sfc_dbg(sa, "added action_set=%p", action_set);
665 sfc_mae_action_set_del(struct sfc_adapter *sa,
666 struct sfc_mae_action_set *action_set)
668 struct sfc_mae *mae = &sa->mae;
670 SFC_ASSERT(sfc_adapter_is_locked(sa));
671 SFC_ASSERT(action_set->refcnt != 0);
673 --(action_set->refcnt);
675 if (action_set->refcnt != 0)
678 if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
679 action_set->fw_rsrc.refcnt != 0) {
680 sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
681 action_set, action_set->fw_rsrc.aset_id.id,
682 action_set->fw_rsrc.refcnt);
685 efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
686 sfc_mae_encap_header_del(sa, action_set->encap_header);
687 if (action_set->n_counters > 0) {
688 SFC_ASSERT(action_set->n_counters == 1);
689 SFC_ASSERT(action_set->counters[0].mae_id.id ==
690 EFX_MAE_RSRC_ID_INVALID);
691 rte_free(action_set->counters);
693 TAILQ_REMOVE(&mae->action_sets, action_set, entries);
694 rte_free(action_set);
696 sfc_dbg(sa, "deleted action_set=%p", action_set);
700 sfc_mae_action_set_enable(struct sfc_adapter *sa,
701 struct sfc_mae_action_set *action_set)
703 struct sfc_mae_encap_header *encap_header = action_set->encap_header;
704 struct sfc_mae_counter_id *counters = action_set->counters;
705 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
708 SFC_ASSERT(sfc_adapter_is_locked(sa));
710 if (fw_rsrc->refcnt == 0) {
711 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
712 SFC_ASSERT(action_set->spec != NULL);
714 rc = sfc_mae_encap_header_enable(sa, encap_header,
719 rc = sfc_mae_counters_enable(sa, counters,
720 action_set->n_counters,
723 sfc_err(sa, "failed to enable %u MAE counters: %s",
724 action_set->n_counters, rte_strerror(rc));
726 sfc_mae_encap_header_disable(sa, encap_header);
730 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
733 sfc_err(sa, "failed to enable action_set=%p: %s",
734 action_set, strerror(rc));
736 (void)sfc_mae_counters_disable(sa, counters,
737 action_set->n_counters);
738 sfc_mae_encap_header_disable(sa, encap_header);
742 sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
743 action_set, fw_rsrc->aset_id.id);
752 sfc_mae_action_set_disable(struct sfc_adapter *sa,
753 struct sfc_mae_action_set *action_set)
755 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
758 SFC_ASSERT(sfc_adapter_is_locked(sa));
760 if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
761 fw_rsrc->refcnt == 0) {
762 sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
763 action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
767 if (fw_rsrc->refcnt == 1) {
768 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
770 sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
771 action_set, fw_rsrc->aset_id.id);
773 sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
774 action_set, fw_rsrc->aset_id.id, strerror(rc));
776 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
778 rc = sfc_mae_counters_disable(sa, action_set->counters,
779 action_set->n_counters);
781 sfc_err(sa, "failed to disable %u MAE counters: %s",
782 action_set->n_counters, rte_strerror(rc));
785 sfc_mae_encap_header_disable(sa, action_set->encap_header);
792 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
793 struct rte_flow *flow)
795 struct sfc_flow_spec *spec;
796 struct sfc_flow_spec_mae *spec_mae;
806 spec_mae = &spec->mae;
808 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
810 if (spec_mae->outer_rule != NULL)
811 sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);
813 if (spec_mae->action_set != NULL)
814 sfc_mae_action_set_del(sa, spec_mae->action_set);
816 if (spec_mae->match_spec != NULL)
817 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
821 sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
823 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
824 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
825 const efx_mae_field_id_t field_ids[] = {
826 EFX_MAE_FIELD_VLAN0_PROTO_BE,
827 EFX_MAE_FIELD_VLAN1_PROTO_BE,
829 const struct sfc_mae_ethertype *et;
834 * In accordance with RTE flow API convention, the innermost L2
835 * item's "type" ("inner_type") is an L3 EtherType. If there is
836 * no L3 item, it's 0x0000/0x0000.
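 * For example, with the pattern "eth type is 0x0800 / ipv4" and no
 * VLAN tags, the lookup below picks ethertypes[0] = 0x0800/0xffff.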
838 et = &pdata->ethertypes[pdata->nb_vlan_tags];
839 rc = efx_mae_match_spec_field_set(ctx->match_spec,
840 fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
842 (const uint8_t *)&et->value,
844 (const uint8_t *)&et->mask);
849 * sfc_mae_rule_parse_item_vlan() has already made sure
850 * that pdata->nb_vlan_tags does not exceed this figure.
852 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
854 for (i = 0; i < pdata->nb_vlan_tags; ++i) {
855 et = &pdata->ethertypes[i];
857 rc = efx_mae_match_spec_field_set(ctx->match_spec,
858 fremap[field_ids[i]],
860 (const uint8_t *)&et->value,
862 (const uint8_t *)&et->mask);
871 sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
872 struct rte_flow_error *error)
874 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
875 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
876 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
877 const rte_be16_t supported_tpids[] = {
878 /* VLAN standard TPID (always the first element) */
879 RTE_BE16(RTE_ETHER_TYPE_VLAN),
881 /* Double-tagging TPIDs */
882 RTE_BE16(RTE_ETHER_TYPE_QINQ),
883 RTE_BE16(RTE_ETHER_TYPE_QINQ1),
884 RTE_BE16(RTE_ETHER_TYPE_QINQ2),
885 RTE_BE16(RTE_ETHER_TYPE_QINQ3),
887 bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
888 unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
889 unsigned int ethertype_idx;
890 const uint8_t *valuep;
891 const uint8_t *maskp;
894 if (pdata->innermost_ethertype_restriction.mask != 0 &&
895 pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
897 * If a single item VLAN is followed by an L3 item, value
898 * of "type" in item ETH can't be a double-tagging TPID.
900 nb_supported_tpids = 1;
904 * sfc_mae_rule_parse_item_vlan() has already made sure
905 * that pdata->nb_vlan_tags does not exceed this figure.
907 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
909 for (ethertype_idx = 0;
910 ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
911 unsigned int tpid_idx;
914 * This loop can have only two iterations. On the second one,
915 * drop the outer tag presence enforcement bit because inner
916 * tag presence automatically implies that for the outer tag.
918 enforce_tag_presence[0] = B_FALSE;
920 if (ethertypes[ethertype_idx].mask == RTE_BE16(0)) {
921 if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
922 enforce_tag_presence[ethertype_idx] = B_TRUE;
924 /* No match on this field, and no value check. */
925 nb_supported_tpids = 1;
929 /* Only exact match is supported. */
930 if (ethertypes[ethertype_idx].mask != RTE_BE16(0xffff)) {
935 for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
936 tpid_idx < nb_supported_tpids; ++tpid_idx) {
937 if (ethertypes[ethertype_idx].value ==
938 supported_tpids[tpid_idx])
942 if (tpid_idx == nb_supported_tpids) {
947 nb_supported_tpids = 1;
950 if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
951 struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
954 et->mask = RTE_BE16(0xffff);
956 pdata->innermost_ethertype_restriction.value;
957 } else if (et->mask != RTE_BE16(0xffff) ||
959 pdata->innermost_ethertype_restriction.value) {
966 * Now, when the number of VLAN tags is known, set fields
967 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
968 * one is a valid L3 EtherType (or 0x0000/0x0000),
969 * and the last two are valid TPIDs (or 0x0000/0x0000).
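 * For example, "eth type is 0x8100 / vlan inner_type is 0x0800 / ipv4"
 * results in VLAN0_PROTO_BE = 0x8100/0xffff and
 * ETHER_TYPE_BE = 0x0800/0xffff.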
971 rc = sfc_mae_set_ethertypes(ctx);
975 if (pdata->l3_next_proto_restriction_mask == 0xff) {
976 if (pdata->l3_next_proto_mask == 0) {
977 pdata->l3_next_proto_mask = 0xff;
978 pdata->l3_next_proto_value =
979 pdata->l3_next_proto_restriction_value;
980 } else if (pdata->l3_next_proto_mask != 0xff ||
981 pdata->l3_next_proto_value !=
982 pdata->l3_next_proto_restriction_value) {
988 if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
989 rc = efx_mae_match_spec_bit_set(ctx->match_spec,
990 fremap[EFX_MAE_FIELD_HAS_OVLAN],
991 enforce_tag_presence[0] ||
992 pdata->has_ovlan_value);
997 if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
998 rc = efx_mae_match_spec_bit_set(ctx->match_spec,
999 fremap[EFX_MAE_FIELD_HAS_IVLAN],
1000 enforce_tag_presence[1] ||
1001 pdata->has_ivlan_value);
1006 valuep = (const uint8_t *)&pdata->l3_next_proto_value;
1007 maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
1008 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1009 fremap[EFX_MAE_FIELD_IP_PROTO],
1010 sizeof(pdata->l3_next_proto_value),
1012 sizeof(pdata->l3_next_proto_mask),
1020 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1021 "Failed to process pattern data");
1025 sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
1026 struct sfc_flow_parse_ctx *ctx,
1027 struct rte_flow_error *error)
1029 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1030 const struct rte_flow_item_port_id supp_mask = {
1033 const void *def_mask = &rte_flow_item_port_id_mask;
1034 const struct rte_flow_item_port_id *spec = NULL;
1035 const struct rte_flow_item_port_id *mask = NULL;
1036 efx_mport_sel_t mport_sel;
1039 if (ctx_mae->match_mport_set) {
1040 return rte_flow_error_set(error, ENOTSUP,
1041 RTE_FLOW_ERROR_TYPE_ITEM, item,
1042 "Can't handle multiple traffic source items");
1045 rc = sfc_flow_parse_init(item,
1046 (const void **)&spec, (const void **)&mask,
1047 (const void *)&supp_mask, def_mask,
1048 sizeof(struct rte_flow_item_port_id), error);
1052 if (mask->id != supp_mask.id) {
1053 return rte_flow_error_set(error, EINVAL,
1054 RTE_FLOW_ERROR_TYPE_ITEM, item,
1055 "Bad mask in the PORT_ID pattern item");
1058 /* If "spec" is not set, could be any port ID */
1062 if (spec->id > UINT16_MAX) {
1063 return rte_flow_error_set(error, EOVERFLOW,
1064 RTE_FLOW_ERROR_TYPE_ITEM, item,
1065 "The port ID is too large");
1068 rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
1069 spec->id, &mport_sel);
1071 return rte_flow_error_set(error, rc,
1072 RTE_FLOW_ERROR_TYPE_ITEM, item,
1073 "Can't find RTE ethdev by the port ID");
1076 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
1079 return rte_flow_error_set(error, rc,
1080 RTE_FLOW_ERROR_TYPE_ITEM, item,
1081 "Failed to set MPORT for the port ID");
1084 ctx_mae->match_mport_set = B_TRUE;
1090 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
1091 struct sfc_flow_parse_ctx *ctx,
1092 struct rte_flow_error *error)
1094 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1095 const struct rte_flow_item_phy_port supp_mask = {
1096 .index = 0xffffffff,
1098 const void *def_mask = &rte_flow_item_phy_port_mask;
1099 const struct rte_flow_item_phy_port *spec = NULL;
1100 const struct rte_flow_item_phy_port *mask = NULL;
1101 efx_mport_sel_t mport_v;
1104 if (ctx_mae->match_mport_set) {
1105 return rte_flow_error_set(error, ENOTSUP,
1106 RTE_FLOW_ERROR_TYPE_ITEM, item,
1107 "Can't handle multiple traffic source items");
1110 rc = sfc_flow_parse_init(item,
1111 (const void **)&spec, (const void **)&mask,
1112 (const void *)&supp_mask, def_mask,
1113 sizeof(struct rte_flow_item_phy_port), error);
1117 if (mask->index != supp_mask.index) {
1118 return rte_flow_error_set(error, EINVAL,
1119 RTE_FLOW_ERROR_TYPE_ITEM, item,
1120 "Bad mask in the PHY_PORT pattern item");
1123 /* If "spec" is not set, could be any physical port */
1127 rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
1129 return rte_flow_error_set(error, rc,
1130 RTE_FLOW_ERROR_TYPE_ITEM, item,
1131 "Failed to convert the PHY_PORT index");
1134 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1136 return rte_flow_error_set(error, rc,
1137 RTE_FLOW_ERROR_TYPE_ITEM, item,
1138 "Failed to set MPORT for the PHY_PORT");
1141 ctx_mae->match_mport_set = B_TRUE;
1147 sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
1148 struct sfc_flow_parse_ctx *ctx,
1149 struct rte_flow_error *error)
1151 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1152 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1153 efx_mport_sel_t mport_v;
1156 if (ctx_mae->match_mport_set) {
1157 return rte_flow_error_set(error, ENOTSUP,
1158 RTE_FLOW_ERROR_TYPE_ITEM, item,
1159 "Can't handle multiple traffic source items");
1162 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
1165 return rte_flow_error_set(error, rc,
1166 RTE_FLOW_ERROR_TYPE_ITEM, item,
1167 "Failed to convert the PF ID");
1170 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1172 return rte_flow_error_set(error, rc,
1173 RTE_FLOW_ERROR_TYPE_ITEM, item,
1174 "Failed to set MPORT for the PF");
1177 ctx_mae->match_mport_set = B_TRUE;
1183 sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
1184 struct sfc_flow_parse_ctx *ctx,
1185 struct rte_flow_error *error)
1187 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1188 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1189 const struct rte_flow_item_vf supp_mask = {
1192 const void *def_mask = &rte_flow_item_vf_mask;
1193 const struct rte_flow_item_vf *spec = NULL;
1194 const struct rte_flow_item_vf *mask = NULL;
1195 efx_mport_sel_t mport_v;
1198 if (ctx_mae->match_mport_set) {
1199 return rte_flow_error_set(error, ENOTSUP,
1200 RTE_FLOW_ERROR_TYPE_ITEM, item,
1201 "Can't handle multiple traffic source items");
1204 rc = sfc_flow_parse_init(item,
1205 (const void **)&spec, (const void **)&mask,
1206 (const void *)&supp_mask, def_mask,
1207 sizeof(struct rte_flow_item_vf), error);
1211 if (mask->id != supp_mask.id) {
1212 return rte_flow_error_set(error, EINVAL,
1213 RTE_FLOW_ERROR_TYPE_ITEM, item,
1214 "Bad mask in the VF pattern item");
1218 * If "spec" is not set, the item requests any VF related to the
1219 * PF of the current DPDK port (but not the PF itself).
1220 * Reject this match criterion as unsupported.
1223 return rte_flow_error_set(error, EINVAL,
1224 RTE_FLOW_ERROR_TYPE_ITEM, item,
1225 "Bad spec in the VF pattern item");
1228 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
1230 return rte_flow_error_set(error, rc,
1231 RTE_FLOW_ERROR_TYPE_ITEM, item,
1232 "Failed to convert the PF + VF IDs");
1235 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1237 return rte_flow_error_set(error, rc,
1238 RTE_FLOW_ERROR_TYPE_ITEM, item,
1239 "Failed to set MPORT for the PF + VF");
1242 ctx_mae->match_mport_set = B_TRUE;
1248 * Having this field ID in a field locator means that this
1249 * locator cannot be used to actually set the field at the
1250 * time when the corresponding item gets encountered. Such
1251 * fields get stashed in the parsing context instead. This
1252 * is required to resolve dependencies between the stashed
1253 * fields. See sfc_mae_rule_process_pattern_data().
1255 #define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS
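/*
 * For example, item ETH's "type" is not written to the match
 * specification immediately; it is stashed in
 * pattern_data.ethertypes[] by sfc_mae_rule_parse_item_eth() and
 * resolved later. See the first entry of flocs_eth below.
 */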
1257 struct sfc_mae_field_locator {
1258 efx_mae_field_id_t field_id;
1260 /* Field offset in the corresponding rte_flow_item_ struct */
1265 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
1266 unsigned int nb_field_locators, void *mask_ptr,
1271 memset(mask_ptr, 0, mask_size);
1273 for (i = 0; i < nb_field_locators; ++i) {
1274 const struct sfc_mae_field_locator *fl = &field_locators[i];
1276 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
1277 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
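/*
 * For instance, passing flocs_eth (below) here yields a supported
 * mask whose "type", "dst" and "src" fields are all-ones and whose
 * remaining fields are zeros.
 */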
1282 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
1283 unsigned int nb_field_locators, const uint8_t *spec,
1284 const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
1285 struct rte_flow_error *error)
1287 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1291 for (i = 0; i < nb_field_locators; ++i) {
1292 const struct sfc_mae_field_locator *fl = &field_locators[i];
1294 if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
1297 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1298 fremap[fl->field_id],
1299 fl->size, spec + fl->ofst,
1300 fl->size, mask + fl->ofst);
1306 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1307 NULL, "Failed to process item fields");
1313 static const struct sfc_mae_field_locator flocs_eth[] = {
1316 * This locator is used only for building supported fields mask.
1317 * The field is handled by sfc_mae_rule_process_pattern_data().
1319 SFC_MAE_FIELD_HANDLING_DEFERRED,
1320 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
1321 offsetof(struct rte_flow_item_eth, type),
1324 EFX_MAE_FIELD_ETH_DADDR_BE,
1325 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
1326 offsetof(struct rte_flow_item_eth, dst),
1329 EFX_MAE_FIELD_ETH_SADDR_BE,
1330 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
1331 offsetof(struct rte_flow_item_eth, src),
1336 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
1337 struct sfc_flow_parse_ctx *ctx,
1338 struct rte_flow_error *error)
1340 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1341 struct rte_flow_item_eth supp_mask;
1342 const uint8_t *spec = NULL;
1343 const uint8_t *mask = NULL;
1346 sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
1347 &supp_mask, sizeof(supp_mask));
1348 supp_mask.has_vlan = 1;
1350 rc = sfc_flow_parse_init(item,
1351 (const void **)&spec, (const void **)&mask,
1352 (const void *)&supp_mask,
1353 &rte_flow_item_eth_mask,
1354 sizeof(struct rte_flow_item_eth), error);
1359 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1360 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1361 const struct rte_flow_item_eth *item_spec;
1362 const struct rte_flow_item_eth *item_mask;
1364 item_spec = (const struct rte_flow_item_eth *)spec;
1365 item_mask = (const struct rte_flow_item_eth *)mask;
1368 * Remember various match criteria in the parsing context.
1369 * sfc_mae_rule_process_pattern_data() will consider them
1370 * altogether when the rest of the items have been parsed.
1372 ethertypes[0].value = item_spec->type;
1373 ethertypes[0].mask = item_mask->type;
1374 if (item_mask->has_vlan) {
1375 pdata->has_ovlan_mask = B_TRUE;
1376 if (item_spec->has_vlan)
1377 pdata->has_ovlan_value = B_TRUE;
1381 * The specification is empty. The overall pattern
1382 * validity will be enforced at the end of parsing.
1383 * See sfc_mae_rule_process_pattern_data().
1388 return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
1392 static const struct sfc_mae_field_locator flocs_vlan[] = {
1395 EFX_MAE_FIELD_VLAN0_TCI_BE,
1396 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1397 offsetof(struct rte_flow_item_vlan, tci),
1401 * This locator is used only for building supported fields mask.
1402 * The field is handled by sfc_mae_rule_process_pattern_data().
1404 SFC_MAE_FIELD_HANDLING_DEFERRED,
1405 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1406 offsetof(struct rte_flow_item_vlan, inner_type),
1411 EFX_MAE_FIELD_VLAN1_TCI_BE,
1412 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1413 offsetof(struct rte_flow_item_vlan, tci),
1417 * This locator is used only for building supported fields mask.
1418 * The field is handled by sfc_mae_rule_process_pattern_data().
1420 SFC_MAE_FIELD_HANDLING_DEFERRED,
1421 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1422 offsetof(struct rte_flow_item_vlan, inner_type),
1427 sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
1428 struct sfc_flow_parse_ctx *ctx,
1429 struct rte_flow_error *error)
1431 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1432 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1433 boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1434 &pdata->has_ovlan_mask,
1435 &pdata->has_ivlan_mask,
1437 boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1438 &pdata->has_ovlan_value,
1439 &pdata->has_ivlan_value,
1441 boolean_t *cur_tag_presence_bit_mp;
1442 boolean_t *cur_tag_presence_bit_vp;
1443 const struct sfc_mae_field_locator *flocs;
1444 struct rte_flow_item_vlan supp_mask;
1445 const uint8_t *spec = NULL;
1446 const uint8_t *mask = NULL;
1447 unsigned int nb_flocs;
1450 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1452 if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1453 return rte_flow_error_set(error, ENOTSUP,
1454 RTE_FLOW_ERROR_TYPE_ITEM, item,
1455 "Can't match that many VLAN tags");
1458 cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
1459 cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];
1461 if (*cur_tag_presence_bit_mp == B_TRUE &&
1462 *cur_tag_presence_bit_vp == B_FALSE) {
1463 return rte_flow_error_set(error, EINVAL,
1464 RTE_FLOW_ERROR_TYPE_ITEM, item,
1465 "The previous item enforces no (more) VLAN, "
1466 "so the current item (VLAN) must not exist");
1469 nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
1470 flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
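/* flocs_vlan holds two locators per tag: TCI plus the deferred "inner_type". */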
1472 sfc_mae_item_build_supp_mask(flocs, nb_flocs,
1473 &supp_mask, sizeof(supp_mask));
1475 * This only means that the field is supported by the driver and libefx.
1476 * Support at the NIC level will be checked when all items have been parsed.
1478 supp_mask.has_more_vlan = 1;
1480 rc = sfc_flow_parse_init(item,
1481 (const void **)&spec, (const void **)&mask,
1482 (const void *)&supp_mask,
1483 &rte_flow_item_vlan_mask,
1484 sizeof(struct rte_flow_item_vlan), error);
1489 struct sfc_mae_ethertype *et = pdata->ethertypes;
1490 const struct rte_flow_item_vlan *item_spec;
1491 const struct rte_flow_item_vlan *item_mask;
1493 item_spec = (const struct rte_flow_item_vlan *)spec;
1494 item_mask = (const struct rte_flow_item_vlan *)mask;
1497 * Remember various match criteria in the parsing context.
1498 * sfc_mae_rule_process_pattern_data() will consider them
1499 * altogether when the rest of the items have been parsed.
1501 et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
1502 et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
1503 pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
1504 if (item_mask->has_more_vlan) {
1505 if (pdata->nb_vlan_tags ==
1506 SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1507 return rte_flow_error_set(error, ENOTSUP,
1508 RTE_FLOW_ERROR_TYPE_ITEM, item,
1509 "Can't use 'has_more_vlan' in "
1510 "the second item VLAN");
1512 pdata->has_ivlan_mask = B_TRUE;
1513 if (item_spec->has_more_vlan)
1514 pdata->has_ivlan_value = B_TRUE;
1517 /* Convert TCI to MAE representation right now. */
1518 rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
1524 ++(pdata->nb_vlan_tags);
1529 static const struct sfc_mae_field_locator flocs_ipv4[] = {
1531 EFX_MAE_FIELD_SRC_IP4_BE,
1532 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
1533 offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
1536 EFX_MAE_FIELD_DST_IP4_BE,
1537 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
1538 offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
1542 * This locator is used only for building supported fields mask.
1543 * The field is handled by sfc_mae_rule_process_pattern_data().
1545 SFC_MAE_FIELD_HANDLING_DEFERRED,
1546 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
1547 offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
1550 EFX_MAE_FIELD_IP_TOS,
1551 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
1552 hdr.type_of_service),
1553 offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
1556 EFX_MAE_FIELD_IP_TTL,
1557 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
1558 offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
1563 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
1564 struct sfc_flow_parse_ctx *ctx,
1565 struct rte_flow_error *error)
1567 rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1568 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1569 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1570 struct rte_flow_item_ipv4 supp_mask;
1571 const uint8_t *spec = NULL;
1572 const uint8_t *mask = NULL;
1575 sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
1576 &supp_mask, sizeof(supp_mask));
1578 rc = sfc_flow_parse_init(item,
1579 (const void **)&spec, (const void **)&mask,
1580 (const void *)&supp_mask,
1581 &rte_flow_item_ipv4_mask,
1582 sizeof(struct rte_flow_item_ipv4), error);
1586 pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
1587 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1590 const struct rte_flow_item_ipv4 *item_spec;
1591 const struct rte_flow_item_ipv4 *item_mask;
1593 item_spec = (const struct rte_flow_item_ipv4 *)spec;
1594 item_mask = (const struct rte_flow_item_ipv4 *)mask;
1596 pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
1597 pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
1602 return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
1606 static const struct sfc_mae_field_locator flocs_ipv6[] = {
1608 EFX_MAE_FIELD_SRC_IP6_BE,
1609 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
1610 offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
1613 EFX_MAE_FIELD_DST_IP6_BE,
1614 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
1615 offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
1619 * This locator is used only for building supported fields mask.
1620 * The field is handled by sfc_mae_rule_process_pattern_data().
1622 SFC_MAE_FIELD_HANDLING_DEFERRED,
1623 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
1624 offsetof(struct rte_flow_item_ipv6, hdr.proto),
1627 EFX_MAE_FIELD_IP_TTL,
1628 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
1629 offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
1634 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
1635 struct sfc_flow_parse_ctx *ctx,
1636 struct rte_flow_error *error)
1638 rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1639 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1640 const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
1641 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1642 struct rte_flow_item_ipv6 supp_mask;
1643 const uint8_t *spec = NULL;
1644 const uint8_t *mask = NULL;
1645 rte_be32_t vtc_flow_be;
1651 sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
1652 &supp_mask, sizeof(supp_mask));
1654 vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
1655 memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
1657 rc = sfc_flow_parse_init(item,
1658 (const void **)&spec, (const void **)&mask,
1659 (const void *)&supp_mask,
1660 &rte_flow_item_ipv6_mask,
1661 sizeof(struct rte_flow_item_ipv6), error);
1665 pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
1666 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1669 const struct rte_flow_item_ipv6 *item_spec;
1670 const struct rte_flow_item_ipv6 *item_mask;
1672 item_spec = (const struct rte_flow_item_ipv6 *)spec;
1673 item_mask = (const struct rte_flow_item_ipv6 *)mask;
1675 pdata->l3_next_proto_value = item_spec->hdr.proto;
1676 pdata->l3_next_proto_mask = item_mask->hdr.proto;
1681 rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
1686 memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
1687 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1688 tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1690 memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
1691 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1692 tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1694 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1695 fremap[EFX_MAE_FIELD_IP_TOS],
1696 sizeof(tc_value), &tc_value,
1697 sizeof(tc_mask), &tc_mask);
1699 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1700 NULL, "Failed to process item fields");
1706 static const struct sfc_mae_field_locator flocs_tcp[] = {
1708 EFX_MAE_FIELD_L4_SPORT_BE,
1709 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
1710 offsetof(struct rte_flow_item_tcp, hdr.src_port),
1713 EFX_MAE_FIELD_L4_DPORT_BE,
1714 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
1715 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
1718 EFX_MAE_FIELD_TCP_FLAGS_BE,
1720 * The values have been picked intentionally since the
1721 * target MAE field is oversize (16 bit). This mapping
1722 * relies on the fact that the MAE field is big-endian.
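 * In struct rte_tcp_hdr, "data_off" and "tcp_flags" are adjacent
 * single bytes, so reading them as one 16-bit big-endian value
 * leaves the TCP flags in the least significant byte.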
1724 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
1725 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
1726 offsetof(struct rte_flow_item_tcp, hdr.data_off),
1731 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
1732 struct sfc_flow_parse_ctx *ctx,
1733 struct rte_flow_error *error)
1735 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1736 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1737 struct rte_flow_item_tcp supp_mask;
1738 const uint8_t *spec = NULL;
1739 const uint8_t *mask = NULL;
1743 * When encountered among outermost items, item TCP is invalid.
1744 * Check which match specification is being constructed now.
1746 if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
1747 return rte_flow_error_set(error, EINVAL,
1748 RTE_FLOW_ERROR_TYPE_ITEM, item,
1749 "TCP in outer frame is invalid");
1752 sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
1753 &supp_mask, sizeof(supp_mask));
1755 rc = sfc_flow_parse_init(item,
1756 (const void **)&spec, (const void **)&mask,
1757 (const void *)&supp_mask,
1758 &rte_flow_item_tcp_mask,
1759 sizeof(struct rte_flow_item_tcp), error);
1763 pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
1764 pdata->l3_next_proto_restriction_mask = 0xff;
1769 return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
1773 static const struct sfc_mae_field_locator flocs_udp[] = {
1775 EFX_MAE_FIELD_L4_SPORT_BE,
1776 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
1777 offsetof(struct rte_flow_item_udp, hdr.src_port),
1780 EFX_MAE_FIELD_L4_DPORT_BE,
1781 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
1782 offsetof(struct rte_flow_item_udp, hdr.dst_port),
1787 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
1788 struct sfc_flow_parse_ctx *ctx,
1789 struct rte_flow_error *error)
1791 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1792 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1793 struct rte_flow_item_udp supp_mask;
1794 const uint8_t *spec = NULL;
1795 const uint8_t *mask = NULL;
1798 sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
1799 &supp_mask, sizeof(supp_mask));
1801 rc = sfc_flow_parse_init(item,
1802 (const void **)&spec, (const void **)&mask,
1803 (const void *)&supp_mask,
1804 &rte_flow_item_udp_mask,
1805 sizeof(struct rte_flow_item_udp), error);
1809 pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
1810 pdata->l3_next_proto_restriction_mask = 0xff;
1815 return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
1819 static const struct sfc_mae_field_locator flocs_tunnel[] = {
1822 * The size and offset values are relevant
1823 * for Geneve and NVGRE, too.
1825 .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
1826 .ofst = offsetof(struct rte_flow_item_vxlan, vni),
1831 * An auxiliary registry which allows using non-encap. field IDs
1832 * directly when building a match specification of type ACTION.
1834 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
1836 static const efx_mae_field_id_t field_ids_no_remap[] = {
1837 #define FIELD_ID_NO_REMAP(_field) \
1838 [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
1840 FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
1841 FIELD_ID_NO_REMAP(ETH_SADDR_BE),
1842 FIELD_ID_NO_REMAP(ETH_DADDR_BE),
1843 FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
1844 FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
1845 FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
1846 FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
1847 FIELD_ID_NO_REMAP(SRC_IP4_BE),
1848 FIELD_ID_NO_REMAP(DST_IP4_BE),
1849 FIELD_ID_NO_REMAP(IP_PROTO),
1850 FIELD_ID_NO_REMAP(IP_TOS),
1851 FIELD_ID_NO_REMAP(IP_TTL),
1852 FIELD_ID_NO_REMAP(SRC_IP6_BE),
1853 FIELD_ID_NO_REMAP(DST_IP6_BE),
1854 FIELD_ID_NO_REMAP(L4_SPORT_BE),
1855 FIELD_ID_NO_REMAP(L4_DPORT_BE),
1856 FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
1857 FIELD_ID_NO_REMAP(HAS_OVLAN),
1858 FIELD_ID_NO_REMAP(HAS_IVLAN),
1860 #undef FIELD_ID_NO_REMAP
1864 * An auxiliary registry which allows using "ENC" field IDs
1865 * when building a match specification of type OUTER.
1867 * See sfc_mae_rule_encap_parse_init().
1869 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
1870 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
1871 [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
1873 FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
1874 FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
1875 FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
1876 FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
1877 FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
1878 FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
1879 FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
1880 FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
1881 FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
1882 FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
1883 FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
1884 FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
1885 FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
1886 FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
1887 FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
1888 FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
1889 FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
1890 FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),
1892 #undef FIELD_ID_REMAP_TO_ENCAP
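/*
 * Illustrative sketch of how the remap tables above are consumed:
 * parsers index the active table by a non-encap. field ID, so the
 * same code can build both specification types.
 *
 *	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
 *
 *	rc = efx_mae_match_spec_field_set(ctx->match_spec,
 *					  fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
 *					  sizeof(value), value,
 *					  sizeof(mask), mask);
 */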
1896 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
1897 struct sfc_flow_parse_ctx *ctx,
1898 struct rte_flow_error *error)
1900 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1901 uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
1902 uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
1903 const struct rte_flow_item_vxlan *vxp;
1904 uint8_t supp_mask[sizeof(uint64_t)];
1905 const uint8_t *spec = NULL;
1906 const uint8_t *mask = NULL;
1910 * We're about to start processing inner frame items.
1911 * Process pattern data that has been deferred so far
1912 * and reset pattern data storage.
1914 rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
1918 memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
1920 sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
1921 &supp_mask, sizeof(supp_mask));
1924 * This tunnel item was preliminarily detected by
1925 * sfc_mae_rule_encap_parse_init(). Default mask
1926 * was also picked by that helper. Use it here.
1928 rc = sfc_flow_parse_init(item,
1929 (const void **)&spec, (const void **)&mask,
1930 (const void *)&supp_mask,
1931 ctx_mae->tunnel_def_mask,
1932 ctx_mae->tunnel_def_mask_size, error);
1937 * This item and later ones comprise a
1938 * match specification of type ACTION.
1940 ctx_mae->match_spec = ctx_mae->match_spec_action;
1942 /* This item and later ones use non-encap. EFX MAE field IDs. */
1943 ctx_mae->field_ids_remap = field_ids_no_remap;
1949 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
1950 * Copy 24-bit VNI, which is BE, at offset 1 in it.
1951 * The extra byte is 0 both in the mask and in the value.
1953 vxp = (const struct rte_flow_item_vxlan *)spec;
1954 memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
1956 vxp = (const struct rte_flow_item_vxlan *)mask;
1957 memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
1959 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1960 EFX_MAE_FIELD_ENC_VNET_ID_BE,
1961 sizeof(vnet_id_v), vnet_id_v,
1962 sizeof(vnet_id_m), vnet_id_m);
1964 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1965 item, "Failed to set VXLAN VNI");
1971 static const struct sfc_flow_item sfc_flow_items[] = {
1973 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
1975 * In terms of RTE flow, this item is a META one,
1976 * and its position in the pattern is don't care.
1978 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1979 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1980 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1981 .parse = sfc_mae_rule_parse_item_port_id,
1984 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
1986 * In terms of RTE flow, this item is a META one,
1987 * and its position in the pattern is don't care.
1989 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1990 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1991 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1992 .parse = sfc_mae_rule_parse_item_phy_port,
1995 .type = RTE_FLOW_ITEM_TYPE_PF,
1997 * In terms of RTE flow, this item is a META one,
1998 * and its position in the pattern is don't care.
2000 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2001 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2002 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2003 .parse = sfc_mae_rule_parse_item_pf,
2006 .type = RTE_FLOW_ITEM_TYPE_VF,
2008 * In terms of RTE flow, this item is a META one,
2009 * and its position in the pattern is don't care.
2011 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2012 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2013 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2014 .parse = sfc_mae_rule_parse_item_vf,
2017 .type = RTE_FLOW_ITEM_TYPE_ETH,
2018 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
2019 .layer = SFC_FLOW_ITEM_L2,
2020 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2021 .parse = sfc_mae_rule_parse_item_eth,
2024 .type = RTE_FLOW_ITEM_TYPE_VLAN,
2025 .prev_layer = SFC_FLOW_ITEM_L2,
2026 .layer = SFC_FLOW_ITEM_L2,
2027 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2028 .parse = sfc_mae_rule_parse_item_vlan,
2031 .type = RTE_FLOW_ITEM_TYPE_IPV4,
2032 .prev_layer = SFC_FLOW_ITEM_L2,
2033 .layer = SFC_FLOW_ITEM_L3,
2034 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2035 .parse = sfc_mae_rule_parse_item_ipv4,
2038 .type = RTE_FLOW_ITEM_TYPE_IPV6,
2039 .prev_layer = SFC_FLOW_ITEM_L2,
2040 .layer = SFC_FLOW_ITEM_L3,
2041 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2042 .parse = sfc_mae_rule_parse_item_ipv6,
2045 .type = RTE_FLOW_ITEM_TYPE_TCP,
2046 .prev_layer = SFC_FLOW_ITEM_L3,
2047 .layer = SFC_FLOW_ITEM_L4,
2048 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2049 .parse = sfc_mae_rule_parse_item_tcp,
2052 .type = RTE_FLOW_ITEM_TYPE_UDP,
2053 .prev_layer = SFC_FLOW_ITEM_L3,
2054 .layer = SFC_FLOW_ITEM_L4,
2055 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2056 .parse = sfc_mae_rule_parse_item_udp,
2059 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
2060 .prev_layer = SFC_FLOW_ITEM_L4,
2061 .layer = SFC_FLOW_ITEM_START_LAYER,
2062 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2063 .parse = sfc_mae_rule_parse_item_tunnel,
2066 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
2067 .prev_layer = SFC_FLOW_ITEM_L4,
2068 .layer = SFC_FLOW_ITEM_START_LAYER,
2069 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2070 .parse = sfc_mae_rule_parse_item_tunnel,
2073 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
2074 .prev_layer = SFC_FLOW_ITEM_L3,
2075 .layer = SFC_FLOW_ITEM_START_LAYER,
2076 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2077 .parse = sfc_mae_rule_parse_item_tunnel,
2082 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
2083 struct sfc_mae_parse_ctx *ctx,
2084 struct sfc_mae_outer_rule **rulep,
2085 struct rte_flow_error *error)
2087 efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
2090 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
2095 SFC_ASSERT(ctx->match_spec_outer != NULL);
2097 if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
2098 return rte_flow_error_set(error, ENOTSUP,
2099 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2100 "Inconsistent pattern (outer)");
2103 *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
2105 if (*rulep != NULL) {
2106 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2108 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
2109 ctx->encap_type, rulep);
2111 return rte_flow_error_set(error, rc,
2112 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2113 "Failed to process the pattern");
2117 /* The spec has now been tracked by the outer rule entry. */
2118 ctx->match_spec_outer = NULL;
2122 * In MAE, lookup sequence comprises outer parse, outer rule lookup,
2123 * inner parse (when some outer rule is hit) and action rule lookup.
2124 * If the currently processed flow does not come with an outer rule,
2125 * its action rule must be available only for packets which miss in
2126 * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
2127 * in the action rule specification; this ensures correct behaviour.
2129 * If, on the other hand, this flow does have an outer rule, its ID
2130 * may be unknown at the moment (not yet allocated), but OR_ID mask
2131 * has to be set to 0xffffffff anyway for correct class comparisons.
2132 * When the outer rule has been allocated, this match field will be
2133 * overridden by sfc_mae_outer_rule_enable() to use the right value.
2135 rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
2139 sfc_mae_outer_rule_del(sa, *rulep);
2143 return rte_flow_error_set(error, rc,
2144 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2145 "Failed to process the pattern");
2152 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
2153 const struct rte_flow_item pattern[],
2154 struct sfc_mae_parse_ctx *ctx,
2155 struct rte_flow_error *error)
2157 struct sfc_mae *mae = &sa->mae;
2160 if (pattern == NULL) {
2161 rte_flow_error_set(error, EINVAL,
2162 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
2168 switch (pattern->type) {
2169 case RTE_FLOW_ITEM_TYPE_VXLAN:
2170 ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
2171 ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
2172 ctx->tunnel_def_mask_size =
2173 sizeof(rte_flow_item_vxlan_mask);
2175 case RTE_FLOW_ITEM_TYPE_GENEVE:
2176 ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
2177 ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
2178 ctx->tunnel_def_mask_size =
2179 sizeof(rte_flow_item_geneve_mask);
2181 case RTE_FLOW_ITEM_TYPE_NVGRE:
2182 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
2183 ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
2184 ctx->tunnel_def_mask_size =
2185 sizeof(rte_flow_item_nvgre_mask);
2187 case RTE_FLOW_ITEM_TYPE_END:
2197 if (pattern->type == RTE_FLOW_ITEM_TYPE_END)

	if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  pattern, "Unsupported tunnel item");
	}

	if (ctx->priority >= mae->nb_outer_rule_prios_max) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "Unsupported priority level");
	}

	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
				     &ctx->match_spec_outer);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_ITEM, pattern,
			"Failed to initialise outer rule match specification");
	}

	/* Outermost items comprise a match specification of type OUTER. */
	ctx->match_spec = ctx->match_spec_outer;

	/* Outermost items use "ENC" EFX MAE field IDs. */
	ctx->field_ids_remap = field_ids_remap_to_encap;

	return 0;
}

static void
sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
			      struct sfc_mae_parse_ctx *ctx)
{
	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return;

	if (ctx->match_spec_outer != NULL)
		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
}

static int
sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
			   const struct rte_flow_item pattern[],
			   struct sfc_flow_spec_mae *spec,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx ctx_mae;
	struct sfc_flow_parse_ctx ctx;
	int rc;

	memset(&ctx_mae, 0, sizeof(ctx_mae));
	ctx_mae.priority = spec->priority;

	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
				     spec->priority,
				     &ctx_mae.match_spec_action);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Failed to initialise action rule match specification");
		goto fail_init_match_spec_action;
	}

	/*
	 * As a preliminary setting, assume that there is no encapsulation
	 * in the pattern. That is, pattern items are about to comprise a
	 * match specification of type ACTION and use non-encap. field IDs.
	 *
	 * sfc_mae_rule_encap_parse_init() below may override this.
	 */
	ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
	ctx_mae.match_spec = ctx_mae.match_spec_action;
	ctx_mae.field_ids_remap = field_ids_no_remap;

	ctx.type = SFC_FLOW_PARSE_CTX_MAE;
	ctx.mae = &ctx_mae;

	rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
	if (rc != 0)
		goto fail_encap_parse_init;

	rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
				    pattern, &ctx, error);
	if (rc != 0)
		goto fail_parse_pattern;

	rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
	if (rc != 0)
		goto fail_process_pattern_data;

	rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
	if (rc != 0)
		goto fail_process_outer;

	if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
		rc = rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					"Inconsistent pattern");
		goto fail_validate_match_spec_action;
	}

	spec->match_spec = ctx_mae.match_spec_action;

	return 0;

fail_validate_match_spec_action:
fail_process_outer:
fail_process_pattern_data:
fail_parse_pattern:
	sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);

fail_encap_parse_init:
	efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);

fail_init_match_spec_action:
	return rc;
}

/*
 * An action supported by MAE may correspond to a bundle of RTE flow actions,
 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
 * That is, related RTE flow actions need to be tracked as parts of a whole
 * so that they can be combined into a single action and submitted to the
 * MAE representation of a given rule's action set.
 *
 * Each RTE flow action provided by an application gets classified as
 * one belonging to some bundle type. If an action is not supposed to
 * belong to any bundle, or if this action is END, it is described as
 * one belonging to a dummy bundle of type EMPTY.
 *
 * A currently tracked bundle will be submitted if a repeating
 * action or an action of a different bundle type follows.
 */
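
/*
 * For example (illustrative values only), the sequence of actions
 *
 *   OF_PUSH_VLAN(ethertype=0x8100) ->
 *   OF_SET_VLAN_VID(vlan_vid=100) ->
 *   OF_SET_VLAN_PCP(vlan_pcp=3)
 *
 * is collected into a single VLAN_PUSH bundle and submitted as one
 * efx_mae_action_set_populate_vlan_push() request with TPID 0x8100
 * and TCI (3 << 13) | 100.
 */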

enum sfc_mae_actions_bundle_type {
	SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
	SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
};

struct sfc_mae_actions_bundle {
	enum sfc_mae_actions_bundle_type	type;

	/* Indicates actions already tracked by the current bundle */
	uint64_t				actions_mask;

	/* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
	rte_be16_t				vlan_push_tpid;
	rte_be16_t				vlan_push_tci;
};

/*
 * Combine the configuration of RTE flow actions tracked by the bundle
 * into a single action and submit the result to the MAE action set
 * specification. Do nothing in the case of a dummy action bundle.
 */
static int
sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
			      efx_mae_actions_t *spec)
{
	int rc = 0;

	switch (bundle->type) {
	case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
		break;
	case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
		rc = efx_mae_action_set_populate_vlan_push(
			spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
		break;
	default:
		SFC_ASSERT(B_FALSE);
		break;
	}

	return rc;
}

/*
 * Given the type of the next RTE flow action in line, decide
 * whether a new bundle is about to start, and, if this is the case,
 * submit and reset the current bundle.
 */
static int
sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
			    struct sfc_mae_actions_bundle *bundle,
			    efx_mae_actions_t *spec,
			    struct rte_flow_error *error)
{
	enum sfc_mae_actions_bundle_type bundle_type_new;
	int rc;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
		break;
	default:
		/*
		 * Self-sufficient actions, including END, are handled in this
		 * case. No checks for unsupported actions are needed here
		 * because parsing doesn't occur at this point.
		 */
		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
		break;
	}

	if (bundle_type_new != bundle->type ||
	    (bundle->actions_mask & (1ULL << action->type)) != 0) {
		rc = sfc_mae_actions_bundle_submit(bundle, spec);
		if (rc != 0)
			goto fail_submit;

		memset(bundle, 0, sizeof(*bundle));
	}

	bundle->type = bundle_type_new;

	return 0;

fail_submit:
	return rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Failed to request the (group of) action(s)");
}
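
/*
 * Note: sfc_mae_rule_parse_actions() invokes the helper above before
 * parsing each action and once more after the loop (for action END),
 * so the final tracked bundle is always submitted.
 */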

static void
sfc_mae_rule_parse_action_of_push_vlan(
			    const struct rte_flow_action_of_push_vlan *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	bundle->vlan_push_tpid = conf->ethertype;
}

static void
sfc_mae_rule_parse_action_of_set_vlan_vid(
			    const struct rte_flow_action_of_set_vlan_vid *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	bundle->vlan_push_tci |= (conf->vlan_vid &
				  rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
}

static void
sfc_mae_rule_parse_action_of_set_vlan_pcp(
			    const struct rte_flow_action_of_set_vlan_pcp *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
					   RTE_LEN2MASK(3, uint8_t)) << 13;

	bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
}

struct sfc_mae_parsed_item {
	const struct rte_flow_item	*item;
	size_t				proto_header_ofst;
	size_t				proto_header_size;
};

/*
 * For each 16-bit word of the given header, override
 * bits enforced by the corresponding 16-bit mask.
 */
static void
sfc_mae_header_force_item_masks(uint8_t *header_buf,
				const struct sfc_mae_parsed_item *parsed_items,
				unsigned int nb_parsed_items)
{
	unsigned int item_idx;

	for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
		const struct sfc_mae_parsed_item *parsed_item;
		const struct rte_flow_item *item;
		size_t proto_header_size;
		size_t ofst;

		parsed_item = &parsed_items[item_idx];
		proto_header_size = parsed_item->proto_header_size;
		item = parsed_item->item;

		for (ofst = 0; ofst < proto_header_size;
		     ofst += sizeof(rte_be16_t)) {
			rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
			const rte_be16_t *w_maskp;
			const rte_be16_t *w_specp;

			w_maskp = RTE_PTR_ADD(item->mask, ofst);
			w_specp = RTE_PTR_ADD(item->spec, ofst);

			*wp &= ~(*w_maskp);
			*wp |= (*w_specp & *w_maskp);
		}

		header_buf += proto_header_size;
	}
}
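
/*
 * Worked example (illustrative): after the complete encap. header has
 * been built and the defaults (e.g. SFC_IPV4_TTL_DEF below) have been
 * applied, a 16-bit word with item spec 0x1234 and item mask 0xff00
 * becomes (word & ~0xff00) | (0x1234 & 0xff00): masked-in bits are
 * forced back to the item spec, masked-out bits keep the defaults.
 */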

#define SFC_IPV4_TTL_DEF	0x40
#define SFC_IPV6_VTC_FLOW_DEF	0x60000000
#define SFC_IPV6_HOP_LIMITS_DEF	0xff
#define SFC_VXLAN_FLAGS_DEF	0x08000000

static int
sfc_mae_rule_parse_action_vxlan_encap(
			    struct sfc_mae *mae,
			    const struct rte_flow_action_vxlan_encap *conf,
			    efx_mae_actions_t *spec,
			    struct rte_flow_error *error)
{
	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
	struct rte_flow_item *pattern = conf->definition;
	uint8_t *buf = bounce_eh->buf;

	/* This array will keep track of non-VOID pattern items. */
	struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
						2 /* VLAN tags */ +
						1 /* IPv4 or IPv6 */ +
						1 /* UDP */ +
						1 /* VXLAN */];
	unsigned int nb_parsed_items = 0;

	size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
	uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
				  sizeof(struct rte_ipv6_hdr))];
	struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
	struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_udp_hdr *udp = NULL;
	unsigned int nb_vlan_tags = 0;
	size_t next_proto_ofst = 0;
	size_t ethertype_ofst = 0;
	uint64_t exp_items;

	if (pattern == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
				"The encap. header definition is NULL");
	}

	bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
	bounce_eh->size = 0;

	/*
	 * Process pattern items and remember non-VOID ones.
	 * Defer applying masks until after the complete header
	 * has been built from the pattern items.
	 */
	exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
		struct sfc_mae_parsed_item *parsed_item;
		const uint64_t exp_items_extra_vlan[] = {
			RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
		};
		size_t proto_header_size;
		rte_be16_t *ethertypep;
		uint8_t *next_protop;
		uint8_t *buf_cur;

		if (pattern->spec == NULL) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"NULL item spec in the encap. header");
		}

		if (pattern->mask == NULL) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"NULL item mask in the encap. header");
		}

		if (pattern->last != NULL) {
			/* This is not a match pattern, so disallow range. */
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Range item in the encap. header");
		}

		if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
			/* Handle VOID separately, for clarity. */
			continue;
		}

		if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Unexpected item in the encap. header");
		}

		parsed_item = &parsed_items[nb_parsed_items];
		buf_cur = buf + bounce_eh->size;

		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ether_hdr);

			ethertype_ofst = eth_ethertype_ofst;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_vlan_hdr);

			ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);

			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);

			ethertype_ofst =
			    bounce_eh->size +
			    offsetof(struct rte_vlan_hdr, eth_proto);

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
			exp_items |= exp_items_extra_vlan[nb_vlan_tags];

			++nb_vlan_tags;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ipv4_hdr);

			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);

			next_proto_ofst =
			    bounce_eh->size +
			    offsetof(struct rte_ipv4_hdr, next_proto_id);

			ipv4 = (struct rte_ipv4_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ipv6_hdr);

			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);

			next_proto_ofst = bounce_eh->size +
					  offsetof(struct rte_ipv6_hdr, proto);

			ipv6 = (struct rte_ipv6_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_udp_hdr);

			next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
			*next_protop = IPPROTO_UDP;

			udp = (struct rte_udp_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_vxlan_hdr);

			vxlan = (struct rte_vxlan_hdr *)buf_cur;

			udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
			udp->dgram_len = RTE_BE16(sizeof(*udp) +
						  sizeof(*vxlan));
			udp->dgram_cksum = 0;

			exp_items = 0;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Unknown item in the encap. header");
		}

		if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
			return rte_flow_error_set(error, E2BIG,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"The encap. header is too big");
		}

		if ((proto_header_size & 1) != 0) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Odd layer size in the encap. header");
		}

		rte_memcpy(buf_cur, pattern->spec, proto_header_size);
		bounce_eh->size += proto_header_size;

		parsed_item->item = pattern;
		parsed_item->proto_header_size = proto_header_size;
		++nb_parsed_items;
	}

	if (exp_items != 0) {
		/* Parsing item VXLAN would have reset exp_items to 0. */
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
				"No item VXLAN in the encap. header");
	}

	/* One of the pointers (ipv4, ipv6) refers to a dummy area. */
	ipv4->version_ihl = RTE_IPV4_VHL_DEF;
	ipv4->time_to_live = SFC_IPV4_TTL_DEF;
	ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
				      sizeof(*vxlan));
	/* The HW cannot compute this checksum. */
	ipv4->hdr_checksum = 0;
	ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);

	ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
	ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
	ipv6->payload_len = udp->dgram_len;

	vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);

	/* Take care of the masks. */
	sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);

	return (spec != NULL) ? efx_mae_action_set_populate_encap(spec) : 0;
}
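
/*
 * A minimal sketch (illustrative, not part of the driver) of an encap.
 * header definition which the parser above accepts; each item carries
 * both spec and mask, and no item uses a range:
 *
 *	struct rte_flow_item defn[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action_vxlan_encap conf = {
 *		.definition = defn,
 *	};
 */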

static int
sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
			       efx_mae_actions_t *spec)
{
	return efx_mae_action_set_populate_mark(spec, conf->id);
}

static int
sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
				const struct rte_flow_action_count *conf,
				efx_mae_actions_t *spec)
{
	int rc;

	if (conf->shared) {
		rc = ENOTSUP;
		goto fail_counter_shared;
	}

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
		sfc_err(sa,
			"counter queue is not configured for COUNT action");
		rc = EINVAL;
		goto fail_counter_queue_uninit;
	}

	if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
		rc = EINVAL;
		goto fail_no_service_core;
	}

	rc = efx_mae_action_set_populate_count(spec);
	if (rc != 0) {
		sfc_err(sa,
			"failed to populate counters in MAE action set: %s",
			rte_strerror(rc));
		goto fail_populate_count;
	}

	return 0;

fail_populate_count:
fail_no_service_core:
fail_counter_queue_uninit:
fail_counter_shared:
	return rc;
}

static int
sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
				   const struct rte_flow_action_phy_port *conf,
				   efx_mae_actions_t *spec)
{
	efx_mport_sel_t mport;
	uint32_t phy_port;
	int rc;

	if (conf->original != 0)
		phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
	else
		phy_port = conf->index;

	rc = efx_mae_mport_by_phy_port(phy_port, &mport);
	if (rc != 0)
		return rc;

	return efx_mae_action_set_populate_deliver(spec, &mport);
}

static int
sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
				const struct rte_flow_action_vf *vf_conf,
				efx_mae_actions_t *spec)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_mport_sel_t mport;
	uint32_t vf;
	int rc;

	if (vf_conf == NULL)
		vf = EFX_PCI_VF_INVALID;
	else if (vf_conf->original != 0)
		vf = encp->enc_vf;
	else
		vf = vf_conf->id;

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
	if (rc != 0)
		return rc;

	return efx_mae_action_set_populate_deliver(spec, &mport);
}

static int
sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
				  const struct rte_flow_action_port_id *conf,
				  efx_mae_actions_t *spec)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_mae *mae = &sa->mae;
	efx_mport_sel_t mport;
	uint16_t port_id;
	int rc;

	if (conf->id > UINT16_MAX)
		return EOVERFLOW;

	port_id = (conf->original != 0) ? sas->port_id : conf->id;

	rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
					   port_id, &mport);
	if (rc != 0)
		return rc;

	return efx_mae_action_set_populate_deliver(spec, &mport);
}

static int
sfc_mae_rule_parse_action(struct sfc_adapter *sa,
			  const struct rte_flow_action *action,
			  const struct sfc_mae_outer_rule *outer_rule,
			  struct sfc_mae_actions_bundle *bundle,
			  efx_mae_actions_t *spec,
			  struct rte_flow_error *error)
{
	bool custom_error = B_FALSE;
	int rc = 0;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
				       bundle->actions_mask);
		if (outer_rule == NULL ||
		    outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
			rc = EINVAL;
		else
			rc = efx_mae_action_set_populate_decap(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_vlan_pop(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
							   action->conf,
							   spec, error);
		custom_error = B_TRUE;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_FLAG:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_flag(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PF:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PORT_ID:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_drop(spec);
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Unsupported action");
	}
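
	/*
	 * The VXLAN_ENCAP parser above reports fine-grained errors itself
	 * and sets custom_error; for all other failures, a generic
	 * rte_flow error is filled in below.
	 */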
	if (rc == 0) {
		bundle->actions_mask |= (1ULL << action->type);
	} else if (!custom_error) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "Failed to request the action");
	}

	return rc;
}

static void
sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
{
	bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
}

static int
sfc_mae_process_encap_header(struct sfc_adapter *sa,
			     const struct sfc_mae_bounce_eh *bounce_eh,
			     struct sfc_mae_encap_header **encap_headerp)
{
	if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
		*encap_headerp = NULL;
		return 0;
	}

	*encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
	if (*encap_headerp != NULL)
		return 0;

	return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
}

int
sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
			   const struct rte_flow_action actions[],
			   struct sfc_flow_spec_mae *spec_mae,
			   struct rte_flow_error *error)
{
	struct sfc_mae_encap_header *encap_header = NULL;
	struct sfc_mae_actions_bundle bundle = {0};
	const struct rte_flow_action *action;
	struct sfc_mae *mae = &sa->mae;
	efx_mae_actions_t *spec;
	unsigned int n_count;
	int rc;

	if (actions == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				"NULL actions");
	}

	rc = efx_mae_action_set_spec_init(sa->nic, &spec);
	if (rc != 0)
		goto fail_action_set_spec_init;

	/* Cleanup after previous encap. header bounce buffer usage. */
	sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);

	for (action = actions;
	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
		if (rc != 0)
			goto fail_rule_parse_action;

		rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule,
					       &bundle, spec, error);
		if (rc != 0)
			goto fail_rule_parse_action;
	}

	rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
	if (rc != 0)
		goto fail_rule_parse_action;

	rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
	if (rc != 0)
		goto fail_process_encap_header;

	n_count = efx_mae_action_set_get_nb_count(spec);
	if (n_count > 1) {
		rc = ENOTSUP;
		sfc_err(sa, "too many count actions requested: %u", n_count);
		goto fail_nb_count;
	}

	spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
							 n_count, spec);
	if (spec_mae->action_set != NULL) {
		sfc_mae_encap_header_del(sa, encap_header);
		efx_mae_action_set_spec_fini(sa->nic, spec);
		return 0;
	}

	rc = sfc_mae_action_set_add(sa, actions, spec, encap_header, n_count,
				    &spec_mae->action_set);
	if (rc != 0)
		goto fail_action_set_add;

	return 0;

fail_action_set_add:
fail_nb_count:
	sfc_mae_encap_header_del(sa, encap_header);

fail_process_encap_header:
fail_rule_parse_action:
	efx_mae_action_set_spec_fini(sa->nic, spec);

fail_action_set_spec_init:
	if (rc > 0 && rte_errno == 0) {
		rc = rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL, "Failed to process the action");
	}
	return rc;
}
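
/*
 * Note: sfc_mae_rule_parse_actions() above first tries to reuse an
 * existing action set entry (the attach path) with a matching
 * specification, encap. header and counter requirement; only otherwise
 * does it create a new entry by means of sfc_mae_action_set_add().
 */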

static bool
sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
			const efx_mae_match_spec_t *left,
			const efx_mae_match_spec_t *right)
{
	bool have_same_class;
	int rc;

	rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
					   &have_same_class);

	return (rc == 0) ? have_same_class : false;
}

static int
sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
				struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	struct sfc_mae_outer_rule *entry;
	struct sfc_mae *mae = &sa->mae;

	if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
		/* An active rule is reused. Its class is known to be valid. */
		return 0;
	}

	TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
			      sfc_mae_outer_rules, entries) {
		const efx_mae_match_spec_t *left = entry->match_spec;
		const efx_mae_match_spec_t *right = rule->match_spec;

		if (entry == rule)
			continue;

		if (sfc_mae_rules_class_cmp(sa, left, right))
			return 0;
	}

	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
		 "support for outer frame pattern items is not guaranteed; "
		 "other than that, the items are valid from SW standpoint");
	return 0;
}

static int
sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
				 struct sfc_flow_spec_mae *spec)
{
	const struct rte_flow *entry;

	TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
		const struct sfc_flow_spec *entry_spec = &entry->spec;
		const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
		const efx_mae_match_spec_t *left = es_mae->match_spec;
		const efx_mae_match_spec_t *right = spec->match_spec;

		switch (entry_spec->type) {
		case SFC_FLOW_SPEC_FILTER:
			/* Ignore VNIC-level flows */
			break;
		case SFC_FLOW_SPEC_MAE:
			if (sfc_mae_rules_class_cmp(sa, left, right))
				return 0;
			break;
		default:
			SFC_ASSERT(B_FALSE);
		}
	}

	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
		 "support for inner frame pattern items is not guaranteed; "
		 "other than that, the items are valid from SW standpoint");
	return 0;
}

/**
 * Confirm that a given flow can be accepted by the FW.
 *
 * @param sa
 *   Software adapter context
 * @param flow
 *   Flow to be verified
 * @return
 *   Zero on success and non-zero in the case of error.
 *   A special value of EAGAIN indicates that the adapter is
 *   not in started state. This state is compulsory because
 *   it only makes sense to compare the rule class of the flow
 *   being validated with classes of the active rules.
 *   Such classes are known to be supported by the FW.
 */
static int
sfc_mae_flow_verify(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ADAPTER_STARTED)
		return EAGAIN;

	if (outer_rule != NULL) {
		rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
		if (rc != 0)
			return rc;
	}

	return sfc_mae_action_rule_class_verify(sa, spec_mae);
}

int
sfc_mae_flow_insert(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	struct sfc_mae_action_set *action_set = spec_mae->action_set;
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(action_set != NULL);

	if (outer_rule != NULL) {
		rc = sfc_mae_outer_rule_enable(sa, outer_rule,
					       spec_mae->match_spec);
		if (rc != 0)
			goto fail_outer_rule_enable;
	}

	rc = sfc_mae_action_set_enable(sa, action_set);
	if (rc != 0)
		goto fail_action_set_enable;

	if (action_set->n_counters > 0) {
		rc = sfc_mae_counter_start(sa);
		if (rc != 0) {
			sfc_err(sa, "failed to start MAE counters support: %s",
				rte_strerror(rc));
			goto fail_mae_counter_start;
		}
	}

	rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
					NULL, &fw_rsrc->aset_id,
					&spec_mae->rule_id);
	if (rc != 0)
		goto fail_action_rule_insert;

	sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
		flow, spec_mae->rule_id.id);

	return 0;

fail_action_rule_insert:
fail_mae_counter_start:
	sfc_mae_action_set_disable(sa, action_set);

fail_action_set_enable:
	if (outer_rule != NULL)
		sfc_mae_outer_rule_disable(sa, outer_rule);

fail_outer_rule_enable:
	return rc;
}

int
sfc_mae_flow_remove(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_action_set *action_set = spec_mae->action_set;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	int rc;

	SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(action_set != NULL);

	rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
	if (rc != 0) {
		sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
			flow, spec_mae->rule_id.id, strerror(rc));
	}
	sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
		flow, spec_mae->rule_id.id);
	spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;

	sfc_mae_action_set_disable(sa, action_set);

	if (outer_rule != NULL)
		sfc_mae_outer_rule_disable(sa, outer_rule);

	return 0;
}

static int
sfc_mae_query_counter(struct sfc_adapter *sa,
		      struct sfc_flow_spec_mae *spec,
		      const struct rte_flow_action *action,
		      struct rte_flow_query_count *data,
		      struct rte_flow_error *error)
{
	struct sfc_mae_action_set *action_set = spec->action_set;
	const struct rte_flow_action_count *conf = action->conf;
	unsigned int i;
	int rc;

	if (action_set->n_counters == 0) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, action,
			"Queried flow rule does not have count actions");
	}

	for (i = 0; i < action_set->n_counters; i++) {
		/*
		 * Get the first available counter of the flow rule if
		 * counter ID is not specified.
		 */
		if (conf != NULL && action_set->counters[i].rte_id != conf->id)
			continue;

		rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
					 &action_set->counters[i], data);
		if (rc != 0) {
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"Queried flow rule counter action is invalid");
		}

		return 0;
	}

	return rte_flow_error_set(error, ENOENT,
				  RTE_FLOW_ERROR_TYPE_ACTION, action,
				  "No such flow rule action count ID");
}
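
/*
 * A sketch of how an application reaches sfc_mae_query_counter() above
 * via the generic rte_flow API (illustrative; port_id, flow and error
 * are assumed to exist):
 *
 *	struct rte_flow_query_count data = { .reset = 0 };
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *
 *	rc = rte_flow_query(port_id, flow, &action, &data, &error);
 */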

int
sfc_mae_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *action,
		   void *data,
		   struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		return sfc_mae_query_counter(sa, spec_mae, action,
					     data, error);
	default:
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Query for action of this type is not supported");
	}
}