1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
12 #include <rte_bitops.h>
13 #include <rte_common.h>
14 #include <rte_vxlan.h>
19 #include "sfc_mae_counter.h"
21 #include "sfc_switch.h"
22 #include "sfc_service.h"
25 sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
26 efx_mport_sel_t *mportp)
28 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
30 return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
35 sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
36 uint32_t nb_counters_max)
38 return sfc_mae_counters_init(&registry->counters, nb_counters_max);
42 sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
44 sfc_mae_counters_fini(&registry->counters);
48 sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa,
49 struct sfc_mae_rule **rule)
51 struct sfc_mae *mae = &sa->mae;
52 struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules;
56 for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) {
57 if (internal_rules->rules[entry].spec == NULL)
61 if (entry == SFC_MAE_NB_RULES_MAX) {
63 sfc_err(sa, "failed too many rules (%u rules used)", entry);
64 goto fail_too_many_rules;
67 *rule = &internal_rules->rules[entry];
76 sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
77 const efx_mport_sel_t *mport_match,
78 const efx_mport_sel_t *mport_deliver,
79 int prio, struct sfc_mae_rule **rulep)
81 struct sfc_mae *mae = &sa->mae;
82 struct sfc_mae_rule *rule;
85 sfc_log_init(sa, "entry");
87 if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) {
89 sfc_err(sa, "failed: invalid priority %d (max %u)", prio,
90 mae->nb_action_rule_prios_max);
91 goto fail_invalid_prio;
94 prio = mae->nb_action_rule_prios_max - 1;
96 rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule);
98 goto fail_find_empty_slot;
100 sfc_log_init(sa, "init MAE match spec");
101 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
102 (uint32_t)prio, &rule->spec);
104 sfc_err(sa, "failed to init MAE match spec");
105 goto fail_match_init;
108 rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL);
110 sfc_err(sa, "failed to get MAE match mport selector");
114 rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions);
116 sfc_err(sa, "failed to init MAE action set");
117 goto fail_action_init;
120 rc = efx_mae_action_set_populate_deliver(rule->actions,
123 sfc_err(sa, "failed to populate deliver action");
124 goto fail_populate_deliver;
127 rc = efx_mae_action_set_alloc(sa->nic, rule->actions,
130 sfc_err(sa, "failed to allocate action set");
131 goto fail_action_set_alloc;
134 rc = efx_mae_action_rule_insert(sa->nic, rule->spec, NULL,
138 sfc_err(sa, "failed to insert action rule");
139 goto fail_rule_insert;
144 sfc_log_init(sa, "done");
149 efx_mae_action_set_free(sa->nic, &rule->action_set);
151 fail_action_set_alloc:
152 fail_populate_deliver:
153 efx_mae_action_set_spec_fini(sa->nic, rule->actions);
157 efx_mae_match_spec_fini(sa->nic, rule->spec);
160 fail_find_empty_slot:
162 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
167 sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule)
169 if (rule == NULL || rule->spec == NULL)
172 efx_mae_action_rule_remove(sa->nic, &rule->rule_id);
173 efx_mae_action_set_free(sa->nic, &rule->action_set);
174 efx_mae_action_set_spec_fini(sa->nic, rule->actions);
175 efx_mae_match_spec_fini(sa->nic, rule->spec);
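/*
 * Initialise MAE support for the adapter: query hardware limits, set up
 * the counter registry, assign the switch domain and port, and allocate
 * the encap. header bounce buffer.
 */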
181 sfc_mae_attach(struct sfc_adapter *sa)
183 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
184 struct sfc_mae_switch_port_request switch_port_request = {0};
185 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
186 efx_mport_sel_t entity_mport;
187 struct sfc_mae *mae = &sa->mae;
188 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
189 efx_mae_limits_t limits;
192 sfc_log_init(sa, "entry");
194 if (!encp->enc_mae_supported) {
195 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
199 sfc_log_init(sa, "init MAE");
200 rc = efx_mae_init(sa->nic);
204 sfc_log_init(sa, "get MAE limits");
205 rc = efx_mae_get_limits(sa->nic, &limits);
207 goto fail_mae_get_limits;
209 sfc_log_init(sa, "init MAE counter registry");
210 rc = sfc_mae_counter_registry_init(&mae->counter_registry,
211 limits.eml_max_n_counters);
213 sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
214 limits.eml_max_n_counters, rte_strerror(rc));
215 goto fail_counter_registry_init;
218 sfc_log_init(sa, "assign entity MPORT");
219 rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
221 goto fail_mae_assign_entity_mport;
223 sfc_log_init(sa, "assign RTE switch domain");
224 rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
226 goto fail_mae_assign_switch_domain;
228 sfc_log_init(sa, "assign RTE switch port");
229 switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
230 switch_port_request.entity_mportp = &entity_mport;
231 /* RTE ethdev MPORT matches that of the entity for independent ports. */
232 switch_port_request.ethdev_mportp = &entity_mport;
233 switch_port_request.ethdev_port_id = sas->port_id;
234 rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
235 &switch_port_request,
236 &mae->switch_port_id);
238 goto fail_mae_assign_switch_port;
240 sfc_log_init(sa, "allocate encap. header bounce buffer");
241 bounce_eh->buf_size = limits.eml_encap_header_size_limit;
242 bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
243 bounce_eh->buf_size, 0);
244 if (bounce_eh->buf == NULL)
245 goto fail_mae_alloc_bounce_eh;
247 mae->status = SFC_MAE_STATUS_SUPPORTED;
248 mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
249 mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
250 mae->encap_types_supported = limits.eml_encap_types_supported;
251 TAILQ_INIT(&mae->outer_rules);
252 TAILQ_INIT(&mae->encap_headers);
253 TAILQ_INIT(&mae->action_sets);
255 sfc_log_init(sa, "done");
259 fail_mae_alloc_bounce_eh:
260 fail_mae_assign_switch_port:
261 fail_mae_assign_switch_domain:
262 fail_mae_assign_entity_mport:
263 sfc_mae_counter_registry_fini(&mae->counter_registry);
265 fail_counter_registry_init:
267 efx_mae_fini(sa->nic);
270 sfc_log_init(sa, "failed %d", rc);
276 sfc_mae_detach(struct sfc_adapter *sa)
278 struct sfc_mae *mae = &sa->mae;
279 enum sfc_mae_status status_prev = mae->status;
281 sfc_log_init(sa, "entry");
283 mae->nb_action_rule_prios_max = 0;
284 mae->status = SFC_MAE_STATUS_UNKNOWN;
286 if (status_prev != SFC_MAE_STATUS_SUPPORTED)
289 rte_free(mae->bounce_eh.buf);
290 sfc_mae_counter_registry_fini(&mae->counter_registry);
292 efx_mae_fini(sa->nic);
294 sfc_log_init(sa, "done");
297 static struct sfc_mae_outer_rule *
298 sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
299 const efx_mae_match_spec_t *match_spec,
300 efx_tunnel_protocol_t encap_type)
302 struct sfc_mae_outer_rule *rule;
303 struct sfc_mae *mae = &sa->mae;
305 SFC_ASSERT(sfc_adapter_is_locked(sa));
307 TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
308 if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
309 rule->encap_type == encap_type) {
310 sfc_dbg(sa, "attaching to outer_rule=%p", rule);
320 sfc_mae_outer_rule_add(struct sfc_adapter *sa,
321 efx_mae_match_spec_t *match_spec,
322 efx_tunnel_protocol_t encap_type,
323 struct sfc_mae_outer_rule **rulep)
325 struct sfc_mae_outer_rule *rule;
326 struct sfc_mae *mae = &sa->mae;
328 SFC_ASSERT(sfc_adapter_is_locked(sa));
330 rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
335 rule->match_spec = match_spec;
336 rule->encap_type = encap_type;
338 rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
340 TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);
344 sfc_dbg(sa, "added outer_rule=%p", rule);
350 sfc_mae_outer_rule_del(struct sfc_adapter *sa,
351 struct sfc_mae_outer_rule *rule)
353 struct sfc_mae *mae = &sa->mae;
355 SFC_ASSERT(sfc_adapter_is_locked(sa));
356 SFC_ASSERT(rule->refcnt != 0);
360 if (rule->refcnt != 0)
363 if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
364 rule->fw_rsrc.refcnt != 0) {
365 sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
366 rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
369 efx_mae_match_spec_fini(sa->nic, rule->match_spec);
371 TAILQ_REMOVE(&mae->outer_rules, rule, entries);
374 sfc_dbg(sa, "deleted outer_rule=%p", rule);
378 sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
379 struct sfc_mae_outer_rule *rule,
380 efx_mae_match_spec_t *match_spec_action)
382 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
385 SFC_ASSERT(sfc_adapter_is_locked(sa));
387 if (fw_rsrc->refcnt == 0) {
388 SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
389 SFC_ASSERT(rule->match_spec != NULL);
391 rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
395 sfc_err(sa, "failed to enable outer_rule=%p: %s",
401 rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
404 if (fw_rsrc->refcnt == 0) {
405 (void)efx_mae_outer_rule_remove(sa->nic,
407 fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
410 sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));
415 if (fw_rsrc->refcnt == 0) {
416 sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
417 rule, fw_rsrc->rule_id.id);
426 sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
427 struct sfc_mae_outer_rule *rule)
429 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
432 SFC_ASSERT(sfc_adapter_is_locked(sa));
434 if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
435 fw_rsrc->refcnt == 0) {
436 sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
437 rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
441 if (fw_rsrc->refcnt == 1) {
442 rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
444 sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
445 rule, fw_rsrc->rule_id.id);
447 sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
448 rule, fw_rsrc->rule_id.id, strerror(rc));
450 fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
456 static struct sfc_mae_encap_header *
457 sfc_mae_encap_header_attach(struct sfc_adapter *sa,
458 const struct sfc_mae_bounce_eh *bounce_eh)
460 struct sfc_mae_encap_header *encap_header;
461 struct sfc_mae *mae = &sa->mae;
463 SFC_ASSERT(sfc_adapter_is_locked(sa));
465 TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
466 if (encap_header->size == bounce_eh->size &&
467 memcmp(encap_header->buf, bounce_eh->buf,
468 bounce_eh->size) == 0) {
469 sfc_dbg(sa, "attaching to encap_header=%p",
471 ++(encap_header->refcnt);
480 sfc_mae_encap_header_add(struct sfc_adapter *sa,
481 const struct sfc_mae_bounce_eh *bounce_eh,
482 struct sfc_mae_encap_header **encap_headerp)
484 struct sfc_mae_encap_header *encap_header;
485 struct sfc_mae *mae = &sa->mae;
487 SFC_ASSERT(sfc_adapter_is_locked(sa));
489 encap_header = rte_zmalloc("sfc_mae_encap_header",
490 sizeof(*encap_header), 0);
491 if (encap_header == NULL)
494 encap_header->size = bounce_eh->size;
496 encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
497 encap_header->size, 0);
498 if (encap_header->buf == NULL) {
499 rte_free(encap_header);
503 rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);
505 encap_header->refcnt = 1;
506 encap_header->type = bounce_eh->type;
507 encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;
509 TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);
511 *encap_headerp = encap_header;
513 sfc_dbg(sa, "added encap_header=%p", encap_header);
519 sfc_mae_encap_header_del(struct sfc_adapter *sa,
520 struct sfc_mae_encap_header *encap_header)
522 struct sfc_mae *mae = &sa->mae;
524 if (encap_header == NULL)
527 SFC_ASSERT(sfc_adapter_is_locked(sa));
528 SFC_ASSERT(encap_header->refcnt != 0);
530 --(encap_header->refcnt);
532 if (encap_header->refcnt != 0)
535 if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
536 encap_header->fw_rsrc.refcnt != 0) {
537 sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
538 encap_header, encap_header->fw_rsrc.eh_id.id,
539 encap_header->fw_rsrc.refcnt);
542 TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
543 rte_free(encap_header->buf);
544 rte_free(encap_header);
546 sfc_dbg(sa, "deleted encap_header=%p", encap_header);
550 sfc_mae_encap_header_enable(struct sfc_adapter *sa,
551 struct sfc_mae_encap_header *encap_header,
552 efx_mae_actions_t *action_set_spec)
554 struct sfc_mae_fw_rsrc *fw_rsrc;
557 if (encap_header == NULL)
560 SFC_ASSERT(sfc_adapter_is_locked(sa));
562 fw_rsrc = &encap_header->fw_rsrc;
564 if (fw_rsrc->refcnt == 0) {
565 SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
566 SFC_ASSERT(encap_header->buf != NULL);
567 SFC_ASSERT(encap_header->size != 0);
569 rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
574 sfc_err(sa, "failed to enable encap_header=%p: %s",
575 encap_header, strerror(rc));
580 rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
583 if (fw_rsrc->refcnt == 0) {
584 (void)efx_mae_encap_header_free(sa->nic,
586 fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
589 sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));
594 if (fw_rsrc->refcnt == 0) {
595 sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
596 encap_header, fw_rsrc->eh_id.id);
605 sfc_mae_encap_header_disable(struct sfc_adapter *sa,
606 struct sfc_mae_encap_header *encap_header)
608 struct sfc_mae_fw_rsrc *fw_rsrc;
611 if (encap_header == NULL)
614 SFC_ASSERT(sfc_adapter_is_locked(sa));
616 fw_rsrc = &encap_header->fw_rsrc;
618 if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
619 fw_rsrc->refcnt == 0) {
620 sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
621 encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
625 if (fw_rsrc->refcnt == 1) {
626 rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
628 sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
629 encap_header, fw_rsrc->eh_id.id);
631 sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
632 encap_header, fw_rsrc->eh_id.id, strerror(rc));
634 fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
641 sfc_mae_counters_enable(struct sfc_adapter *sa,
642 struct sfc_mae_counter_id *counters,
643 unsigned int n_counters,
644 efx_mae_actions_t *action_set_spec)
648 sfc_log_init(sa, "entry");
650 if (n_counters == 0) {
651 sfc_log_init(sa, "no counters - skip");
655 SFC_ASSERT(sfc_adapter_is_locked(sa));
656 SFC_ASSERT(n_counters == 1);
658 rc = sfc_mae_counter_enable(sa, &counters[0]);
660 sfc_err(sa, "failed to enable MAE counter %u: %s",
661 counters[0].mae_id.id, rte_strerror(rc));
662 goto fail_counter_add;
665 rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
666 &counters[0].mae_id);
668 sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
669 counters[0].mae_id.id, rte_strerror(rc));
670 goto fail_fill_in_id;
676 (void)sfc_mae_counter_disable(sa, &counters[0]);
679 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
684 sfc_mae_counters_disable(struct sfc_adapter *sa,
685 struct sfc_mae_counter_id *counters,
686 unsigned int n_counters)
691 SFC_ASSERT(sfc_adapter_is_locked(sa));
692 SFC_ASSERT(n_counters == 1);
694 if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
695 sfc_err(sa, "failed to disable: already disabled");
699 return sfc_mae_counter_disable(sa, &counters[0]);
702 static struct sfc_mae_action_set *
703 sfc_mae_action_set_attach(struct sfc_adapter *sa,
704 const struct sfc_mae_encap_header *encap_header,
705 unsigned int n_count,
706 const efx_mae_actions_t *spec)
708 struct sfc_mae_action_set *action_set;
709 struct sfc_mae *mae = &sa->mae;
711 SFC_ASSERT(sfc_adapter_is_locked(sa));
713 TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
715 * Shared counters are not supported, hence action sets with
716 * COUNT are not attachable.
718 if (action_set->encap_header == encap_header &&
720 efx_mae_action_set_specs_equal(action_set->spec, spec)) {
721 sfc_dbg(sa, "attaching to action_set=%p", action_set);
722 ++(action_set->refcnt);
731 sfc_mae_action_set_add(struct sfc_adapter *sa,
732 const struct rte_flow_action actions[],
733 efx_mae_actions_t *spec,
734 struct sfc_mae_encap_header *encap_header,
735 unsigned int n_counters,
736 struct sfc_mae_action_set **action_setp)
738 struct sfc_mae_action_set *action_set;
739 struct sfc_mae *mae = &sa->mae;
742 SFC_ASSERT(sfc_adapter_is_locked(sa));
744 action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
745 if (action_set == NULL) {
746 sfc_err(sa, "failed to alloc action set");
750 if (n_counters > 0) {
751 const struct rte_flow_action *action;
753 action_set->counters = rte_malloc("sfc_mae_counter_ids",
754 sizeof(action_set->counters[0]) * n_counters, 0);
755 if (action_set->counters == NULL) {
756 rte_free(action_set);
757 sfc_err(sa, "failed to alloc counters");
761 for (action = actions, i = 0;
762 action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
764 const struct rte_flow_action_count *conf;
766 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
771 action_set->counters[i].mae_id.id =
772 EFX_MAE_RSRC_ID_INVALID;
773 action_set->counters[i].rte_id = conf->id;
776 action_set->n_counters = n_counters;
779 action_set->refcnt = 1;
780 action_set->spec = spec;
781 action_set->encap_header = encap_header;
783 action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
785 TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
787 *action_setp = action_set;
789 sfc_dbg(sa, "added action_set=%p", action_set);
795 sfc_mae_action_set_del(struct sfc_adapter *sa,
796 struct sfc_mae_action_set *action_set)
798 struct sfc_mae *mae = &sa->mae;
800 SFC_ASSERT(sfc_adapter_is_locked(sa));
801 SFC_ASSERT(action_set->refcnt != 0);
803 --(action_set->refcnt);
805 if (action_set->refcnt != 0)
808 if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
809 action_set->fw_rsrc.refcnt != 0) {
810 sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
811 action_set, action_set->fw_rsrc.aset_id.id,
812 action_set->fw_rsrc.refcnt);
815 efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
816 sfc_mae_encap_header_del(sa, action_set->encap_header);
817 if (action_set->n_counters > 0) {
818 SFC_ASSERT(action_set->n_counters == 1);
819 SFC_ASSERT(action_set->counters[0].mae_id.id ==
820 EFX_MAE_RSRC_ID_INVALID);
821 rte_free(action_set->counters);
823 TAILQ_REMOVE(&mae->action_sets, action_set, entries);
824 rte_free(action_set);
826 sfc_dbg(sa, "deleted action_set=%p", action_set);
830 sfc_mae_action_set_enable(struct sfc_adapter *sa,
831 struct sfc_mae_action_set *action_set)
833 struct sfc_mae_encap_header *encap_header = action_set->encap_header;
834 struct sfc_mae_counter_id *counters = action_set->counters;
835 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
838 SFC_ASSERT(sfc_adapter_is_locked(sa));
840 if (fw_rsrc->refcnt == 0) {
841 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
842 SFC_ASSERT(action_set->spec != NULL);
844 rc = sfc_mae_encap_header_enable(sa, encap_header,
849 rc = sfc_mae_counters_enable(sa, counters,
850 action_set->n_counters,
853 sfc_err(sa, "failed to enable %u MAE counters: %s",
854 action_set->n_counters, rte_strerror(rc));
856 sfc_mae_encap_header_disable(sa, encap_header);
860 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
863 sfc_err(sa, "failed to enable action_set=%p: %s",
864 action_set, strerror(rc));
866 (void)sfc_mae_counters_disable(sa, counters,
867 action_set->n_counters);
868 sfc_mae_encap_header_disable(sa, encap_header);
872 sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
873 action_set, fw_rsrc->aset_id.id);
882 sfc_mae_action_set_disable(struct sfc_adapter *sa,
883 struct sfc_mae_action_set *action_set)
885 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
888 SFC_ASSERT(sfc_adapter_is_locked(sa));
890 if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
891 fw_rsrc->refcnt == 0) {
892 sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
893 action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
897 if (fw_rsrc->refcnt == 1) {
898 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
900 sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
901 action_set, fw_rsrc->aset_id.id);
903 sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
904 action_set, fw_rsrc->aset_id.id, strerror(rc));
906 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
908 rc = sfc_mae_counters_disable(sa, action_set->counters,
909 action_set->n_counters);
911 sfc_err(sa, "failed to disable %u MAE counters: %s",
912 action_set->n_counters, rte_strerror(rc));
915 sfc_mae_encap_header_disable(sa, action_set->encap_header);
922 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
923 struct rte_flow *flow)
925 struct sfc_flow_spec *spec;
926 struct sfc_flow_spec_mae *spec_mae;
936 spec_mae = &spec->mae;
938 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
940 if (spec_mae->outer_rule != NULL)
941 sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);
943 if (spec_mae->action_set != NULL)
944 sfc_mae_action_set_del(sa, spec_mae->action_set);
946 if (spec_mae->match_spec != NULL)
947 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
951 sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
953 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
954 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
955 const efx_mae_field_id_t field_ids[] = {
956 EFX_MAE_FIELD_VLAN0_PROTO_BE,
957 EFX_MAE_FIELD_VLAN1_PROTO_BE,
959 const struct sfc_mae_ethertype *et;
964 * In accordance with RTE flow API convention, the innermost L2
965 * item's "type" ("inner_type") is an L3 EtherType. If there is
966 * no L3 item, it's 0x0000/0x0000.
968 et = &pdata->ethertypes[pdata->nb_vlan_tags];
969 rc = efx_mae_match_spec_field_set(ctx->match_spec,
970 fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
972 (const uint8_t *)&et->value,
974 (const uint8_t *)&et->mask);
979 * sfc_mae_rule_parse_item_vlan() has already made sure
980 * that pdata->nb_vlan_tags does not exceed this figure.
982 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
984 for (i = 0; i < pdata->nb_vlan_tags; ++i) {
985 et = &pdata->ethertypes[i];
987 rc = efx_mae_match_spec_field_set(ctx->match_spec,
988 fremap[field_ids[i]],
990 (const uint8_t *)&et->value,
992 (const uint8_t *)&et->mask);
1001 sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
1002 struct rte_flow_error *error)
1004 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1005 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
1006 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1007 const rte_be16_t supported_tpids[] = {
1008 /* VLAN standard TPID (always the first element) */
1009 RTE_BE16(RTE_ETHER_TYPE_VLAN),
1011 /* Double-tagging TPIDs */
1012 RTE_BE16(RTE_ETHER_TYPE_QINQ),
1013 RTE_BE16(RTE_ETHER_TYPE_QINQ1),
1014 RTE_BE16(RTE_ETHER_TYPE_QINQ2),
1015 RTE_BE16(RTE_ETHER_TYPE_QINQ3),
1017 bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
1018 unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
1019 unsigned int ethertype_idx;
1020 const uint8_t *valuep;
1021 const uint8_t *maskp;
1024 if (pdata->innermost_ethertype_restriction.mask != 0 &&
1025 pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1027 * If a single VLAN item is followed by an L3 item, the value
1028 * of "type" in item ETH can't be a double-tagging TPID.
1030 nb_supported_tpids = 1;
1034 * sfc_mae_rule_parse_item_vlan() has already made sure
1035 * that pdata->nb_vlan_tags does not exceed this figure.
1037 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1039 for (ethertype_idx = 0;
1040 ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
1041 rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
1042 rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
1043 unsigned int tpid_idx;
1046 * This loop can have only two iterations. On the second one,
1047 * drop the outer tag presence enforcement bit because the presence
1048 * of an inner tag implies the presence of the outer tag.
1050 enforce_tag_presence[0] = B_FALSE;
1052 if (tpid_m == RTE_BE16(0)) {
1053 if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
1054 enforce_tag_presence[ethertype_idx] = B_TRUE;
1056 /* No match on this field, and no value check. */
1057 nb_supported_tpids = 1;
1061 /* Only an exact match is supported. */
1062 if (tpid_m != RTE_BE16(0xffff)) {
1063 sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
1064 rte_be_to_cpu_16(tpid_m));
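/*
 * In a double-tagged pattern, this indexing restricts the outer tag
 * to double-tagging TPIDs and the inner tag to the standard TPID.
 */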
1069 for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
1070 tpid_idx < nb_supported_tpids; ++tpid_idx) {
1071 if (tpid_v == supported_tpids[tpid_idx])
1075 if (tpid_idx == nb_supported_tpids) {
1076 sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
1077 rte_be_to_cpu_16(tpid_v));
1082 nb_supported_tpids = 1;
1085 if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
1086 struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
1087 rte_be16_t enforced_et;
1089 enforced_et = pdata->innermost_ethertype_restriction.value;
1091 if (et->mask == 0) {
1092 et->mask = RTE_BE16(0xffff);
1093 et->value = enforced_et;
1094 } else if (et->mask != RTE_BE16(0xffff) ||
1095 et->value != enforced_et) {
1096 sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
1097 rte_be_to_cpu_16(enforced_et),
1098 rte_be_to_cpu_16(et->value),
1099 rte_be_to_cpu_16(et->mask));
1106 * Now, when the number of VLAN tags is known, set fields
1107 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
1108 * one is a valid L3 EtherType (or 0x0000/0x0000) and the last
1109 * two are valid TPIDs (or 0x0000/0x0000).
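 * For example, a pattern ETH / VLAN / VLAN / IPV4 results in ETHER_TYPE
 * carrying the IPv4 EtherType from the second VLAN item, while
 * VLAN0_PROTO and VLAN1_PROTO carry the outer and inner TPIDs taken
 * from item ETH and the first VLAN item, respectively.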
1111 rc = sfc_mae_set_ethertypes(ctx);
1115 if (pdata->l3_next_proto_restriction_mask == 0xff) {
1116 if (pdata->l3_next_proto_mask == 0) {
1117 pdata->l3_next_proto_mask = 0xff;
1118 pdata->l3_next_proto_value =
1119 pdata->l3_next_proto_restriction_value;
1120 } else if (pdata->l3_next_proto_mask != 0xff ||
1121 pdata->l3_next_proto_value !=
1122 pdata->l3_next_proto_restriction_value) {
1123 sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
1124 pdata->l3_next_proto_restriction_value,
1125 pdata->l3_next_proto_value,
1126 pdata->l3_next_proto_mask);
1132 if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
1133 rc = efx_mae_match_spec_bit_set(ctx->match_spec,
1134 fremap[EFX_MAE_FIELD_HAS_OVLAN],
1135 enforce_tag_presence[0] ||
1136 pdata->has_ovlan_value);
1141 if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
1142 rc = efx_mae_match_spec_bit_set(ctx->match_spec,
1143 fremap[EFX_MAE_FIELD_HAS_IVLAN],
1144 enforce_tag_presence[1] ||
1145 pdata->has_ivlan_value);
1150 valuep = (const uint8_t *)&pdata->l3_next_proto_value;
1151 maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
1152 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1153 fremap[EFX_MAE_FIELD_IP_PROTO],
1154 sizeof(pdata->l3_next_proto_value),
1156 sizeof(pdata->l3_next_proto_mask),
1164 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1165 "Failed to process pattern data");
1169 sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
1170 struct sfc_flow_parse_ctx *ctx,
1171 struct rte_flow_error *error)
1173 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1174 const struct rte_flow_item_port_id supp_mask = {
1177 const void *def_mask = &rte_flow_item_port_id_mask;
1178 const struct rte_flow_item_port_id *spec = NULL;
1179 const struct rte_flow_item_port_id *mask = NULL;
1180 efx_mport_sel_t mport_sel;
1183 if (ctx_mae->match_mport_set) {
1184 return rte_flow_error_set(error, ENOTSUP,
1185 RTE_FLOW_ERROR_TYPE_ITEM, item,
1186 "Can't handle multiple traffic source items");
1189 rc = sfc_flow_parse_init(item,
1190 (const void **)&spec, (const void **)&mask,
1191 (const void *)&supp_mask, def_mask,
1192 sizeof(struct rte_flow_item_port_id), error);
1196 if (mask->id != supp_mask.id) {
1197 return rte_flow_error_set(error, EINVAL,
1198 RTE_FLOW_ERROR_TYPE_ITEM, item,
1199 "Bad mask in the PORT_ID pattern item");
1202 /* If "spec" is not set, could be any port ID */
1206 if (spec->id > UINT16_MAX) {
1207 return rte_flow_error_set(error, EOVERFLOW,
1208 RTE_FLOW_ERROR_TYPE_ITEM, item,
1209 "The port ID is too large");
1212 rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
1213 spec->id, &mport_sel);
1215 return rte_flow_error_set(error, rc,
1216 RTE_FLOW_ERROR_TYPE_ITEM, item,
1217 "Can't find RTE ethdev by the port ID");
1220 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
1223 return rte_flow_error_set(error, rc,
1224 RTE_FLOW_ERROR_TYPE_ITEM, item,
1225 "Failed to set MPORT for the port ID");
1228 ctx_mae->match_mport_set = B_TRUE;
1234 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
1235 struct sfc_flow_parse_ctx *ctx,
1236 struct rte_flow_error *error)
1238 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1239 const struct rte_flow_item_phy_port supp_mask = {
1240 .index = 0xffffffff,
1242 const void *def_mask = &rte_flow_item_phy_port_mask;
1243 const struct rte_flow_item_phy_port *spec = NULL;
1244 const struct rte_flow_item_phy_port *mask = NULL;
1245 efx_mport_sel_t mport_v;
1248 if (ctx_mae->match_mport_set) {
1249 return rte_flow_error_set(error, ENOTSUP,
1250 RTE_FLOW_ERROR_TYPE_ITEM, item,
1251 "Can't handle multiple traffic source items");
1254 rc = sfc_flow_parse_init(item,
1255 (const void **)&spec, (const void **)&mask,
1256 (const void *)&supp_mask, def_mask,
1257 sizeof(struct rte_flow_item_phy_port), error);
1261 if (mask->index != supp_mask.index) {
1262 return rte_flow_error_set(error, EINVAL,
1263 RTE_FLOW_ERROR_TYPE_ITEM, item,
1264 "Bad mask in the PHY_PORT pattern item");
1267 /* If "spec" is not set, could be any physical port */
1271 rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
1273 return rte_flow_error_set(error, rc,
1274 RTE_FLOW_ERROR_TYPE_ITEM, item,
1275 "Failed to convert the PHY_PORT index");
1278 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1280 return rte_flow_error_set(error, rc,
1281 RTE_FLOW_ERROR_TYPE_ITEM, item,
1282 "Failed to set MPORT for the PHY_PORT");
1285 ctx_mae->match_mport_set = B_TRUE;
1291 sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
1292 struct sfc_flow_parse_ctx *ctx,
1293 struct rte_flow_error *error)
1295 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1296 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1297 efx_mport_sel_t mport_v;
1300 if (ctx_mae->match_mport_set) {
1301 return rte_flow_error_set(error, ENOTSUP,
1302 RTE_FLOW_ERROR_TYPE_ITEM, item,
1303 "Can't handle multiple traffic source items");
1306 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
1309 return rte_flow_error_set(error, rc,
1310 RTE_FLOW_ERROR_TYPE_ITEM, item,
1311 "Failed to convert the PF ID");
1314 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1316 return rte_flow_error_set(error, rc,
1317 RTE_FLOW_ERROR_TYPE_ITEM, item,
1318 "Failed to set MPORT for the PF");
1321 ctx_mae->match_mport_set = B_TRUE;
1327 sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
1328 struct sfc_flow_parse_ctx *ctx,
1329 struct rte_flow_error *error)
1331 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1332 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1333 const struct rte_flow_item_vf supp_mask = {
1336 const void *def_mask = &rte_flow_item_vf_mask;
1337 const struct rte_flow_item_vf *spec = NULL;
1338 const struct rte_flow_item_vf *mask = NULL;
1339 efx_mport_sel_t mport_v;
1342 if (ctx_mae->match_mport_set) {
1343 return rte_flow_error_set(error, ENOTSUP,
1344 RTE_FLOW_ERROR_TYPE_ITEM, item,
1345 "Can't handle multiple traffic source items");
1348 rc = sfc_flow_parse_init(item,
1349 (const void **)&spec, (const void **)&mask,
1350 (const void *)&supp_mask, def_mask,
1351 sizeof(struct rte_flow_item_vf), error);
1355 if (mask->id != supp_mask.id) {
1356 return rte_flow_error_set(error, EINVAL,
1357 RTE_FLOW_ERROR_TYPE_ITEM, item,
1358 "Bad mask in the VF pattern item");
1362 * If "spec" is not set, the item requests any VF related to the
1363 * PF of the current DPDK port (but not the PF itself).
1364 * Reject this match criterion as unsupported.
1367 return rte_flow_error_set(error, EINVAL,
1368 RTE_FLOW_ERROR_TYPE_ITEM, item,
1369 "Bad spec in the VF pattern item");
1372 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
1374 return rte_flow_error_set(error, rc,
1375 RTE_FLOW_ERROR_TYPE_ITEM, item,
1376 "Failed to convert the PF + VF IDs");
1379 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1381 return rte_flow_error_set(error, rc,
1382 RTE_FLOW_ERROR_TYPE_ITEM, item,
1383 "Failed to set MPORT for the PF + VF");
1386 ctx_mae->match_mport_set = B_TRUE;
1392 * Having this field ID in a field locator means that this
1393 * locator cannot be used to actually set the field at the
1394 * time when the corresponding item gets encountered. Such
1395 * fields get stashed in the parsing context instead. This
1396 * is required to resolve dependencies between the stashed
1397 * fields. See sfc_mae_rule_process_pattern_data().
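 * For instance, the "type" field of item ETH and the "inner_type"
 * field of item VLAN are handled this way (see flocs_eth and
 * flocs_vlan below).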
1399 #define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS
1401 struct sfc_mae_field_locator {
1402 efx_mae_field_id_t field_id;
1404 /* Field offset in the corresponding rte_flow_item_ struct */
1409 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
1410 unsigned int nb_field_locators, void *mask_ptr,
1415 memset(mask_ptr, 0, mask_size);
1417 for (i = 0; i < nb_field_locators; ++i) {
1418 const struct sfc_mae_field_locator *fl = &field_locators[i];
1420 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
1421 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
1426 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
1427 unsigned int nb_field_locators, const uint8_t *spec,
1428 const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
1429 struct rte_flow_error *error)
1431 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1435 for (i = 0; i < nb_field_locators; ++i) {
1436 const struct sfc_mae_field_locator *fl = &field_locators[i];
1438 if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
1441 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1442 fremap[fl->field_id],
1443 fl->size, spec + fl->ofst,
1444 fl->size, mask + fl->ofst);
1450 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1451 NULL, "Failed to process item fields");
1457 static const struct sfc_mae_field_locator flocs_eth[] = {
1460 * This locator is used only for building supported fields mask.
1461 * The field is handled by sfc_mae_rule_process_pattern_data().
1463 SFC_MAE_FIELD_HANDLING_DEFERRED,
1464 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
1465 offsetof(struct rte_flow_item_eth, type),
1468 EFX_MAE_FIELD_ETH_DADDR_BE,
1469 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
1470 offsetof(struct rte_flow_item_eth, dst),
1473 EFX_MAE_FIELD_ETH_SADDR_BE,
1474 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
1475 offsetof(struct rte_flow_item_eth, src),
1480 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
1481 struct sfc_flow_parse_ctx *ctx,
1482 struct rte_flow_error *error)
1484 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1485 struct rte_flow_item_eth supp_mask;
1486 const uint8_t *spec = NULL;
1487 const uint8_t *mask = NULL;
1490 sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
1491 &supp_mask, sizeof(supp_mask));
1492 supp_mask.has_vlan = 1;
1494 rc = sfc_flow_parse_init(item,
1495 (const void **)&spec, (const void **)&mask,
1496 (const void *)&supp_mask,
1497 &rte_flow_item_eth_mask,
1498 sizeof(struct rte_flow_item_eth), error);
1503 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1504 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1505 const struct rte_flow_item_eth *item_spec;
1506 const struct rte_flow_item_eth *item_mask;
1508 item_spec = (const struct rte_flow_item_eth *)spec;
1509 item_mask = (const struct rte_flow_item_eth *)mask;
1512 * Remember various match criteria in the parsing context.
1513 * sfc_mae_rule_process_pattern_data() will consider them
1514 * altogether when the rest of the items have been parsed.
1516 ethertypes[0].value = item_spec->type;
1517 ethertypes[0].mask = item_mask->type;
1518 if (item_mask->has_vlan) {
1519 pdata->has_ovlan_mask = B_TRUE;
1520 if (item_spec->has_vlan)
1521 pdata->has_ovlan_value = B_TRUE;
1525 * The specification is empty. The overall pattern
1526 * validity will be enforced at the end of parsing.
1527 * See sfc_mae_rule_process_pattern_data().
1532 return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
1536 static const struct sfc_mae_field_locator flocs_vlan[] = {
1539 EFX_MAE_FIELD_VLAN0_TCI_BE,
1540 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1541 offsetof(struct rte_flow_item_vlan, tci),
1545 * This locator is used only for building supported fields mask.
1546 * The field is handled by sfc_mae_rule_process_pattern_data().
1548 SFC_MAE_FIELD_HANDLING_DEFERRED,
1549 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1550 offsetof(struct rte_flow_item_vlan, inner_type),
1555 EFX_MAE_FIELD_VLAN1_TCI_BE,
1556 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1557 offsetof(struct rte_flow_item_vlan, tci),
1561 * This locator is used only for building supported fields mask.
1562 * The field is handled by sfc_mae_rule_process_pattern_data().
1564 SFC_MAE_FIELD_HANDLING_DEFERRED,
1565 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1566 offsetof(struct rte_flow_item_vlan, inner_type),
1571 sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
1572 struct sfc_flow_parse_ctx *ctx,
1573 struct rte_flow_error *error)
1575 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1576 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1577 boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1578 &pdata->has_ovlan_mask,
1579 &pdata->has_ivlan_mask,
1581 boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1582 &pdata->has_ovlan_value,
1583 &pdata->has_ivlan_value,
1585 boolean_t *cur_tag_presence_bit_mp;
1586 boolean_t *cur_tag_presence_bit_vp;
1587 const struct sfc_mae_field_locator *flocs;
1588 struct rte_flow_item_vlan supp_mask;
1589 const uint8_t *spec = NULL;
1590 const uint8_t *mask = NULL;
1591 unsigned int nb_flocs;
1594 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1596 if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1597 return rte_flow_error_set(error, ENOTSUP,
1598 RTE_FLOW_ERROR_TYPE_ITEM, item,
1599 "Can't match that many VLAN tags");
1602 cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
1603 cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];
1605 if (*cur_tag_presence_bit_mp == B_TRUE &&
1606 *cur_tag_presence_bit_vp == B_FALSE) {
1607 return rte_flow_error_set(error, EINVAL,
1608 RTE_FLOW_ERROR_TYPE_ITEM, item,
1609 "The previous item enforces no (more) VLAN, "
1610 "so the current item (VLAN) must not exist");
1613 nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
1614 flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
1616 sfc_mae_item_build_supp_mask(flocs, nb_flocs,
1617 &supp_mask, sizeof(supp_mask));
1619 * This only means that the field is supported by the driver and libefx.
1620 * Support at the NIC level will be checked when all items have been parsed.
1622 supp_mask.has_more_vlan = 1;
1624 rc = sfc_flow_parse_init(item,
1625 (const void **)&spec, (const void **)&mask,
1626 (const void *)&supp_mask,
1627 &rte_flow_item_vlan_mask,
1628 sizeof(struct rte_flow_item_vlan), error);
1633 struct sfc_mae_ethertype *et = pdata->ethertypes;
1634 const struct rte_flow_item_vlan *item_spec;
1635 const struct rte_flow_item_vlan *item_mask;
1637 item_spec = (const struct rte_flow_item_vlan *)spec;
1638 item_mask = (const struct rte_flow_item_vlan *)mask;
1641 * Remember various match criteria in the parsing context.
1642 * sfc_mae_rule_process_pattern_data() will consider them
1643 * altogether when the rest of the items have been parsed.
1645 et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
1646 et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
1647 pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
1648 if (item_mask->has_more_vlan) {
1649 if (pdata->nb_vlan_tags ==
1650 SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1651 return rte_flow_error_set(error, ENOTSUP,
1652 RTE_FLOW_ERROR_TYPE_ITEM, item,
1653 "Can't use 'has_more_vlan' in "
1654 "the second item VLAN");
1656 pdata->has_ivlan_mask = B_TRUE;
1657 if (item_spec->has_more_vlan)
1658 pdata->has_ivlan_value = B_TRUE;
1661 /* Convert TCI to MAE representation right now. */
1662 rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
1668 ++(pdata->nb_vlan_tags);
1673 static const struct sfc_mae_field_locator flocs_ipv4[] = {
1675 EFX_MAE_FIELD_SRC_IP4_BE,
1676 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
1677 offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
1680 EFX_MAE_FIELD_DST_IP4_BE,
1681 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
1682 offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
1686 * This locator is used only for building supported fields mask.
1687 * The field is handled by sfc_mae_rule_process_pattern_data().
1689 SFC_MAE_FIELD_HANDLING_DEFERRED,
1690 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
1691 offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
1694 EFX_MAE_FIELD_IP_TOS,
1695 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
1696 hdr.type_of_service),
1697 offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
1700 EFX_MAE_FIELD_IP_TTL,
1701 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
1702 offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
1707 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
1708 struct sfc_flow_parse_ctx *ctx,
1709 struct rte_flow_error *error)
1711 rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1712 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1713 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1714 struct rte_flow_item_ipv4 supp_mask;
1715 const uint8_t *spec = NULL;
1716 const uint8_t *mask = NULL;
1719 sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
1720 &supp_mask, sizeof(supp_mask));
1722 rc = sfc_flow_parse_init(item,
1723 (const void **)&spec, (const void **)&mask,
1724 (const void *)&supp_mask,
1725 &rte_flow_item_ipv4_mask,
1726 sizeof(struct rte_flow_item_ipv4), error);
1730 pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
1731 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1734 const struct rte_flow_item_ipv4 *item_spec;
1735 const struct rte_flow_item_ipv4 *item_mask;
1737 item_spec = (const struct rte_flow_item_ipv4 *)spec;
1738 item_mask = (const struct rte_flow_item_ipv4 *)mask;
1740 pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
1741 pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
1746 return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
1750 static const struct sfc_mae_field_locator flocs_ipv6[] = {
1752 EFX_MAE_FIELD_SRC_IP6_BE,
1753 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
1754 offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
1757 EFX_MAE_FIELD_DST_IP6_BE,
1758 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
1759 offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
1763 * This locator is used only for building supported fields mask.
1764 * The field is handled by sfc_mae_rule_process_pattern_data().
1766 SFC_MAE_FIELD_HANDLING_DEFERRED,
1767 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
1768 offsetof(struct rte_flow_item_ipv6, hdr.proto),
1771 EFX_MAE_FIELD_IP_TTL,
1772 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
1773 offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
1778 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
1779 struct sfc_flow_parse_ctx *ctx,
1780 struct rte_flow_error *error)
1782 rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1783 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1784 const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
1785 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1786 struct rte_flow_item_ipv6 supp_mask;
1787 const uint8_t *spec = NULL;
1788 const uint8_t *mask = NULL;
1789 rte_be32_t vtc_flow_be;
1795 sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
1796 &supp_mask, sizeof(supp_mask));
1798 vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
1799 memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
1801 rc = sfc_flow_parse_init(item,
1802 (const void **)&spec, (const void **)&mask,
1803 (const void *)&supp_mask,
1804 &rte_flow_item_ipv6_mask,
1805 sizeof(struct rte_flow_item_ipv6), error);
1809 pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
1810 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1813 const struct rte_flow_item_ipv6 *item_spec;
1814 const struct rte_flow_item_ipv6 *item_mask;
1816 item_spec = (const struct rte_flow_item_ipv6 *)spec;
1817 item_mask = (const struct rte_flow_item_ipv6 *)mask;
1819 pdata->l3_next_proto_value = item_spec->hdr.proto;
1820 pdata->l3_next_proto_mask = item_mask->hdr.proto;
1825 rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
1830 memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
1831 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1832 tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1834 memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
1835 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1836 tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1838 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1839 fremap[EFX_MAE_FIELD_IP_TOS],
1840 sizeof(tc_value), &tc_value,
1841 sizeof(tc_mask), &tc_mask);
1843 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1844 NULL, "Failed to process item fields");
1850 static const struct sfc_mae_field_locator flocs_tcp[] = {
1852 EFX_MAE_FIELD_L4_SPORT_BE,
1853 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
1854 offsetof(struct rte_flow_item_tcp, hdr.src_port),
1857 EFX_MAE_FIELD_L4_DPORT_BE,
1858 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
1859 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
1862 EFX_MAE_FIELD_TCP_FLAGS_BE,
1864 * The values have been picked intentionally since the
1865 * target MAE field is oversize (16 bit). This mapping
1866 * relies on the fact that the MAE field is big-endian.
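 * The locator below thus covers hdr.data_off and hdr.tcp_flags as one
 * 16-bit big-endian value, with the flags in the low-order byte.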
1868 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
1869 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
1870 offsetof(struct rte_flow_item_tcp, hdr.data_off),
1875 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
1876 struct sfc_flow_parse_ctx *ctx,
1877 struct rte_flow_error *error)
1879 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1880 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1881 struct rte_flow_item_tcp supp_mask;
1882 const uint8_t *spec = NULL;
1883 const uint8_t *mask = NULL;
1887 * When encountered among outermost items, item TCP is invalid.
1888 * Check which match specification is being constructed now.
1890 if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
1891 return rte_flow_error_set(error, EINVAL,
1892 RTE_FLOW_ERROR_TYPE_ITEM, item,
1893 "TCP in outer frame is invalid");
1896 sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
1897 &supp_mask, sizeof(supp_mask));
1899 rc = sfc_flow_parse_init(item,
1900 (const void **)&spec, (const void **)&mask,
1901 (const void *)&supp_mask,
1902 &rte_flow_item_tcp_mask,
1903 sizeof(struct rte_flow_item_tcp), error);
1907 pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
1908 pdata->l3_next_proto_restriction_mask = 0xff;
1913 return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
1917 static const struct sfc_mae_field_locator flocs_udp[] = {
1919 EFX_MAE_FIELD_L4_SPORT_BE,
1920 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
1921 offsetof(struct rte_flow_item_udp, hdr.src_port),
1924 EFX_MAE_FIELD_L4_DPORT_BE,
1925 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
1926 offsetof(struct rte_flow_item_udp, hdr.dst_port),
1931 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
1932 struct sfc_flow_parse_ctx *ctx,
1933 struct rte_flow_error *error)
1935 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1936 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1937 struct rte_flow_item_udp supp_mask;
1938 const uint8_t *spec = NULL;
1939 const uint8_t *mask = NULL;
1942 sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
1943 &supp_mask, sizeof(supp_mask));
1945 rc = sfc_flow_parse_init(item,
1946 (const void **)&spec, (const void **)&mask,
1947 (const void *)&supp_mask,
1948 &rte_flow_item_udp_mask,
1949 sizeof(struct rte_flow_item_udp), error);
1953 pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
1954 pdata->l3_next_proto_restriction_mask = 0xff;
1959 return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
1963 static const struct sfc_mae_field_locator flocs_tunnel[] = {
1966 * The size and offset values are relevant
1967 * for Geneve and NVGRE, too.
1969 .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
1970 .ofst = offsetof(struct rte_flow_item_vxlan, vni),
1975 * An auxiliary registry which allows using non-encap. field IDs
1976 * directly when building a match specification of type ACTION.
1978 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
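 * For example, EFX_MAE_FIELD_ETH_SADDR_BE maps to itself here, whereas
 * field_ids_remap_to_encap below maps it to EFX_MAE_FIELD_ENC_ETH_SADDR_BE.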
1980 static const efx_mae_field_id_t field_ids_no_remap[] = {
1981 #define FIELD_ID_NO_REMAP(_field) \
1982 [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
1984 FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
1985 FIELD_ID_NO_REMAP(ETH_SADDR_BE),
1986 FIELD_ID_NO_REMAP(ETH_DADDR_BE),
1987 FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
1988 FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
1989 FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
1990 FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
1991 FIELD_ID_NO_REMAP(SRC_IP4_BE),
1992 FIELD_ID_NO_REMAP(DST_IP4_BE),
1993 FIELD_ID_NO_REMAP(IP_PROTO),
1994 FIELD_ID_NO_REMAP(IP_TOS),
1995 FIELD_ID_NO_REMAP(IP_TTL),
1996 FIELD_ID_NO_REMAP(SRC_IP6_BE),
1997 FIELD_ID_NO_REMAP(DST_IP6_BE),
1998 FIELD_ID_NO_REMAP(L4_SPORT_BE),
1999 FIELD_ID_NO_REMAP(L4_DPORT_BE),
2000 FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
2001 FIELD_ID_NO_REMAP(HAS_OVLAN),
2002 FIELD_ID_NO_REMAP(HAS_IVLAN),
2004 #undef FIELD_ID_NO_REMAP
2008 * An auxiliary registry which allows using "ENC" field IDs
2009 * when building a match specification of type OUTER.
2011 * See sfc_mae_rule_encap_parse_init().
2013 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
2014 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
2015 [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
2017 FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
2018 FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
2019 FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
2020 FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
2021 FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
2022 FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
2023 FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
2024 FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
2025 FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
2026 FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
2027 FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
2028 FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
2029 FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
2030 FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
2031 FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
2032 FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
2033 FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
2034 FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),
2036 #undef FIELD_ID_REMAP_TO_ENCAP
2040 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
2041 struct sfc_flow_parse_ctx *ctx,
2042 struct rte_flow_error *error)
2044 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2045 uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
2046 uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
2047 const struct rte_flow_item_vxlan *vxp;
2048 uint8_t supp_mask[sizeof(uint64_t)];
2049 const uint8_t *spec = NULL;
2050 const uint8_t *mask = NULL;
2054 * We're about to start processing inner frame items.
2055 * Process pattern data that has been deferred so far
2056 * and reset pattern data storage.
2058 rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
2062 memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
2064 sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
2065 &supp_mask, sizeof(supp_mask));
2068 * This tunnel item was preliminarily detected by
2069 * sfc_mae_rule_encap_parse_init(). Default mask
2070 * was also picked by that helper. Use it here.
2072 rc = sfc_flow_parse_init(item,
2073 (const void **)&spec, (const void **)&mask,
2074 (const void *)&supp_mask,
2075 ctx_mae->tunnel_def_mask,
2076 ctx_mae->tunnel_def_mask_size, error);
2081 * This item and later ones comprise a
2082 * match specification of type ACTION.
2084 ctx_mae->match_spec = ctx_mae->match_spec_action;
2086 /* This item and later ones use non-encap. EFX MAE field IDs. */
2087 ctx_mae->field_ids_remap = field_ids_no_remap;
2093 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
2094 * Copy 24-bit VNI, which is BE, at offset 1 in it.
2095 * The extra byte is 0 both in the mask and in the value.
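 * For example, VNI 0x123456 is laid out as { 0x00, 0x12, 0x34, 0x56 }.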
2097 vxp = (const struct rte_flow_item_vxlan *)spec;
2098 memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
2100 vxp = (const struct rte_flow_item_vxlan *)mask;
2101 memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
2103 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
2104 EFX_MAE_FIELD_ENC_VNET_ID_BE,
2105 sizeof(vnet_id_v), vnet_id_v,
2106 sizeof(vnet_id_m), vnet_id_m);
2108 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
2109 item, "Failed to set VXLAN VNI");
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_port_id,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_phy_port,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PF,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_pf,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VF,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_vf,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
};
static int
sfc_mae_rule_process_outer(struct sfc_adapter *sa,
			   struct sfc_mae_parse_ctx *ctx,
			   struct sfc_mae_outer_rule **rulep,
			   struct rte_flow_error *error)
{
	efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
	int rc;

	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
		*rulep = NULL;
		goto no_or_id;
	}

	SFC_ASSERT(ctx->match_spec_outer != NULL);

	if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "Inconsistent pattern (outer)");
	}

	*rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
					   ctx->encap_type);
	if (*rulep != NULL) {
		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
	} else {
		rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
					    ctx->encap_type, rulep);
		if (rc != 0) {
			return rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					"Failed to process the pattern");
		}
	}

	/* The spec has now been tracked by the outer rule entry. */
	ctx->match_spec_outer = NULL;

no_or_id:
	/*
	 * In MAE, lookup sequence comprises outer parse, outer rule lookup,
	 * inner parse (when some outer rule is hit) and action rule lookup.
	 * If the currently processed flow does not come with an outer rule,
	 * its action rule must be available only for packets which miss in
	 * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
	 * in the action rule specification; this ensures correct behaviour.
	 *
	 * If, on the other hand, this flow does have an outer rule, its ID
	 * may be unknown at the moment (not yet allocated), but OR_ID mask
	 * has to be set to 0xffffffff anyway for correct class comparisons.
	 * When the outer rule has been allocated, this match field will be
	 * overridden by sfc_mae_outer_rule_enable() to use the right value.
	 */
	rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
						  &invalid_rule_id);
	if (rc != 0) {
		if (*rulep != NULL)
			sfc_mae_outer_rule_del(sa, *rulep);

		*rulep = NULL;

		return rte_flow_error_set(error, rc,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "Failed to process the pattern");
	}

	return 0;
}
2309 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
2310 const struct rte_flow_item pattern[],
2311 struct sfc_mae_parse_ctx *ctx,
2312 struct rte_flow_error *error)
2314 struct sfc_mae *mae = &sa->mae;
2317 if (pattern == NULL) {
2318 rte_flow_error_set(error, EINVAL,
2319 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
2325 switch (pattern->type) {
2326 case RTE_FLOW_ITEM_TYPE_VXLAN:
2327 ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
2328 ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
2329 ctx->tunnel_def_mask_size =
2330 sizeof(rte_flow_item_vxlan_mask);
2332 case RTE_FLOW_ITEM_TYPE_GENEVE:
2333 ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
2334 ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
2335 ctx->tunnel_def_mask_size =
2336 sizeof(rte_flow_item_geneve_mask);
2338 case RTE_FLOW_ITEM_TYPE_NVGRE:
2339 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
2340 ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
2341 ctx->tunnel_def_mask_size =
2342 sizeof(rte_flow_item_nvgre_mask);
2344 case RTE_FLOW_ITEM_TYPE_END:
2354 if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
2357 if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
2358 return rte_flow_error_set(error, ENOTSUP,
2359 RTE_FLOW_ERROR_TYPE_ITEM,
2360 pattern, "Unsupported tunnel item");
2363 if (ctx->priority >= mae->nb_outer_rule_prios_max) {
2364 return rte_flow_error_set(error, ENOTSUP,
2365 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2366 NULL, "Unsupported priority level");
2369 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
2370 &ctx->match_spec_outer);
2372 return rte_flow_error_set(error, rc,
2373 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
2374 "Failed to initialise outer rule match specification");
2377 /* Outermost items comprise a match specification of type OUTER. */
2378 ctx->match_spec = ctx->match_spec_outer;
2380 /* Outermost items use "ENC" EFX MAE field IDs. */
2381 ctx->field_ids_remap = field_ids_remap_to_encap;
2387 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
2388 struct sfc_mae_parse_ctx *ctx)
2390 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2393 if (ctx->match_spec_outer != NULL)
2394 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2398 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
2399 const struct rte_flow_item pattern[],
2400 struct sfc_flow_spec_mae *spec,
2401 struct rte_flow_error *error)
2403 struct sfc_mae_parse_ctx ctx_mae;
2404 struct sfc_flow_parse_ctx ctx;
2407 memset(&ctx_mae, 0, sizeof(ctx_mae));
2408 ctx_mae.priority = spec->priority;
2411 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
2413 &ctx_mae.match_spec_action);
2415 rc = rte_flow_error_set(error, rc,
2416 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2417 "Failed to initialise action rule match specification");
2418 goto fail_init_match_spec_action;
2422 * As a preliminary setting, assume that there is no encapsulation
2423 * in the pattern. That is, pattern items are about to comprise a
2424 * match specification of type ACTION and use non-encap. field IDs.
2426 * sfc_mae_rule_encap_parse_init() below may override this.
2428 ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2429 ctx_mae.match_spec = ctx_mae.match_spec_action;
2430 ctx_mae.field_ids_remap = field_ids_no_remap;
2432 ctx.type = SFC_FLOW_PARSE_CTX_MAE;
2435 rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
2437 goto fail_encap_parse_init;
2439 rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2440 pattern, &ctx, error);
2442 goto fail_parse_pattern;
2444 rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
2446 goto fail_process_pattern_data;
2448 rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
2450 goto fail_process_outer;
2452 if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
2453 rc = rte_flow_error_set(error, ENOTSUP,
2454 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2455 "Inconsistent pattern");
2456 goto fail_validate_match_spec_action;
2459 spec->match_spec = ctx_mae.match_spec_action;
2463 fail_validate_match_spec_action:
2465 fail_process_pattern_data:
2467 sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
2469 fail_encap_parse_init:
2470 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
2472 fail_init_match_spec_action:
	return rc;
}

/*
 * An action supported by MAE may correspond to a bundle of RTE flow actions,
 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
 * That is, related RTE flow actions need to be tracked as parts of a whole
 * so that they can be combined into a single action and submitted to MAE
 * representation of a given rule's action set.
 *
 * Each RTE flow action provided by an application gets classified as
 * one belonging to some bundle type. If an action is not supposed to
 * belong to any bundle, or if this action is END, it is described as
 * one belonging to a dummy bundle of type EMPTY.
 *
 * A currently tracked bundle will be submitted if a repeating
 * action or an action of different bundle type follows.
 */
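/*
 * Example (illustrative only): for the application-provided sequence
 *     OF_PUSH_VLAN(ethertype=0x8100), OF_SET_VLAN_VID(vid=5),
 *     OF_SET_VLAN_PCP(pcp=3), PORT_ID(id=1), END
 * the first three actions accumulate TPID/TCI in one VLAN_PUSH bundle;
 * PORT_ID belongs to the dummy EMPTY bundle, so encountering it submits
 * the accumulated data as a single MAE VLAN_PUSH action before PORT_ID
 * itself is parsed.
 */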
enum sfc_mae_actions_bundle_type {
	SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
	SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
};

struct sfc_mae_actions_bundle {
	enum sfc_mae_actions_bundle_type type;

	/* Indicates actions already tracked by the current bundle */
	uint64_t actions_mask;

	/* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
	rte_be16_t vlan_push_tpid;
	rte_be16_t vlan_push_tci;
};
/*
 * Combine configuration of RTE flow actions tracked by the bundle into a
 * single action and submit the result to MAE action set specification.
 * Do nothing in the case of dummy action bundle.
 */
static int
sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
			      efx_mae_actions_t *spec)
{
	int rc = 0;

	switch (bundle->type) {
	case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
		break;
	case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
		rc = efx_mae_action_set_populate_vlan_push(
			spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
		break;
	default:
		SFC_ASSERT(B_FALSE);
		break;
	}

	return rc;
}
/*
 * Given the type of the next RTE flow action in the line, decide
 * whether a new bundle is about to start, and, if this is the case,
 * submit and reset the current bundle.
 */
static int
sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
			    struct sfc_mae_actions_bundle *bundle,
			    efx_mae_actions_t *spec,
			    struct rte_flow_error *error)
{
	enum sfc_mae_actions_bundle_type bundle_type_new;
	int rc;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
		break;
	default:
		/*
		 * Self-sufficient actions, including END, are handled in this
		 * case. No checks for unsupported actions are needed here
		 * because parsing doesn't occur at this point.
		 */
		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
		break;
	}

	if (bundle_type_new != bundle->type ||
	    (bundle->actions_mask & (1ULL << action->type)) != 0) {
		rc = sfc_mae_actions_bundle_submit(bundle, spec);
		if (rc != 0)
			goto fail_submit;

		memset(bundle, 0, sizeof(*bundle));
	}

	bundle->type = bundle_type_new;

	return 0;

fail_submit:
	return rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Failed to request the (group of) action(s)");
}
static void
sfc_mae_rule_parse_action_of_push_vlan(
			    const struct rte_flow_action_of_push_vlan *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	bundle->vlan_push_tpid = conf->ethertype;
}

static void
sfc_mae_rule_parse_action_of_set_vlan_vid(
			    const struct rte_flow_action_of_set_vlan_vid *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	bundle->vlan_push_tci |= (conf->vlan_vid &
				  rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
}

static void
sfc_mae_rule_parse_action_of_set_vlan_pcp(
			    const struct rte_flow_action_of_set_vlan_pcp *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
					   RTE_LEN2MASK(3, uint8_t)) << 13;

	bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
}
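/*
 * Worked example (illustrative only): OF_SET_VLAN_VID with vlan_vid = 5
 * followed by OF_SET_VLAN_PCP with vlan_pcp = 3 accumulates
 *     vlan_push_tci = rte_cpu_to_be_16(5) | rte_cpu_to_be_16(3 << 13),
 * i.e. host-order 0x6005: PCP in the top three bits, VID in the low
 * twelve, DEI left at zero.
 */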
2611 struct sfc_mae_parsed_item {
2612 const struct rte_flow_item *item;
2613 size_t proto_header_ofst;
2614 size_t proto_header_size;
};

/*
 * For each 16-bit word of the given header, override
 * bits enforced by the corresponding 16-bit mask.
 */
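/*
 * Worked example (illustrative only): if a 16-bit word of the header
 * built so far holds 0x0000, the item spec word is 0x1234 and the item
 * mask word is 0x00ff, then only 0x0034 is OR-ed in; bits outside the
 * mask keep whatever the driver has already filled in (e.g. defaults).
 */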
static void
sfc_mae_header_force_item_masks(uint8_t *header_buf,
				const struct sfc_mae_parsed_item *parsed_items,
				unsigned int nb_parsed_items)
{
	unsigned int item_idx;
2628 for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
2629 const struct sfc_mae_parsed_item *parsed_item;
2630 const struct rte_flow_item *item;
2631 size_t proto_header_size;
2634 parsed_item = &parsed_items[item_idx];
2635 proto_header_size = parsed_item->proto_header_size;
2636 item = parsed_item->item;
2638 for (ofst = 0; ofst < proto_header_size;
2639 ofst += sizeof(rte_be16_t)) {
2640 rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
2641 const rte_be16_t *w_maskp;
2642 const rte_be16_t *w_specp;
2644 w_maskp = RTE_PTR_ADD(item->mask, ofst);
2645 w_specp = RTE_PTR_ADD(item->spec, ofst);
2648 *wp |= (*w_specp & *w_maskp);
2651 header_buf += proto_header_size;
2655 #define SFC_IPV4_TTL_DEF 0x40
2656 #define SFC_IPV6_VTC_FLOW_DEF 0x60000000
2657 #define SFC_IPV6_HOP_LIMITS_DEF 0xff
2658 #define SFC_VXLAN_FLAGS_DEF 0x08000000
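/*
 * Informational: 0x40 is a TTL of 64, 0x60000000 sets the IPv6 version
 * nibble in vtc_flow, 0xff is the maximum hop limit, and 0x08000000 is a
 * VXLAN flags word with only the I ("VNI valid") bit set (RFC 7348).
 */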
2661 sfc_mae_rule_parse_action_vxlan_encap(
2662 struct sfc_mae *mae,
2663 const struct rte_flow_action_vxlan_encap *conf,
2664 efx_mae_actions_t *spec,
2665 struct rte_flow_error *error)
2667 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
2668 struct rte_flow_item *pattern = conf->definition;
2669 uint8_t *buf = bounce_eh->buf;
2671 /* This array will keep track of non-VOID pattern items. */
2672 struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
2674 1 /* IPv4 or IPv6 */ +
2677 unsigned int nb_parsed_items = 0;
2679 size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
2680 uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
2681 sizeof(struct rte_ipv6_hdr))];
2682 struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
2683 struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
2684 struct rte_vxlan_hdr *vxlan = NULL;
2685 struct rte_udp_hdr *udp = NULL;
2686 unsigned int nb_vlan_tags = 0;
2687 size_t next_proto_ofst = 0;
2688 size_t ethertype_ofst = 0;
2692 if (pattern == NULL) {
2693 return rte_flow_error_set(error, EINVAL,
2694 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2695 "The encap. header definition is NULL");
2698 bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
2699 bounce_eh->size = 0;
	/*
	 * Process pattern items and remember non-VOID ones.
	 * Defer applying masks until after the complete header
	 * has been built from the pattern items.
	 */
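	/*
	 * Informational: the exp_items transitions below admit only headers
	 * of the form ETH [/ VLAN [/ VLAN]] / IPV4-or-IPV6 / UDP / VXLAN,
	 * terminated by END; this is the only encapsulation shape the
	 * bounce buffer parser can produce.
	 */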
2706 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
2708 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
2709 struct sfc_mae_parsed_item *parsed_item;
2710 const uint64_t exp_items_extra_vlan[] = {
2711 RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
2713 size_t proto_header_size;
2714 rte_be16_t *ethertypep;
2715 uint8_t *next_protop;
2718 if (pattern->spec == NULL) {
2719 return rte_flow_error_set(error, EINVAL,
2720 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2721 "NULL item spec in the encap. header");
2724 if (pattern->mask == NULL) {
2725 return rte_flow_error_set(error, EINVAL,
2726 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2727 "NULL item mask in the encap. header");
2730 if (pattern->last != NULL) {
2731 /* This is not a match pattern, so disallow range. */
2732 return rte_flow_error_set(error, EINVAL,
2733 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2734 "Range item in the encap. header");
2737 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
2738 /* Handle VOID separately, for clarity. */
2742 if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
2743 return rte_flow_error_set(error, ENOTSUP,
2744 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2745 "Unexpected item in the encap. header");
2748 parsed_item = &parsed_items[nb_parsed_items];
2749 buf_cur = buf + bounce_eh->size;
2751 switch (pattern->type) {
2752 case RTE_FLOW_ITEM_TYPE_ETH:
2753 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
2755 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
2758 proto_header_size = sizeof(struct rte_ether_hdr);
2760 ethertype_ofst = eth_ethertype_ofst;
2762 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
2763 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2764 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2766 case RTE_FLOW_ITEM_TYPE_VLAN:
2767 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
2769 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
2772 proto_header_size = sizeof(struct rte_vlan_hdr);
2774 ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
2775 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
2777 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2778 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2782 offsetof(struct rte_vlan_hdr, eth_proto);
2784 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2785 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2786 exp_items |= exp_items_extra_vlan[nb_vlan_tags];
2790 case RTE_FLOW_ITEM_TYPE_IPV4:
2791 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
2793 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
2796 proto_header_size = sizeof(struct rte_ipv4_hdr);
2798 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2799 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2803 offsetof(struct rte_ipv4_hdr, next_proto_id);
2805 ipv4 = (struct rte_ipv4_hdr *)buf_cur;
2807 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2809 case RTE_FLOW_ITEM_TYPE_IPV6:
2810 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
2812 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
2815 proto_header_size = sizeof(struct rte_ipv6_hdr);
2817 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2818 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2820 next_proto_ofst = bounce_eh->size +
2821 offsetof(struct rte_ipv6_hdr, proto);
2823 ipv6 = (struct rte_ipv6_hdr *)buf_cur;
2825 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2827 case RTE_FLOW_ITEM_TYPE_UDP:
2828 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
2830 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
2833 proto_header_size = sizeof(struct rte_udp_hdr);
2835 next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
2836 *next_protop = IPPROTO_UDP;
2838 udp = (struct rte_udp_hdr *)buf_cur;
2840 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
2842 case RTE_FLOW_ITEM_TYPE_VXLAN:
2843 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
2845 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
2848 proto_header_size = sizeof(struct rte_vxlan_hdr);
2850 vxlan = (struct rte_vxlan_hdr *)buf_cur;
2852 udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
2853 udp->dgram_len = RTE_BE16(sizeof(*udp) +
2855 udp->dgram_cksum = 0;
2860 return rte_flow_error_set(error, ENOTSUP,
2861 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2862 "Unknown item in the encap. header");
2865 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
2866 return rte_flow_error_set(error, E2BIG,
2867 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2868 "The encap. header is too big");
2871 if ((proto_header_size & 1) != 0) {
2872 return rte_flow_error_set(error, EINVAL,
2873 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2874 "Odd layer size in the encap. header");
2877 rte_memcpy(buf_cur, pattern->spec, proto_header_size);
2878 bounce_eh->size += proto_header_size;
2880 parsed_item->item = pattern;
2881 parsed_item->proto_header_size = proto_header_size;
2885 if (exp_items != 0) {
2886 /* Parsing item VXLAN would have reset exp_items to 0. */
2887 return rte_flow_error_set(error, ENOTSUP,
2888 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2889 "No item VXLAN in the encap. header");
2892 /* One of the pointers (ipv4, ipv6) refers to a dummy area. */
2893 ipv4->version_ihl = RTE_IPV4_VHL_DEF;
2894 ipv4->time_to_live = SFC_IPV4_TTL_DEF;
2895 ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
2897 /* The HW cannot compute this checksum. */
2898 ipv4->hdr_checksum = 0;
2899 ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
2901 ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
2902 ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
2903 ipv6->payload_len = udp->dgram_len;
2905 vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
2907 /* Take care of the masks. */
2908 sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
	rc = efx_mae_action_set_populate_encap(spec);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "failed to request action ENCAP");
	}

	return rc;
}
static int
sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
			       const struct rte_flow_action_mark *conf,
			       efx_mae_actions_t *spec)
{
	int rc;

	rc = efx_mae_action_set_populate_mark(spec, conf->id);
	if (rc != 0)
		sfc_err(sa, "failed to request action MARK: %s", strerror(rc));

	return rc;
}
static int
sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
				const struct rte_flow_action_count *conf
					__rte_unused,
				efx_mae_actions_t *spec)
{
	int rc;

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
		sfc_err(sa,
			"counter queue is not configured for COUNT action");
		rc = EINVAL;
		goto fail_counter_queue_uninit;
	}

	if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
		rc = EINVAL;
		goto fail_no_service_core;
	}

	rc = efx_mae_action_set_populate_count(spec);
	if (rc != 0) {
		sfc_err(sa,
			"failed to populate counters in MAE action set: %s",
			rte_strerror(rc));
		goto fail_populate_count;
	}

	return 0;

fail_populate_count:
fail_no_service_core:
fail_counter_queue_uninit:
	return rc;
}
static int
sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
				   const struct rte_flow_action_phy_port *conf,
				   efx_mae_actions_t *spec)
{
	efx_mport_sel_t mport;
	uint32_t phy_port;
	int rc;

	if (conf->original != 0)
		phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
	else
		phy_port = conf->index;

	rc = efx_mae_mport_by_phy_port(phy_port, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s",
			phy_port, strerror(rc));
		return rc;
	}

	rc = efx_mae_action_set_populate_deliver(spec, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
			mport.sel, strerror(rc));
	}

	return rc;
}
static int
sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
				const struct rte_flow_action_vf *vf_conf,
				efx_mae_actions_t *spec)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_mport_sel_t mport;
	uint32_t vf;
	int rc;

	if (vf_conf == NULL)
		vf = EFX_PCI_VF_INVALID;
	else if (vf_conf->original != 0)
		vf = encp->enc_vf;
	else
		vf = vf_conf->id;

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s",
			encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1,
			strerror(rc));
		return rc;
	}

	rc = efx_mae_action_set_populate_deliver(spec, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
			mport.sel, strerror(rc));
	}

	return rc;
}
static int
sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
				  const struct rte_flow_action_port_id *conf,
				  efx_mae_actions_t *spec)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_mae *mae = &sa->mae;
	efx_mport_sel_t mport;
	uint16_t port_id;
	int rc;

	if (conf->id > UINT16_MAX)
		return EOVERFLOW;

	port_id = (conf->original != 0) ? sas->port_id : conf->id;

	rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
					   port_id, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to find MAE switch port SW entry for RTE ethdev port %u: %s",
			port_id, strerror(rc));
		return rc;
	}

	rc = efx_mae_action_set_populate_deliver(spec, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
			mport.sel, strerror(rc));
	}

	return rc;
}
3067 static const char * const action_names[] = {
3068 [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
3069 [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
3070 [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN",
3071 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
3072 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
3073 [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
3074 [RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
3075 [RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
3076 [RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
3077 [RTE_FLOW_ACTION_TYPE_PF] = "PF",
3078 [RTE_FLOW_ACTION_TYPE_VF] = "VF",
3079 [RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
3080 [RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
3084 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
3085 const struct rte_flow_action *action,
3086 const struct sfc_mae_outer_rule *outer_rule,
3087 struct sfc_mae_actions_bundle *bundle,
3088 efx_mae_actions_t *spec,
3089 struct rte_flow_error *error)
3091 bool custom_error = B_FALSE;
3094 switch (action->type) {
3095 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3096 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
3097 bundle->actions_mask);
		if (outer_rule == NULL ||
		    outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
			rc = EINVAL;
		else
			rc = efx_mae_action_set_populate_decap(spec);
		break;
3104 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3105 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
3106 bundle->actions_mask);
3107 rc = efx_mae_action_set_populate_vlan_pop(spec);
3109 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3110 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
3111 bundle->actions_mask);
3112 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
3114 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3115 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
3116 bundle->actions_mask);
3117 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
3119 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3120 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
3121 bundle->actions_mask);
3122 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
3124 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3125 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
3126 bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
							   action->conf,
							   spec, error);
		custom_error = B_TRUE;
		break;
3132 case RTE_FLOW_ACTION_TYPE_COUNT:
3133 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
3134 bundle->actions_mask);
3135 rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
3137 case RTE_FLOW_ACTION_TYPE_FLAG:
3138 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
3139 bundle->actions_mask);
3140 rc = efx_mae_action_set_populate_flag(spec);
3142 case RTE_FLOW_ACTION_TYPE_MARK:
3143 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
3144 bundle->actions_mask);
3145 rc = sfc_mae_rule_parse_action_mark(sa, action->conf, spec);
3147 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3148 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
3149 bundle->actions_mask);
3150 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
3152 case RTE_FLOW_ACTION_TYPE_PF:
3153 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
3154 bundle->actions_mask);
3155 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
3157 case RTE_FLOW_ACTION_TYPE_VF:
3158 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
3159 bundle->actions_mask);
3160 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
3162 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3163 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
3164 bundle->actions_mask);
3165 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
3167 case RTE_FLOW_ACTION_TYPE_DROP:
3168 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
3169 bundle->actions_mask);
3170 rc = efx_mae_action_set_populate_drop(spec);
3173 return rte_flow_error_set(error, ENOTSUP,
3174 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3175 "Unsupported action");
	}

	if (rc == 0) {
		bundle->actions_mask |= (1ULL << action->type);
	} else if (!custom_error) {
		if (action->type < RTE_DIM(action_names)) {
			const char *action_name = action_names[action->type];

			if (action_name != NULL) {
				sfc_err(sa, "action %s was rejected: %s",
					action_name, strerror(rc));
			}
		}
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "Failed to request the action");
	}

	return rc;
}
static void
sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
{
	bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
}

static int
sfc_mae_process_encap_header(struct sfc_adapter *sa,
			     const struct sfc_mae_bounce_eh *bounce_eh,
			     struct sfc_mae_encap_header **encap_headerp)
{
	if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
		*encap_headerp = NULL;
		return 0;
	}

	*encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
	if (*encap_headerp != NULL)
		return 0;

	return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
}
3220 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
3221 const struct rte_flow_action actions[],
3222 struct sfc_flow_spec_mae *spec_mae,
3223 struct rte_flow_error *error)
3225 struct sfc_mae_encap_header *encap_header = NULL;
3226 struct sfc_mae_actions_bundle bundle = {0};
3227 const struct rte_flow_action *action;
3228 struct sfc_mae *mae = &sa->mae;
3229 efx_mae_actions_t *spec;
3230 unsigned int n_count;
3235 if (actions == NULL) {
3236 return rte_flow_error_set(error, EINVAL,
3237 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
3241 rc = efx_mae_action_set_spec_init(sa->nic, &spec);
3243 goto fail_action_set_spec_init;
3245 /* Cleanup after previous encap. header bounce buffer usage. */
3246 sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
3248 for (action = actions;
3249 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
3250 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
3252 goto fail_rule_parse_action;
3254 rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule,
3255 &bundle, spec, error);
3257 goto fail_rule_parse_action;
3260 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
3262 goto fail_rule_parse_action;
3264 rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
3266 goto fail_process_encap_header;
3268 n_count = efx_mae_action_set_get_nb_count(spec);
3271 sfc_err(sa, "too many count actions requested: %u", n_count);
3275 spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
3277 if (spec_mae->action_set != NULL) {
3278 sfc_mae_encap_header_del(sa, encap_header);
3279 efx_mae_action_set_spec_fini(sa->nic, spec);
3283 rc = sfc_mae_action_set_add(sa, actions, spec, encap_header, n_count,
3284 &spec_mae->action_set);
3286 goto fail_action_set_add;
3290 fail_action_set_add:
3292 sfc_mae_encap_header_del(sa, encap_header);
3294 fail_process_encap_header:
3295 fail_rule_parse_action:
3296 efx_mae_action_set_spec_fini(sa->nic, spec);
3298 fail_action_set_spec_init:
3299 if (rc > 0 && rte_errno == 0) {
3300 rc = rte_flow_error_set(error, rc,
3301 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3302 NULL, "Failed to process the action");
	}

	return rc;
}

static bool
sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
			const efx_mae_match_spec_t *left,
			const efx_mae_match_spec_t *right)
{
	bool have_same_class;
	int rc;

	rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
					   &have_same_class);

	return (rc == 0) ? have_same_class : false;
}
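/*
 * Note (interpretation, not a statement of the FW interface): match
 * specifications are treated as being of the same class when they match
 * on the same set of fields with the same masks, so an already installed
 * rule of the same class serves as evidence that the FW can accept the
 * new one as well.
 */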
3322 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
3323 struct sfc_mae_outer_rule *rule)
3325 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
3326 struct sfc_mae_outer_rule *entry;
3327 struct sfc_mae *mae = &sa->mae;
3329 if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
3330 /* An active rule is reused. It's class is wittingly valid. */
3334 TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
3335 sfc_mae_outer_rules, entries) {
3336 const efx_mae_match_spec_t *left = entry->match_spec;
3337 const efx_mae_match_spec_t *right = rule->match_spec;
3342 if (sfc_mae_rules_class_cmp(sa, left, right))
3346 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3347 "support for outer frame pattern items is not guaranteed; "
3348 "other than that, the items are valid from SW standpoint");
3353 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
3354 struct sfc_flow_spec_mae *spec)
3356 const struct rte_flow *entry;
3358 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
3359 const struct sfc_flow_spec *entry_spec = &entry->spec;
3360 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
3361 const efx_mae_match_spec_t *left = es_mae->match_spec;
3362 const efx_mae_match_spec_t *right = spec->match_spec;
3364 switch (entry_spec->type) {
3365 case SFC_FLOW_SPEC_FILTER:
3366 /* Ignore VNIC-level flows */
3368 case SFC_FLOW_SPEC_MAE:
3369 if (sfc_mae_rules_class_cmp(sa, left, right))
3377 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3378 "support for inner frame pattern items is not guaranteed; "
3379 "other than that, the items are valid from SW standpoint");
	return 0;
}

/**
 * Confirm that a given flow can be accepted by the FW.
 *
 * @param sa
 *   Software adapter context
 * @param flow
 *   Flow to be verified
 * @return
 *   Zero on success and non-zero in the case of error.
 *   A special value of EAGAIN indicates that the adapter is
 *   not in started state. This state is compulsory because
 *   it only makes sense to compare the rule class of the flow
 *   being validated with classes of the active rules.
 *   Such classes are wittingly supported by the FW.
 */
int
sfc_mae_flow_verify(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ETHDEV_STARTED)
		return EAGAIN;

	if (outer_rule != NULL) {
		rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
		if (rc != 0)
			return rc;
	}

	return sfc_mae_action_rule_class_verify(sa, spec_mae);
}
3422 sfc_mae_flow_insert(struct sfc_adapter *sa,
3423 struct rte_flow *flow)
3425 struct sfc_flow_spec *spec = &flow->spec;
3426 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3427 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3428 struct sfc_mae_action_set *action_set = spec_mae->action_set;
3429 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
3432 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
3433 SFC_ASSERT(action_set != NULL);
3435 if (outer_rule != NULL) {
3436 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
3437 spec_mae->match_spec);
3439 goto fail_outer_rule_enable;
3442 rc = sfc_mae_action_set_enable(sa, action_set);
3444 goto fail_action_set_enable;
3446 if (action_set->n_counters > 0) {
3447 rc = sfc_mae_counter_start(sa);
3449 sfc_err(sa, "failed to start MAE counters support: %s",
3451 goto fail_mae_counter_start;
3455 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
3456 NULL, &fw_rsrc->aset_id,
3457 &spec_mae->rule_id);
3459 goto fail_action_rule_insert;
3461 sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
3462 flow, spec_mae->rule_id.id);
3466 fail_action_rule_insert:
3467 fail_mae_counter_start:
3468 sfc_mae_action_set_disable(sa, action_set);
3470 fail_action_set_enable:
3471 if (outer_rule != NULL)
3472 sfc_mae_outer_rule_disable(sa, outer_rule);
3474 fail_outer_rule_enable:
3479 sfc_mae_flow_remove(struct sfc_adapter *sa,
3480 struct rte_flow *flow)
3482 struct sfc_flow_spec *spec = &flow->spec;
3483 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3484 struct sfc_mae_action_set *action_set = spec_mae->action_set;
3485 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3488 SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
3489 SFC_ASSERT(action_set != NULL);
3491 rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
3493 sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
3494 flow, spec_mae->rule_id.id, strerror(rc));
3496 sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
3497 flow, spec_mae->rule_id.id);
3498 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
3500 sfc_mae_action_set_disable(sa, action_set);
3502 if (outer_rule != NULL)
3503 sfc_mae_outer_rule_disable(sa, outer_rule);
3509 sfc_mae_query_counter(struct sfc_adapter *sa,
3510 struct sfc_flow_spec_mae *spec,
3511 const struct rte_flow_action *action,
3512 struct rte_flow_query_count *data,
3513 struct rte_flow_error *error)
3515 struct sfc_mae_action_set *action_set = spec->action_set;
3516 const struct rte_flow_action_count *conf = action->conf;
3520 if (action_set->n_counters == 0) {
3521 return rte_flow_error_set(error, EINVAL,
3522 RTE_FLOW_ERROR_TYPE_ACTION, action,
3523 "Queried flow rule does not have count actions");
3526 for (i = 0; i < action_set->n_counters; i++) {
3528 * Get the first available counter of the flow rule if
3529 * counter ID is not specified.
3531 if (conf != NULL && action_set->counters[i].rte_id != conf->id)
3534 rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
3535 &action_set->counters[i], data);
3537 return rte_flow_error_set(error, EINVAL,
3538 RTE_FLOW_ERROR_TYPE_ACTION, action,
3539 "Queried flow rule counter action is invalid");
3545 return rte_flow_error_set(error, ENOENT,
3546 RTE_FLOW_ERROR_TYPE_ACTION, action,
3547 "No such flow rule action count ID");
}

int
sfc_mae_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *action,
		   void *data,
		   struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		return sfc_mae_query_counter(sa, spec_mae, action,
					     data, error);
	default:
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Query for action of this type is not supported");
	}
}
3573 sfc_mae_switchdev_init(struct sfc_adapter *sa)
3575 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3576 struct sfc_mae *mae = &sa->mae;
3578 efx_mport_sel_t phy;
3581 sfc_log_init(sa, "entry");
3583 if (!sa->switchdev) {
3584 sfc_log_init(sa, "switchdev is not enabled - skip");
3588 if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
3590 sfc_err(sa, "failed to init switchdev - no MAE support");
3594 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
3597 sfc_err(sa, "failed get PF mport");
3601 rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, &phy);
3603 sfc_err(sa, "failed get PHY mport");
3607 rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy,
3608 SFC_MAE_RULE_PRIO_LOWEST,
3609 &mae->switchdev_rule_pf_to_ext);
3611 sfc_err(sa, "failed add MAE rule to forward from PF to PHY");
3615 rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf,
3616 SFC_MAE_RULE_PRIO_LOWEST,
3617 &mae->switchdev_rule_ext_to_pf);
3619 sfc_err(sa, "failed add MAE rule to forward from PHY to PF");
3623 sfc_log_init(sa, "done");
3628 sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
3634 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}

void
sfc_mae_switchdev_fini(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;

	if (!sa->switchdev)
		return;

	sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
	sfc_mae_rule_del(sa, mae->switchdev_rule_ext_to_pf);
}