/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>
#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_vxlan.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_flow_tunnel.h"
#include "sfc_mae_counter.h"
#include "sfc_log.h"
#include "sfc_switch.h"
#include "sfc_service.h"
static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
			    efx_mport_sel_t *mportp)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
					      mportp);
}
static int
sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
			      uint32_t nb_counters_max)
{
	return sfc_mae_counters_init(&registry->counters, nb_counters_max);
}
static void
sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
{
	sfc_mae_counters_fini(&registry->counters);
}
static int
sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa,
				      struct sfc_mae_rule **rule)
{
	struct sfc_mae *mae = &sa->mae;
	struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules;
	unsigned int entry;
	int rc;

	for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) {
		if (internal_rules->rules[entry].spec == NULL)
			break;
	}

	if (entry == SFC_MAE_NB_RULES_MAX) {
		rc = ENOSPC;
		sfc_err(sa, "failed: too many rules (%u rules used)", entry);
		goto fail_too_many_rules;
	}

	*rule = &internal_rules->rules[entry];

	return 0;

fail_too_many_rules:
	return rc;
}
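/*
 * Add an internal MAE rule to match traffic coming from the MPORT given
 * by "mport_match" and deliver it to the MPORT given by "mport_deliver".
 * A negative "prio" requests the lowest-priority level. A hypothetical
 * usage sketch (illustration only; the MPORT selectors are assumed to
 * have been produced by efx_mae_mport_by_*() helpers beforehand):
 *
 *	struct sfc_mae_rule *rule;
 *	int rc;
 *
 *	rc = sfc_mae_rule_add_mport_match_deliver(sa, &mport_from,
 *						  &mport_to, -1, &rule);
 */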
int
sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
				     const efx_mport_sel_t *mport_match,
				     const efx_mport_sel_t *mport_deliver,
				     int prio, struct sfc_mae_rule **rulep)
{
	struct sfc_mae *mae = &sa->mae;
	struct sfc_mae_rule *rule;
	int rc;

	sfc_log_init(sa, "entry");

	if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) {
		rc = EINVAL;
		sfc_err(sa, "failed: invalid priority %d (max %u)", prio,
			mae->nb_action_rule_prios_max);
		goto fail_invalid_prio;
	}
	if (prio < 0)
		prio = mae->nb_action_rule_prios_max - 1;

	rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule);
	if (rc != 0)
		goto fail_find_empty_slot;

	sfc_log_init(sa, "init MAE match spec");
	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
				     (uint32_t)prio, &rule->spec);
	if (rc != 0) {
		sfc_err(sa, "failed to init MAE match spec");
		goto fail_match_init;
	}

	rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL);
	if (rc != 0) {
		sfc_err(sa, "failed to set MAE match mport selector");
		goto fail_mport_set;
	}

	rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions);
	if (rc != 0) {
		sfc_err(sa, "failed to init MAE action set");
		goto fail_action_init;
	}

	rc = efx_mae_action_set_populate_deliver(rule->actions,
						 mport_deliver);
	if (rc != 0) {
		sfc_err(sa, "failed to populate deliver action");
		goto fail_populate_deliver;
	}

	rc = efx_mae_action_set_alloc(sa->nic, rule->actions,
				      &rule->action_set);
	if (rc != 0) {
		sfc_err(sa, "failed to allocate action set");
		goto fail_action_set_alloc;
	}

	rc = efx_mae_action_rule_insert(sa->nic, rule->spec, NULL,
					&rule->action_set,
					&rule->rule_id);
	if (rc != 0) {
		sfc_err(sa, "failed to insert action rule");
		goto fail_rule_insert;
	}

	*rulep = rule;

	sfc_log_init(sa, "done");

	return 0;

fail_rule_insert:
	efx_mae_action_set_free(sa->nic, &rule->action_set);

fail_action_set_alloc:
fail_populate_deliver:
	efx_mae_action_set_spec_fini(sa->nic, rule->actions);

fail_action_init:
fail_mport_set:
	efx_mae_match_spec_fini(sa->nic, rule->spec);

fail_match_init:
fail_find_empty_slot:
fail_invalid_prio:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}
void
sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule)
{
	if (rule == NULL || rule->spec == NULL)
		return;

	efx_mae_action_rule_remove(sa->nic, &rule->rule_id);
	efx_mae_action_set_free(sa->nic, &rule->action_set);
	efx_mae_action_set_spec_fini(sa->nic, rule->actions);
	efx_mae_match_spec_fini(sa->nic, rule->spec);

	rule->spec = NULL;
}
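/*
 * Attach the MAE-specific part of the adapter: probe FW support, query
 * MAE limits, size the counter registry accordingly, register the port
 * in the common switch infrastructure and allocate the bounce buffer
 * for encap. headers. The failure path unwinds in reverse order.
 */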
int
sfc_mae_attach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_mae_switch_port_request switch_port_request = {0};
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_mport_sel_t entity_mport;
	struct sfc_mae *mae = &sa->mae;
	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
	efx_mae_limits_t limits;
	int rc;

	sfc_log_init(sa, "entry");

	if (!encp->enc_mae_supported) {
		mae->status = SFC_MAE_STATUS_UNSUPPORTED;
		return 0;
	}

	sfc_log_init(sa, "init MAE");
	rc = efx_mae_init(sa->nic);
	if (rc != 0)
		goto fail_mae_init;

	sfc_log_init(sa, "get MAE limits");
	rc = efx_mae_get_limits(sa->nic, &limits);
	if (rc != 0)
		goto fail_mae_get_limits;

	sfc_log_init(sa, "init MAE counter registry");
	rc = sfc_mae_counter_registry_init(&mae->counter_registry,
					   limits.eml_max_n_counters);
	if (rc != 0) {
		sfc_err(sa, "failed to init MAE counter registry for %u entries: %s",
			limits.eml_max_n_counters, rte_strerror(rc));
		goto fail_counter_registry_init;
	}

	sfc_log_init(sa, "assign entity MPORT");
	rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
	if (rc != 0)
		goto fail_mae_assign_entity_mport;

	sfc_log_init(sa, "assign RTE switch domain");
	rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
	if (rc != 0)
		goto fail_mae_assign_switch_domain;

	sfc_log_init(sa, "assign RTE switch port");
	switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
	switch_port_request.entity_mportp = &entity_mport;
	/* RTE ethdev MPORT matches that of the entity for independent ports. */
	switch_port_request.ethdev_mportp = &entity_mport;
	switch_port_request.ethdev_port_id = sas->port_id;
	rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
					&switch_port_request,
					&mae->switch_port_id);
	if (rc != 0)
		goto fail_mae_assign_switch_port;

	sfc_log_init(sa, "allocate encap. header bounce buffer");
	bounce_eh->buf_size = limits.eml_encap_header_size_limit;
	bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
				    bounce_eh->buf_size, 0);
	if (bounce_eh->buf == NULL) {
		rc = ENOMEM;
		goto fail_mae_alloc_bounce_eh;
	}

	mae->status = SFC_MAE_STATUS_SUPPORTED;
	mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
	mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
	mae->encap_types_supported = limits.eml_encap_types_supported;
	TAILQ_INIT(&mae->outer_rules);
	TAILQ_INIT(&mae->encap_headers);
	TAILQ_INIT(&mae->action_sets);

	sfc_log_init(sa, "done");

	return 0;

fail_mae_alloc_bounce_eh:
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
	sfc_mae_counter_registry_fini(&mae->counter_registry);

fail_counter_registry_init:
fail_mae_get_limits:
	efx_mae_fini(sa->nic);

fail_mae_init:
	sfc_log_init(sa, "failed %d", rc);

	return rc;
}
void
sfc_mae_detach(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;
	enum sfc_mae_status status_prev = mae->status;

	sfc_log_init(sa, "entry");

	mae->nb_action_rule_prios_max = 0;
	mae->status = SFC_MAE_STATUS_UNKNOWN;

	if (status_prev != SFC_MAE_STATUS_SUPPORTED)
		return;

	rte_free(mae->bounce_eh.buf);
	sfc_mae_counter_registry_fini(&mae->counter_registry);

	efx_mae_fini(sa->nic);

	sfc_log_init(sa, "done");
}
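/*
 * The entries below (outer rules, encap. headers, action sets) follow a
 * common two-level reference counting scheme:
 *
 * - _attach()/_add() and _del() manage driver-level entries and their
 *   "refcnt" (the number of flow entities referring to the entry);
 *
 * - _enable() and _disable() manage the corresponding FW resource and
 *   its "fw_rsrc.refcnt"; the FW resource is allocated on the first
 *   enable and freed on the last disable.
 *
 * A minimal lifecycle sketch (illustration only):
 *
 *	rule = sfc_mae_outer_rule_attach(sa, spec, encap_type);
 *	if (rule == NULL)
 *		rc = sfc_mae_outer_rule_add(sa, spec, encap_type, &rule);
 *	rc = sfc_mae_outer_rule_enable(sa, rule, match_spec_action);
 *	...
 *	sfc_mae_outer_rule_disable(sa, rule);
 *	sfc_mae_outer_rule_del(sa, rule);
 */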
static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
			  const efx_mae_match_spec_t *match_spec,
			  efx_tunnel_protocol_t encap_type)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
		if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
		    rule->encap_type == encap_type) {
			sfc_dbg(sa, "attaching to outer_rule=%p", rule);
			++(rule->refcnt);
			return rule;
		}
	}

	return NULL;
}
static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
		       efx_mae_match_spec_t *match_spec,
		       efx_tunnel_protocol_t encap_type,
		       struct sfc_mae_outer_rule **rulep)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
	if (rule == NULL)
		return ENOMEM;

	rule->refcnt = 1;
	rule->match_spec = match_spec;
	rule->encap_type = encap_type;

	rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

	*rulep = rule;

	sfc_dbg(sa, "added outer_rule=%p", rule);

	return 0;
}
static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
		       struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(rule->refcnt != 0);

	--(rule->refcnt);

	if (rule->refcnt != 0)
		return;

	if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    rule->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
			rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
	}

	efx_mae_match_spec_fini(sa->nic, rule->match_spec);

	TAILQ_REMOVE(&mae->outer_rules, rule, entries);
	rte_free(rule);

	sfc_dbg(sa, "deleted outer_rule=%p", rule);
}
static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
			  struct sfc_mae_outer_rule *rule,
			  efx_mae_match_spec_t *match_spec_action)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(rule->match_spec != NULL);

		rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
					       rule->encap_type,
					       &fw_rsrc->rule_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable outer_rule=%p: %s",
				rule, strerror(rc));
			return rc;
		}
	}

	rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
						  &fw_rsrc->rule_id);
	if (rc != 0) {
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_outer_rule_remove(sa->nic,
							&fw_rsrc->rule_id);
			fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
		}

		sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));

		return rc;
	}

	if (fw_rsrc->refcnt == 0) {
		sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
			rule, fw_rsrc->rule_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}
static void
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
			   struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
			rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
				rule, fw_rsrc->rule_id.id);
		} else {
			sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
				rule, fw_rsrc->rule_id.id, strerror(rc));
		}
		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);
}
static struct sfc_mae_encap_header *
sfc_mae_encap_header_attach(struct sfc_adapter *sa,
			    const struct sfc_mae_bounce_eh *bounce_eh)
{
	struct sfc_mae_encap_header *encap_header;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
		if (encap_header->size == bounce_eh->size &&
		    memcmp(encap_header->buf, bounce_eh->buf,
			   bounce_eh->size) == 0) {
			sfc_dbg(sa, "attaching to encap_header=%p",
				encap_header);
			++(encap_header->refcnt);
			return encap_header;
		}
	}

	return NULL;
}
static int
sfc_mae_encap_header_add(struct sfc_adapter *sa,
			 const struct sfc_mae_bounce_eh *bounce_eh,
			 struct sfc_mae_encap_header **encap_headerp)
{
	struct sfc_mae_encap_header *encap_header;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	encap_header = rte_zmalloc("sfc_mae_encap_header",
				   sizeof(*encap_header), 0);
	if (encap_header == NULL)
		return ENOMEM;

	encap_header->size = bounce_eh->size;

	encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
				       encap_header->size, 0);
	if (encap_header->buf == NULL) {
		rte_free(encap_header);
		return ENOMEM;
	}

	rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);

	encap_header->refcnt = 1;
	encap_header->type = bounce_eh->type;
	encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);

	*encap_headerp = encap_header;

	sfc_dbg(sa, "added encap_header=%p", encap_header);

	return 0;
}
static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
			 struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae *mae = &sa->mae;

	if (encap_header == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(encap_header->refcnt != 0);

	--(encap_header->refcnt);

	if (encap_header->refcnt != 0)
		return;

	if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    encap_header->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
			encap_header, encap_header->fw_rsrc.eh_id.id,
			encap_header->fw_rsrc.refcnt);
	}

	TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
	rte_free(encap_header->buf);
	rte_free(encap_header);

	sfc_dbg(sa, "deleted encap_header=%p", encap_header);
}
static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
			    struct sfc_mae_encap_header *encap_header,
			    efx_mae_actions_t *action_set_spec)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(encap_header->buf != NULL);
		SFC_ASSERT(encap_header->size != 0);

		rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
						encap_header->buf,
						encap_header->size,
						&fw_rsrc->eh_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable encap_header=%p: %s",
				encap_header, strerror(rc));
			return rc;
		}
	}

	rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
					      &fw_rsrc->eh_id);
	if (rc != 0) {
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_encap_header_free(sa->nic,
							&fw_rsrc->eh_id);
			fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
		}

		sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));

		return rc;
	}

	if (fw_rsrc->refcnt == 0) {
		sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
			encap_header, fw_rsrc->eh_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}
static void
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
			     struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
			encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
				encap_header, fw_rsrc->eh_id.id);
		} else {
			sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
				encap_header, fw_rsrc->eh_id.id, strerror(rc));
		}
		fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);
}
static int
sfc_mae_counters_enable(struct sfc_adapter *sa,
			struct sfc_mae_counter_id *counters,
			unsigned int n_counters,
			efx_mae_actions_t *action_set_spec)
{
	int rc;

	sfc_log_init(sa, "entry");

	if (n_counters == 0) {
		sfc_log_init(sa, "no counters - skip");
		return 0;
	}

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(n_counters == 1);

	rc = sfc_mae_counter_enable(sa, &counters[0]);
	if (rc != 0) {
		sfc_err(sa, "failed to enable MAE counter %u: %s",
			counters[0].mae_id.id, rte_strerror(rc));
		goto fail_counter_add;
	}

	rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
						   &counters[0].mae_id);
	if (rc != 0) {
		sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
			counters[0].mae_id.id, rte_strerror(rc));
		goto fail_fill_in_id;
	}

	return 0;

fail_fill_in_id:
	(void)sfc_mae_counter_disable(sa, &counters[0]);

fail_counter_add:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}
static int
sfc_mae_counters_disable(struct sfc_adapter *sa,
			 struct sfc_mae_counter_id *counters,
			 unsigned int n_counters)
{
	if (n_counters == 0)
		return 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(n_counters == 1);

	if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
		sfc_err(sa, "failed to disable: already disabled");
		return EALREADY;
	}

	return sfc_mae_counter_disable(sa, &counters[0]);
}
static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
			  const struct sfc_mae_encap_header *encap_header,
			  unsigned int n_count,
			  const efx_mae_actions_t *spec)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
		/*
		 * Shared counters are not supported, hence action sets with
		 * COUNT are not attachable.
		 */
		if (action_set->encap_header == encap_header &&
		    n_count == 0 &&
		    efx_mae_action_set_specs_equal(action_set->spec, spec)) {
			sfc_dbg(sa, "attaching to action_set=%p", action_set);
			++(action_set->refcnt);
			return action_set;
		}
	}

	return NULL;
}
static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       efx_mae_actions_t *spec,
		       struct sfc_mae_encap_header *encap_header,
		       unsigned int n_counters,
		       struct sfc_mae_action_set **action_setp)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;
	unsigned int i;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
	if (action_set == NULL) {
		sfc_err(sa, "failed to alloc action set");
		return ENOMEM;
	}

	if (n_counters > 0) {
		const struct rte_flow_action *action;

		action_set->counters = rte_malloc("sfc_mae_counter_ids",
			sizeof(action_set->counters[0]) * n_counters, 0);
		if (action_set->counters == NULL) {
			rte_free(action_set);
			sfc_err(sa, "failed to alloc counters");
			return ENOMEM;
		}

		for (action = actions, i = 0;
		     action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
		     ++action) {
			const struct rte_flow_action_count *conf;

			if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
				continue;

			conf = action->conf;

			action_set->counters[i].mae_id.id =
				EFX_MAE_RSRC_ID_INVALID;
			action_set->counters[i].rte_id = conf->id;
			i++;
		}
		action_set->n_counters = n_counters;
	}

	action_set->refcnt = 1;
	action_set->spec = spec;
	action_set->encap_header = encap_header;

	action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

	*action_setp = action_set;

	sfc_dbg(sa, "added action_set=%p", action_set);

	return 0;
}
static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
		       struct sfc_mae_action_set *action_set)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(action_set->refcnt != 0);

	--(action_set->refcnt);

	if (action_set->refcnt != 0)
		return;

	if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    action_set->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
			action_set, action_set->fw_rsrc.aset_id.id,
			action_set->fw_rsrc.refcnt);
	}

	efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
	sfc_mae_encap_header_del(sa, action_set->encap_header);
	if (action_set->n_counters > 0) {
		SFC_ASSERT(action_set->n_counters == 1);
		SFC_ASSERT(action_set->counters[0].mae_id.id ==
			   EFX_MAE_RSRC_ID_INVALID);
		rte_free(action_set->counters);
	}
	TAILQ_REMOVE(&mae->action_sets, action_set, entries);
	rte_free(action_set);

	sfc_dbg(sa, "deleted action_set=%p", action_set);
}
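/*
 * On the first use, the dependencies of an action set are brought up in
 * order: encap. header first, then counters, then the action set itself;
 * sfc_mae_action_set_disable() below tears them down in reverse order.
 */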
static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
			  struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_encap_header *encap_header = action_set->encap_header;
	struct sfc_mae_counter_id *counters = action_set->counters;
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(action_set->spec != NULL);

		rc = sfc_mae_encap_header_enable(sa, encap_header,
						 action_set->spec);
		if (rc != 0)
			return rc;

		rc = sfc_mae_counters_enable(sa, counters,
					     action_set->n_counters,
					     action_set->spec);
		if (rc != 0) {
			sfc_err(sa, "failed to enable %u MAE counters: %s",
				action_set->n_counters, rte_strerror(rc));

			sfc_mae_encap_header_disable(sa, encap_header);
			return rc;
		}

		rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
					      &fw_rsrc->aset_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable action_set=%p: %s",
				action_set, strerror(rc));

			(void)sfc_mae_counters_disable(sa, counters,
						       action_set->n_counters);
			sfc_mae_encap_header_disable(sa, encap_header);
			return rc;
		}

		sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
			action_set, fw_rsrc->aset_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}
static void
sfc_mae_action_set_disable(struct sfc_adapter *sa,
			   struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
			action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
				action_set, fw_rsrc->aset_id.id);
		} else {
			sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
				action_set, fw_rsrc->aset_id.id, strerror(rc));
		}
		fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;

		rc = sfc_mae_counters_disable(sa, action_set->counters,
					      action_set->n_counters);
		if (rc != 0) {
			sfc_err(sa, "failed to disable %u MAE counters: %s",
				action_set->n_counters, rte_strerror(rc));
		}

		sfc_mae_encap_header_disable(sa, action_set->encap_header);
	}

	--(fw_rsrc->refcnt);
}
void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec;
	struct sfc_flow_spec_mae *spec_mae;

	if (flow == NULL)
		return;

	spec = &flow->spec;

	if (spec == NULL)
		return;

	spec_mae = &spec->mae;

	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

	if (spec_mae->outer_rule != NULL)
		sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

	if (spec_mae->action_set != NULL)
		sfc_mae_action_set_del(sa, spec_mae->action_set);

	if (spec_mae->match_spec != NULL)
		efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}
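/*
 * Illustration only: for pattern ETH / VLAN / VLAN / IPv4, the parsed
 * pdata->ethertypes[] holds { outer TPID, inner TPID, 0x0800 }, so the
 * helper below sets ETHER_TYPE_BE to the L3 EtherType (0x0800) and
 * VLAN0_PROTO_BE / VLAN1_PROTO_BE to the two TPIDs.
 */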
static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	const efx_mae_field_id_t field_ids[] = {
		EFX_MAE_FIELD_VLAN0_PROTO_BE,
		EFX_MAE_FIELD_VLAN1_PROTO_BE,
	};
	const struct sfc_mae_ethertype *et;
	unsigned int i;
	int rc;

	/*
	 * In accordance with RTE flow API convention, the innermost L2
	 * item's "type" ("inner_type") is an L3 EtherType. If there is
	 * no L3 item, it's 0x0000/0x0000.
	 */
	et = &pdata->ethertypes[pdata->nb_vlan_tags];
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
					  sizeof(et->value),
					  (const uint8_t *)&et->value,
					  sizeof(et->mask),
					  (const uint8_t *)&et->mask);
	if (rc != 0)
		return rc;

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (i = 0; i < pdata->nb_vlan_tags; ++i) {
		et = &pdata->ethertypes[i];

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[field_ids[i]],
						  sizeof(et->value),
						  (const uint8_t *)&et->value,
						  sizeof(et->mask),
						  (const uint8_t *)&et->mask);
		if (rc != 0)
			return rc;
	}

	return 0;
}
static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
				  struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
	const rte_be16_t supported_tpids[] = {
		/* VLAN standard TPID (always the first element) */
		RTE_BE16(RTE_ETHER_TYPE_VLAN),

		/* Double-tagging TPIDs */
		RTE_BE16(RTE_ETHER_TYPE_QINQ),
		RTE_BE16(RTE_ETHER_TYPE_QINQ1),
		RTE_BE16(RTE_ETHER_TYPE_QINQ2),
		RTE_BE16(RTE_ETHER_TYPE_QINQ3),
	};
	bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
	unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
	unsigned int ethertype_idx;
	const uint8_t *valuep;
	const uint8_t *maskp;
	int rc;

	if (pdata->innermost_ethertype_restriction.mask != 0 &&
	    pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		/*
		 * If a single item VLAN is followed by an L3 item, value
		 * of "type" in item ETH can't be a double-tagging TPID.
		 */
		nb_supported_tpids = 1;
	}

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (ethertype_idx = 0;
	     ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
		rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
		rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
		unsigned int tpid_idx;

		/*
		 * This loop can have only two iterations. On the second one,
		 * drop outer tag presence enforcement bit because the inner
		 * tag presence automatically assumes that for the outer tag.
		 */
		enforce_tag_presence[0] = B_FALSE;

		if (tpid_m == RTE_BE16(0)) {
			if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
				enforce_tag_presence[ethertype_idx] = B_TRUE;

			/* No match on this field, and no value check. */
			nb_supported_tpids = 1;
			continue;
		}

		/* Only an exact match is supported. */
		if (tpid_m != RTE_BE16(0xffff)) {
			sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
				rte_be_to_cpu_16(tpid_m));
			rc = EINVAL;
			goto fail;
		}

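		/*
		 * Pick the starting TPID index for this tag. For the outer
		 * tag of a double-tagged match the index starts past the
		 * standard TPID, so only double-tagging TPIDs are allowed;
		 * for the innermost (or the only) tag it starts at 0, and
		 * nb_supported_tpids may have been clamped to 1 above, in
		 * which case only the standard TPID (0x8100) is allowed.
		 */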
		for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
		     tpid_idx < nb_supported_tpids; ++tpid_idx) {
			if (tpid_v == supported_tpids[tpid_idx])
				break;
		}

		if (tpid_idx == nb_supported_tpids) {
			sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
				rte_be_to_cpu_16(tpid_v));
			rc = EINVAL;
			goto fail;
		}

		nb_supported_tpids = 1;
	}

	if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
		struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
		rte_be16_t enforced_et;

		enforced_et = pdata->innermost_ethertype_restriction.value;

		if (et->mask == 0) {
			et->mask = RTE_BE16(0xffff);
			et->value = enforced_et;
		} else if (et->mask != RTE_BE16(0xffff) ||
			   et->value != enforced_et) {
			sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
				rte_be_to_cpu_16(enforced_et),
				rte_be_to_cpu_16(et->value),
				rte_be_to_cpu_16(et->mask));
			rc = EINVAL;
			goto fail;
		}
	}

	/*
	 * Now that the number of VLAN tags is known, set the fields
	 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
	 * one is a valid L3 EtherType (or 0x0000/0x0000), and the
	 * last two are valid TPIDs (or 0x0000/0x0000).
	 */
	rc = sfc_mae_set_ethertypes(ctx);
	if (rc != 0)
		goto fail;

	if (pdata->l3_next_proto_restriction_mask == 0xff) {
		if (pdata->l3_next_proto_mask == 0) {
			pdata->l3_next_proto_mask = 0xff;
			pdata->l3_next_proto_value =
				pdata->l3_next_proto_restriction_value;
		} else if (pdata->l3_next_proto_mask != 0xff ||
			   pdata->l3_next_proto_value !=
			   pdata->l3_next_proto_restriction_value) {
			sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
				pdata->l3_next_proto_restriction_value,
				pdata->l3_next_proto_value,
				pdata->l3_next_proto_mask);
			rc = EINVAL;
			goto fail;
		}
	}

	if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
						fremap[EFX_MAE_FIELD_HAS_OVLAN],
						enforce_tag_presence[0] ||
						pdata->has_ovlan_value);
		if (rc != 0)
			goto fail;
	}

	if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
						fremap[EFX_MAE_FIELD_HAS_IVLAN],
						enforce_tag_presence[1] ||
						pdata->has_ivlan_value);
		if (rc != 0)
			goto fail;
	}

	valuep = (const uint8_t *)&pdata->l3_next_proto_value;
	maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_IP_PROTO],
					  sizeof(pdata->l3_next_proto_value),
					  valuep,
					  sizeof(pdata->l3_next_proto_mask),
					  maskp);
	if (rc != 0)
		goto fail;

	return 0;

fail:
	return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				  "Failed to process pattern data");
}
static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
				struct sfc_flow_parse_ctx *ctx,
				struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_port_id supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_port_id_mask;
	const struct rte_flow_item_port_id *spec = NULL;
	const struct rte_flow_item_port_id *mask = NULL;
	efx_mport_sel_t mport_sel;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_port_id), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PORT_ID pattern item");
	}

	/* If "spec" is not set, could be any port ID */
	if (spec == NULL)
		return 0;

	if (spec->id > UINT16_MAX) {
		return rte_flow_error_set(error, EOVERFLOW,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "The port ID is too large");
	}

	rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
					   spec->id, &mport_sel);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't find RTE ethdev by the port ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
					  &mport_sel, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the port ID");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}
static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
				 struct sfc_flow_parse_ctx *ctx,
				 struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_phy_port supp_mask = {
		.index = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_phy_port_mask;
	const struct rte_flow_item_phy_port *spec = NULL;
	const struct rte_flow_item_phy_port *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_phy_port), error);
	if (rc != 0)
		return rc;

	if (mask->index != supp_mask.index) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PHY_PORT pattern item");
	}

	/* If "spec" is not set, could be any physical port */
	if (spec == NULL)
		return 0;

	rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PHY_PORT index");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PHY_PORT");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}
static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
					    &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}
static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	const struct rte_flow_item_vf supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_vf_mask;
	const struct rte_flow_item_vf *spec = NULL;
	const struct rte_flow_item_vf *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_vf), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the VF pattern item");
	}

	/*
	 * If "spec" is not set, the item requests any VF related to the
	 * PF of the current DPDK port (but not the PF itself).
	 * Reject this match criterion as unsupported.
	 */
	if (spec == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad spec in the VF pattern item");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF + VF IDs");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF + VF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}
/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED	EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
	efx_mae_field_id_t		field_id;
	size_t				size;
	/* Field offset in the corresponding rte_flow_item_ struct */
	size_t				ofst;
};
static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
			     unsigned int nb_field_locators, void *mask_ptr,
			     size_t mask_size)
{
	unsigned int i;

	memset(mask_ptr, 0, mask_size);

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		SFC_ASSERT(fl->ofst + fl->size <= mask_size);
		memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
	}
}
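/*
 * For instance (illustration only), invoking the helper above with
 * flocs_eth defined below yields a struct rte_flow_item_eth mask with
 * all-ones bytes in the "type", "dst" and "src" fields and zeros
 * elsewhere.
 */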
static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
		   unsigned int nb_field_locators, const uint8_t *spec,
		   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
		   struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
			continue;

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[fl->field_id],
						  fl->size, spec + fl->ofst,
						  fl->size, mask + fl->ofst);
		if (rc != 0)
			break;
	}

	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return rc;
}
static const struct sfc_mae_field_locator flocs_eth[] = {
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
		offsetof(struct rte_flow_item_eth, type),
	},
	{
		EFX_MAE_FIELD_ETH_DADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
		offsetof(struct rte_flow_item_eth, dst),
	},
	{
		EFX_MAE_FIELD_ETH_SADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
		offsetof(struct rte_flow_item_eth, src),
	},
};
static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct rte_flow_item_eth supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
				     &supp_mask, sizeof(supp_mask));
	supp_mask.has_vlan = 1;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
		struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
		const struct rte_flow_item_eth *item_spec;
		const struct rte_flow_item_eth *item_mask;

		item_spec = (const struct rte_flow_item_eth *)spec;
		item_mask = (const struct rte_flow_item_eth *)mask;

		/*
		 * Remember various match criteria in the parsing context.
		 * sfc_mae_rule_process_pattern_data() will consider them
		 * altogether when the rest of the items have been parsed.
		 */
		ethertypes[0].value = item_spec->type;
		ethertypes[0].mask = item_mask->type;
		if (item_mask->has_vlan) {
			pdata->has_ovlan_mask = B_TRUE;
			if (item_spec->has_vlan)
				pdata->has_ovlan_value = B_TRUE;
		}
	} else {
		/*
		 * The specification is empty. The overall pattern
		 * validity will be enforced at the end of parsing.
		 * See sfc_mae_rule_process_pattern_data().
		 */
		return 0;
	}

	return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
				  ctx_mae, error);
}
static const struct sfc_mae_field_locator flocs_vlan[] = {
	/* Outermost tag */
	{
		EFX_MAE_FIELD_VLAN0_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},

	/* Innermost tag */
	{
		EFX_MAE_FIELD_VLAN1_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},
};
static int
sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
		&pdata->has_ovlan_mask,
		&pdata->has_ivlan_mask,
	};
	boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
		&pdata->has_ovlan_value,
		&pdata->has_ivlan_value,
	};
	boolean_t *cur_tag_presence_bit_mp;
	boolean_t *cur_tag_presence_bit_vp;
	const struct sfc_mae_field_locator *flocs;
	struct rte_flow_item_vlan supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	unsigned int nb_flocs;
	int rc;

	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't match that many VLAN tags");
	}

	cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
	cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];

	if (*cur_tag_presence_bit_mp == B_TRUE &&
	    *cur_tag_presence_bit_vp == B_FALSE) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"The previous item enforces no (more) VLAN, "
				"so the current item (VLAN) must not exist");
	}

	nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
	flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;

	sfc_mae_item_build_supp_mask(flocs, nb_flocs,
				     &supp_mask, sizeof(supp_mask));
	/*
	 * This only means that the field is supported by the driver and libefx.
	 * Support on NIC level will be checked when all items have been parsed.
	 */
	supp_mask.has_more_vlan = 1;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_vlan_mask,
				 sizeof(struct rte_flow_item_vlan), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_ethertype *et = pdata->ethertypes;
		const struct rte_flow_item_vlan *item_spec;
		const struct rte_flow_item_vlan *item_mask;

		item_spec = (const struct rte_flow_item_vlan *)spec;
		item_mask = (const struct rte_flow_item_vlan *)mask;

		/*
		 * Remember various match criteria in the parsing context.
		 * sfc_mae_rule_process_pattern_data() will consider them
		 * altogether when the rest of the items have been parsed.
		 */
		et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
		et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
		pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
		if (item_mask->has_more_vlan) {
			if (pdata->nb_vlan_tags ==
			    SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
				return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Can't use 'has_more_vlan' in "
					"the second VLAN item");
			}
			pdata->has_ivlan_mask = B_TRUE;
			if (item_spec->has_more_vlan)
				pdata->has_ivlan_value = B_TRUE;
		}

		/* Convert TCI to MAE representation right now. */
		rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
					ctx_mae, error);
		if (rc != 0)
			return rc;
	}

	++(pdata->nb_vlan_tags);

	return 0;
}
static const struct sfc_mae_field_locator flocs_ipv4[] = {
	{
		EFX_MAE_FIELD_SRC_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
		offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
	},
	{
		EFX_MAE_FIELD_IP_TOS,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
				 hdr.type_of_service),
		offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
		offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
	},
};
static int
sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv4 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv4 *item_spec;
		const struct rte_flow_item_ipv4 *item_mask;

		item_spec = (const struct rte_flow_item_ipv4 *)spec;
		item_mask = (const struct rte_flow_item_ipv4 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
		pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
	} else {
		return 0;
	}

	return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
				  ctx_mae, error);
}
static const struct sfc_mae_field_locator flocs_ipv6[] = {
	{
		EFX_MAE_FIELD_SRC_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
		offsetof(struct rte_flow_item_ipv6, hdr.proto),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
		offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
	},
};
static int
sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv6 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	rte_be32_t vtc_flow_be;
	uint32_t vtc_flow;
	uint8_t tc_value;
	uint8_t tc_mask;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
				     &supp_mask, sizeof(supp_mask));

	vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
	memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv6 *item_spec;
		const struct rte_flow_item_ipv6 *item_mask;

		item_spec = (const struct rte_flow_item_ipv6 *)spec;
		item_mask = (const struct rte_flow_item_ipv6 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.proto;
		pdata->l3_next_proto_mask = item_mask->hdr.proto;
	} else {
		return 0;
	}

	rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
				ctx_mae, error);
	if (rc != 0)
		return rc;

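	/*
	 * The first 32 bits of the IPv6 header hold, from MSB to LSB, the
	 * 4-bit version, the 8-bit traffic class and the 20-bit flow label
	 * (RFC 8200); the traffic class is extracted from the CPU-order
	 * word by masking and shifting below.
	 */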
	memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  fremap[EFX_MAE_FIELD_IP_TOS],
					  sizeof(tc_value), &tc_value,
					  sizeof(tc_mask), &tc_mask);
	if (rc != 0) {
		return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return 0;
}
static const struct sfc_mae_field_locator flocs_tcp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
		offsetof(struct rte_flow_item_tcp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
		offsetof(struct rte_flow_item_tcp, hdr.dst_port),
	},
	{
		EFX_MAE_FIELD_TCP_FLAGS_BE,
		/*
		 * The values have been picked intentionally since the
		 * target MAE field is oversize (16 bit). This mapping
		 * relies on the fact that the MAE field is big-endian.
		 */
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
		offsetof(struct rte_flow_item_tcp, hdr.data_off),
	},
};
static int
sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_tcp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	/*
	 * When encountered among outermost items, item TCP is invalid.
	 * Check which match specification is being constructed now.
	 */
	if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "TCP in outer frame is invalid");
	}

	sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
				  ctx_mae, error);
}
static const struct sfc_mae_field_locator flocs_udp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
		offsetof(struct rte_flow_item_udp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
		offsetof(struct rte_flow_item_udp, hdr.dst_port),
	},
};
static int
sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_udp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
				  ctx_mae, error);
}
static const struct sfc_mae_field_locator flocs_tunnel[] = {
	{
		/*
		 * The size and offset values are relevant
		 * for Geneve and NVGRE, too.
		 */
		.size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
		.ofst = offsetof(struct rte_flow_item_vxlan, vni),
	},
};
/*
 * An auxiliary registry which allows using non-encap. field IDs
 * directly when building a match specification of type ACTION.
 *
 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
 */
static const efx_mae_field_id_t field_ids_no_remap[] = {
#define FIELD_ID_NO_REMAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field

	FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
	FIELD_ID_NO_REMAP(ETH_SADDR_BE),
	FIELD_ID_NO_REMAP(ETH_DADDR_BE),
	FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
	FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
	FIELD_ID_NO_REMAP(SRC_IP4_BE),
	FIELD_ID_NO_REMAP(DST_IP4_BE),
	FIELD_ID_NO_REMAP(IP_PROTO),
	FIELD_ID_NO_REMAP(IP_TOS),
	FIELD_ID_NO_REMAP(IP_TTL),
	FIELD_ID_NO_REMAP(SRC_IP6_BE),
	FIELD_ID_NO_REMAP(DST_IP6_BE),
	FIELD_ID_NO_REMAP(L4_SPORT_BE),
	FIELD_ID_NO_REMAP(L4_DPORT_BE),
	FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
	FIELD_ID_NO_REMAP(HAS_OVLAN),
	FIELD_ID_NO_REMAP(HAS_IVLAN),

#undef FIELD_ID_NO_REMAP
};
/*
 * An auxiliary registry which allows using "ENC" field IDs
 * when building a match specification of type OUTER.
 *
 * See sfc_mae_rule_encap_parse_init().
 */
static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
#define FIELD_ID_REMAP_TO_ENCAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field

	FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
	FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
	FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
	FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),

#undef FIELD_ID_REMAP_TO_ENCAP
};
static int
sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
			       struct sfc_flow_parse_ctx *ctx,
			       struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
	uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
	const struct rte_flow_item_vxlan *vxp;
	uint8_t supp_mask[sizeof(uint64_t)];
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	/*
	 * We're about to start processing inner frame items.
	 * Process pattern data that has been deferred so far
	 * and reset pattern data storage.
	 */
	rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
	if (rc != 0)
		return rc;

	memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));

	sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
				     &supp_mask, sizeof(supp_mask));

	/*
	 * This tunnel item was preliminarily detected by
	 * sfc_mae_rule_encap_parse_init(). Default mask
	 * was also picked by that helper. Use it here.
	 */
	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 ctx_mae->tunnel_def_mask,
				 ctx_mae->tunnel_def_mask_size, error);
	if (rc != 0)
		return rc;

	/*
	 * This item and later ones comprise a
	 * match specification of type ACTION.
	 */
	ctx_mae->match_spec = ctx_mae->match_spec_action;

	/* This item and later ones use non-encap. EFX MAE field IDs. */
	ctx_mae->field_ids_remap = field_ids_no_remap;

	if (spec == NULL)
		return 0;

	/*
	 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
	 * Copy 24-bit VNI, which is BE, at offset 1 in it.
	 * The extra byte is 0 both in the mask and in the value.
	 */
	vxp = (const struct rte_flow_item_vxlan *)spec;
	memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));

	vxp = (const struct rte_flow_item_vxlan *)mask;
	memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  EFX_MAE_FIELD_ENC_VNET_ID_BE,
					  sizeof(vnet_id_v), vnet_id_v,
					  sizeof(vnet_id_m), vnet_id_m);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Failed to set VXLAN VNI");
	}

	return rc;
}
2116 static const struct sfc_flow_item sfc_flow_items[] = {
2118 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
2121 * In terms of RTE flow, this item is a META one,
2122 * and its position in the pattern is don't care.
2124 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2125 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2126 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2127 .parse = sfc_mae_rule_parse_item_port_id,
2130 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
2133 * In terms of RTE flow, this item is a META one,
2134 * and its position in the pattern is don't care.
2136 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2137 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2138 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2139 .parse = sfc_mae_rule_parse_item_phy_port,
2142 .type = RTE_FLOW_ITEM_TYPE_PF,
2145 * In terms of RTE flow, this item is a META one,
2146 * and its position in the pattern is don't care.
2148 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2149 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2150 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2151 .parse = sfc_mae_rule_parse_item_pf,
2154 .type = RTE_FLOW_ITEM_TYPE_VF,
2157 * In terms of RTE flow, this item is a META one,
2158 * and its position in the pattern is don't care.
2160 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2161 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2162 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2163 .parse = sfc_mae_rule_parse_item_vf,
2166 .type = RTE_FLOW_ITEM_TYPE_ETH,
2168 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
2169 .layer = SFC_FLOW_ITEM_L2,
2170 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2171 .parse = sfc_mae_rule_parse_item_eth,
2174 .type = RTE_FLOW_ITEM_TYPE_VLAN,
2176 .prev_layer = SFC_FLOW_ITEM_L2,
2177 .layer = SFC_FLOW_ITEM_L2,
2178 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2179 .parse = sfc_mae_rule_parse_item_vlan,
2182 .type = RTE_FLOW_ITEM_TYPE_IPV4,
2184 .prev_layer = SFC_FLOW_ITEM_L2,
2185 .layer = SFC_FLOW_ITEM_L3,
2186 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2187 .parse = sfc_mae_rule_parse_item_ipv4,
2190 .type = RTE_FLOW_ITEM_TYPE_IPV6,
2192 .prev_layer = SFC_FLOW_ITEM_L2,
2193 .layer = SFC_FLOW_ITEM_L3,
2194 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2195 .parse = sfc_mae_rule_parse_item_ipv6,
2198 .type = RTE_FLOW_ITEM_TYPE_TCP,
2200 .prev_layer = SFC_FLOW_ITEM_L3,
2201 .layer = SFC_FLOW_ITEM_L4,
2202 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2203 .parse = sfc_mae_rule_parse_item_tcp,
2206 .type = RTE_FLOW_ITEM_TYPE_UDP,
2208 .prev_layer = SFC_FLOW_ITEM_L3,
2209 .layer = SFC_FLOW_ITEM_L4,
2210 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2211 .parse = sfc_mae_rule_parse_item_udp,
2214 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
2216 .prev_layer = SFC_FLOW_ITEM_L4,
2217 .layer = SFC_FLOW_ITEM_START_LAYER,
2218 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2219 .parse = sfc_mae_rule_parse_item_tunnel,
2222 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
2224 .prev_layer = SFC_FLOW_ITEM_L4,
2225 .layer = SFC_FLOW_ITEM_START_LAYER,
2226 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2227 .parse = sfc_mae_rule_parse_item_tunnel,
2230 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
2232 .prev_layer = SFC_FLOW_ITEM_L3,
2233 .layer = SFC_FLOW_ITEM_START_LAYER,
2234 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2235 .parse = sfc_mae_rule_parse_item_tunnel,
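/*
 * For instance (an illustrative pattern, not taken from this file), the
 * items ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / TCP satisfy this table:
 * item VXLAN resets the layer to START_LAYER, so the inner ETH..TCP
 * chain is validated by the same prev_layer/layer pairs as the outer one.
 */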
2240 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
2241 struct sfc_mae_parse_ctx *ctx,
2242 struct sfc_mae_outer_rule **rulep,
2243 struct rte_flow_error *error)
2245 efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
2248 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
2253 SFC_ASSERT(ctx->match_spec_outer != NULL);
2255 if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
2256 return rte_flow_error_set(error, ENOTSUP,
2257 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2258 "Inconsistent pattern (outer)");
2261 *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
2263 if (*rulep != NULL) {
2264 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2266 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
2267 ctx->encap_type, rulep);
2269 return rte_flow_error_set(error, rc,
2270 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2271 "Failed to process the pattern");
2275 /* The spec has now been tracked by the outer rule entry. */
2276 ctx->match_spec_outer = NULL;
2280 * In MAE, the lookup sequence comprises outer parse, outer rule lookup,
2281 * inner parse (when some outer rule is hit) and action rule lookup.
2282 * If the currently processed flow does not come with an outer rule,
2283 * its action rule must be available only for packets that miss in
2284 * the outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
2285 * in the action rule specification; this ensures correct behaviour.
2287 * If, on the other hand, this flow does have an outer rule, its ID
2288 * may be unknown at the moment (not yet allocated), but OR_ID mask
2289 * has to be set to 0xffffffff anyway for correct class comparisons.
2290 * When the outer rule has been allocated, this match field will be
2291 * overridden by sfc_mae_outer_rule_enable() to use the right value.
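*
* To summarise the two cases (illustration only):
*   - no outer rule:   OR_ID value 0xffffffff, mask 0xffffffff;
*   - outer rule used: OR_ID value is a placeholder (the invalid rule
*                      ID) for now, mask 0xffffffff, overridden later.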
2293 rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
2297 sfc_mae_outer_rule_del(sa, *rulep);
2301 return rte_flow_error_set(error, rc,
2302 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2303 "Failed to process the pattern");
2310 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
2311 const struct rte_flow_item pattern[],
2312 struct sfc_mae_parse_ctx *ctx,
2313 struct rte_flow_error *error)
2315 struct sfc_mae *mae = &sa->mae;
2318 if (pattern == NULL) {
2319 rte_flow_error_set(error, EINVAL,
2320 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
2326 switch (pattern->type) {
2327 case RTE_FLOW_ITEM_TYPE_VXLAN:
2328 ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
2329 ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
2330 ctx->tunnel_def_mask_size =
2331 sizeof(rte_flow_item_vxlan_mask);
2333 case RTE_FLOW_ITEM_TYPE_GENEVE:
2334 ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
2335 ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
2336 ctx->tunnel_def_mask_size =
2337 sizeof(rte_flow_item_geneve_mask);
2339 case RTE_FLOW_ITEM_TYPE_NVGRE:
2340 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
2341 ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
2342 ctx->tunnel_def_mask_size =
2343 sizeof(rte_flow_item_nvgre_mask);
2345 case RTE_FLOW_ITEM_TYPE_END:
2355 if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
2358 if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
2359 return rte_flow_error_set(error, ENOTSUP,
2360 RTE_FLOW_ERROR_TYPE_ITEM,
2361 pattern, "Unsupported tunnel item");
2364 if (ctx->priority >= mae->nb_outer_rule_prios_max) {
2365 return rte_flow_error_set(error, ENOTSUP,
2366 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2367 NULL, "Unsupported priority level");
2370 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
2371 &ctx->match_spec_outer);
2373 return rte_flow_error_set(error, rc,
2374 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
2375 "Failed to initialise outer rule match specification");
2378 /* Outermost items comprise a match specification of type OUTER. */
2379 ctx->match_spec = ctx->match_spec_outer;
2381 /* Outermost items use "ENC" EFX MAE field IDs. */
2382 ctx->field_ids_remap = field_ids_remap_to_encap;
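/*
 * For example (illustrative), once this remap is in effect, a match on
 * the outermost EtherType is requested via the "ENC" counterpart of the
 * regular EtherType field ID, in line with the OUTER rule type above.
 */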
2388 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
2389 struct sfc_mae_parse_ctx *ctx)
2391 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2394 if (ctx->match_spec_outer != NULL)
2395 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2399 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
2400 const struct rte_flow_item pattern[],
2401 struct sfc_flow_spec_mae *spec,
2402 struct rte_flow_error *error)
2404 struct sfc_mae_parse_ctx ctx_mae;
2405 struct sfc_flow_parse_ctx ctx;
2408 memset(&ctx_mae, 0, sizeof(ctx_mae));
2409 ctx_mae.priority = spec->priority;
2412 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
2414 &ctx_mae.match_spec_action);
2416 rc = rte_flow_error_set(error, rc,
2417 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2418 "Failed to initialise action rule match specification");
2419 goto fail_init_match_spec_action;
2423 * As a preliminary setting, assume that there is no encapsulation
2424 * in the pattern. That is, pattern items are about to comprise a
2425 * match specification of type ACTION and use non-encap. field IDs.
2427 * sfc_mae_rule_encap_parse_init() below may override this.
2429 ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2430 ctx_mae.match_spec = ctx_mae.match_spec_action;
2431 ctx_mae.field_ids_remap = field_ids_no_remap;
2433 ctx.type = SFC_FLOW_PARSE_CTX_MAE;
2436 rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
2438 goto fail_encap_parse_init;
2440 rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2441 pattern, &ctx, error);
2443 goto fail_parse_pattern;
2445 rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
2447 goto fail_process_pattern_data;
2449 rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
2451 goto fail_process_outer;
2453 if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
2454 rc = rte_flow_error_set(error, ENOTSUP,
2455 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2456 "Inconsistent pattern");
2457 goto fail_validate_match_spec_action;
2460 spec->match_spec = ctx_mae.match_spec_action;
2464 fail_validate_match_spec_action:
2466 fail_process_pattern_data:
2468 sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
2470 fail_encap_parse_init:
2471 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
2473 fail_init_match_spec_action:
2478 * An action supported by MAE may correspond to a bundle of RTE flow actions,
2479 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
2480 * That is, related RTE flow actions need to be tracked as parts of a whole
2481 * so that they can be combined into a single action and submitted to the
2482 * MAE representation of a given rule's action set.
2484 * Each RTE flow action provided by an application gets classified as
2485 * one belonging to some bundle type. If an action is not supposed to
2486 * belong to any bundle, or if this action is END, it is described as
2487 * one belonging to a dummy bundle of type EMPTY.
2489 * A currently tracked bundle will be submitted if a repeating
2490 * action or an action of a different bundle type follows.
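*
* Illustrative walk-through (a hypothetical action list): with actions
* OF_PUSH_VLAN, OF_SET_VLAN_VID, OF_SET_VLAN_PCP, DROP, END, the first
* three actions accumulate in one VLAN_PUSH bundle, which is submitted
* when DROP (of bundle type EMPTY) is seen; DROP and END then pass
* through as dummy EMPTY bundles with nothing to submit.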
2493 enum sfc_mae_actions_bundle_type {
2494 SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
2495 SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
2498 struct sfc_mae_actions_bundle {
2499 enum sfc_mae_actions_bundle_type type;
2501 /* Indicates actions already tracked by the current bundle */
2502 uint64_t actions_mask;
2504 /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
2505 rte_be16_t vlan_push_tpid;
2506 rte_be16_t vlan_push_tci;
2510 * Combine configuration of RTE flow actions tracked by the bundle into a
2511 * single action and submit the result to MAE action set specification.
2512 * Do nothing in the case of a dummy action bundle.
2515 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
2516 efx_mae_actions_t *spec)
2520 switch (bundle->type) {
2521 case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
2523 case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
2524 rc = efx_mae_action_set_populate_vlan_push(
2525 spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
2528 SFC_ASSERT(B_FALSE);
2536 * Given the type of the next RTE flow action in the line, decide
2537 * whether a new bundle is about to start, and, if this is the case,
2538 * submit and reset the current bundle.
2541 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
2542 struct sfc_mae_actions_bundle *bundle,
2543 efx_mae_actions_t *spec,
2544 struct rte_flow_error *error)
2546 enum sfc_mae_actions_bundle_type bundle_type_new;
2549 switch (action->type) {
2550 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2551 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2552 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2553 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
2557 * Self-sufficient actions, including END, are handled in this
2558 * case. No checks for unsupported actions are needed here
2559 * because parsing doesn't occur at this point.
2561 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
2565 if (bundle_type_new != bundle->type ||
2566 (bundle->actions_mask & (1ULL << action->type)) != 0) {
2567 rc = sfc_mae_actions_bundle_submit(bundle, spec);
2571 memset(bundle, 0, sizeof(*bundle));
2574 bundle->type = bundle_type_new;
2579 return rte_flow_error_set(error, rc,
2580 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2581 "Failed to request the (group of) action(s)");
2585 sfc_mae_rule_parse_action_of_push_vlan(
2586 const struct rte_flow_action_of_push_vlan *conf,
2587 struct sfc_mae_actions_bundle *bundle)
2589 bundle->vlan_push_tpid = conf->ethertype;
2593 sfc_mae_rule_parse_action_of_set_vlan_vid(
2594 const struct rte_flow_action_of_set_vlan_vid *conf,
2595 struct sfc_mae_actions_bundle *bundle)
2597 bundle->vlan_push_tci |= (conf->vlan_vid &
2598 rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
2602 sfc_mae_rule_parse_action_of_set_vlan_pcp(
2603 const struct rte_flow_action_of_set_vlan_pcp *conf,
2604 struct sfc_mae_actions_bundle *bundle)
2606 uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
2607 RTE_LEN2MASK(3, uint8_t)) << 13;
2609 bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
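/*
 * Illustrative arithmetic (hypothetical values): vlan_pcp = 5 and
 * vlan_vid = 100 yield TCI = (5 << 13) | 100 = 0xa064, accumulated
 * in network byte order by the two helpers above.
 */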
2612 struct sfc_mae_parsed_item {
2613 const struct rte_flow_item *item;
2614 size_t proto_header_ofst;
2615 size_t proto_header_size;
2619 * For each 16-bit word of the given header, override
2620 * bits enforced by the corresponding 16-bit mask.
2623 sfc_mae_header_force_item_masks(uint8_t *header_buf,
2624 const struct sfc_mae_parsed_item *parsed_items,
2625 unsigned int nb_parsed_items)
2627 unsigned int item_idx;
2629 for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
2630 const struct sfc_mae_parsed_item *parsed_item;
2631 const struct rte_flow_item *item;
2632 size_t proto_header_size;
2635 parsed_item = &parsed_items[item_idx];
2636 proto_header_size = parsed_item->proto_header_size;
2637 item = parsed_item->item;
2639 for (ofst = 0; ofst < proto_header_size;
2640 ofst += sizeof(rte_be16_t)) {
2641 rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
2642 const rte_be16_t *w_maskp;
2643 const rte_be16_t *w_specp;
2645 w_maskp = RTE_PTR_ADD(item->mask, ofst);
2646 w_specp = RTE_PTR_ADD(item->spec, ofst);
2648 *wp &= ~(*w_maskp);
2649 *wp |= (*w_specp & *w_maskp);
2652 header_buf += proto_header_size;
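/*
 * Example (hypothetical words): a spec word of 0x1234 under a mask of
 * 0x00ff forces the low byte of the corresponding header word to 0x34
 * and leaves the high byte, possibly set by default fixups, intact.
 */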
2656 #define SFC_IPV4_TTL_DEF 0x40
2657 #define SFC_IPV6_VTC_FLOW_DEF 0x60000000
2658 #define SFC_IPV6_HOP_LIMITS_DEF 0xff
2659 #define SFC_VXLAN_FLAGS_DEF 0x08000000
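/*
 * Note: 0x08 in the leading byte of SFC_VXLAN_FLAGS_DEF is the "I" flag
 * from RFC 7348, indicating that the VNI field is valid.
 */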
2662 sfc_mae_rule_parse_action_vxlan_encap(
2663 struct sfc_mae *mae,
2664 const struct rte_flow_action_vxlan_encap *conf,
2665 efx_mae_actions_t *spec,
2666 struct rte_flow_error *error)
2668 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
2669 struct rte_flow_item *pattern = conf->definition;
2670 uint8_t *buf = bounce_eh->buf;
2672 /* This array will keep track of non-VOID pattern items. */
2673 struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
2675 1 /* IPv4 or IPv6 */ +
2678 unsigned int nb_parsed_items = 0;
2680 size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
2681 uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
2682 sizeof(struct rte_ipv6_hdr))];
2683 struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
2684 struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
2685 struct rte_vxlan_hdr *vxlan = NULL;
2686 struct rte_udp_hdr *udp = NULL;
2687 unsigned int nb_vlan_tags = 0;
2688 size_t next_proto_ofst = 0;
2689 size_t ethertype_ofst = 0;
2693 if (pattern == NULL) {
2694 return rte_flow_error_set(error, EINVAL,
2695 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2696 "The encap. header definition is NULL");
2699 bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
2700 bounce_eh->size = 0;
2703 * Process pattern items and remember non-VOID ones.
2704 * Defer applying masks until after the complete header
2705 * has been built from the pattern items.
2707 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
2709 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
2710 struct sfc_mae_parsed_item *parsed_item;
2711 const uint64_t exp_items_extra_vlan[] = {
2712 RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
2714 size_t proto_header_size;
2715 rte_be16_t *ethertypep;
2716 uint8_t *next_protop;
2719 if (pattern->spec == NULL) {
2720 return rte_flow_error_set(error, EINVAL,
2721 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2722 "NULL item spec in the encap. header");
2725 if (pattern->mask == NULL) {
2726 return rte_flow_error_set(error, EINVAL,
2727 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2728 "NULL item mask in the encap. header");
2731 if (pattern->last != NULL) {
2732 /* This is not a match pattern, so disallow range. */
2733 return rte_flow_error_set(error, EINVAL,
2734 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2735 "Range item in the encap. header");
2738 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
2739 /* Handle VOID separately, for clarity. */
2743 if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
2744 return rte_flow_error_set(error, ENOTSUP,
2745 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2746 "Unexpected item in the encap. header");
2749 parsed_item = &parsed_items[nb_parsed_items];
2750 buf_cur = buf + bounce_eh->size;
2752 switch (pattern->type) {
2753 case RTE_FLOW_ITEM_TYPE_ETH:
2754 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
2756 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
2759 proto_header_size = sizeof(struct rte_ether_hdr);
2761 ethertype_ofst = eth_ethertype_ofst;
2763 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
2764 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2765 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2767 case RTE_FLOW_ITEM_TYPE_VLAN:
2768 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
2770 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
2773 proto_header_size = sizeof(struct rte_vlan_hdr);
2775 ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
2776 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
2778 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2779 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2783 offsetof(struct rte_vlan_hdr, eth_proto);
2785 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2786 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2787 exp_items |= exp_items_extra_vlan[nb_vlan_tags];
2791 case RTE_FLOW_ITEM_TYPE_IPV4:
2792 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
2794 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
2797 proto_header_size = sizeof(struct rte_ipv4_hdr);
2799 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2800 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2804 offsetof(struct rte_ipv4_hdr, next_proto_id);
2806 ipv4 = (struct rte_ipv4_hdr *)buf_cur;
2808 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2810 case RTE_FLOW_ITEM_TYPE_IPV6:
2811 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
2813 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
2816 proto_header_size = sizeof(struct rte_ipv6_hdr);
2818 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2819 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2821 next_proto_ofst = bounce_eh->size +
2822 offsetof(struct rte_ipv6_hdr, proto);
2824 ipv6 = (struct rte_ipv6_hdr *)buf_cur;
2826 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2828 case RTE_FLOW_ITEM_TYPE_UDP:
2829 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
2831 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
2834 proto_header_size = sizeof(struct rte_udp_hdr);
2836 next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
2837 *next_protop = IPPROTO_UDP;
2839 udp = (struct rte_udp_hdr *)buf_cur;
2841 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
2843 case RTE_FLOW_ITEM_TYPE_VXLAN:
2844 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
2846 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
2849 proto_header_size = sizeof(struct rte_vxlan_hdr);
2851 vxlan = (struct rte_vxlan_hdr *)buf_cur;
2853 udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
2854 udp->dgram_len = RTE_BE16(sizeof(*udp) +
2856 udp->dgram_cksum = 0;
2861 return rte_flow_error_set(error, ENOTSUP,
2862 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2863 "Unknown item in the encap. header");
2866 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
2867 return rte_flow_error_set(error, E2BIG,
2868 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2869 "The encap. header is too big");
2872 if ((proto_header_size & 1) != 0) {
2873 return rte_flow_error_set(error, EINVAL,
2874 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2875 "Odd layer size in the encap. header");
2878 rte_memcpy(buf_cur, pattern->spec, proto_header_size);
2879 bounce_eh->size += proto_header_size;
2881 parsed_item->item = pattern;
2882 parsed_item->proto_header_size = proto_header_size;
2886 if (exp_items != 0) {
2887 /* Parsing item VXLAN would have reset exp_items to 0. */
2888 return rte_flow_error_set(error, ENOTSUP,
2889 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2890 "No item VXLAN in the encap. header");
2893 /* One of the pointers (ipv4, ipv6) refers to a dummy area. */
2894 ipv4->version_ihl = RTE_IPV4_VHL_DEF;
2895 ipv4->time_to_live = SFC_IPV4_TTL_DEF;
2896 ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
2898 /* The HW cannot compute this checksum. */
2899 ipv4->hdr_checksum = 0;
2900 ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
2902 ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
2903 ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
2904 ipv6->payload_len = udp->dgram_len;
2906 vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
2908 /* Take care of the masks. */
2909 sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
2911 rc = efx_mae_action_set_populate_encap(spec);
2913 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
2914 NULL, "failed to request action ENCAP");
2921 sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
2922 const struct rte_flow_action_mark *conf,
2923 efx_mae_actions_t *spec)
2927 if (conf->id > SFC_FT_USER_MARK_MASK) {
2928 sfc_err(sa, "the mark value is too large");
2932 rc = efx_mae_action_set_populate_mark(spec, conf->id);
2934 sfc_err(sa, "failed to request action MARK: %s", strerror(rc));
2940 sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
2941 const struct rte_flow_action_count *conf
2942 __rte_unused,
2943 efx_mae_actions_t *spec)
2947 if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
2949 "counter queue is not configured for COUNT action");
2951 goto fail_counter_queue_uninit;
2954 if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
2956 goto fail_no_service_core;
2959 rc = efx_mae_action_set_populate_count(spec);
2962 "failed to populate counters in MAE action set: %s",
2964 goto fail_populate_count;
2969 fail_populate_count:
2970 fail_no_service_core:
2971 fail_counter_queue_uninit:
2977 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
2978 const struct rte_flow_action_phy_port *conf,
2979 efx_mae_actions_t *spec)
2981 efx_mport_sel_t mport;
2985 if (conf->original != 0)
2986 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
2988 phy_port = conf->index;
2990 rc = efx_mae_mport_by_phy_port(phy_port, &mport);
2992 sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s",
2993 phy_port, strerror(rc));
2997 rc = efx_mae_action_set_populate_deliver(spec, &mport);
2999 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3000 mport.sel, strerror(rc));
3007 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
3008 const struct rte_flow_action_vf *vf_conf,
3009 efx_mae_actions_t *spec)
3011 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3012 efx_mport_sel_t mport;
3016 if (vf_conf == NULL)
3017 vf = EFX_PCI_VF_INVALID;
3018 else if (vf_conf->original != 0)
3023 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
3025 sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s",
3026 encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1,
3031 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3033 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3034 mport.sel, strerror(rc));
3041 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
3042 const struct rte_flow_action_port_id *conf,
3043 efx_mae_actions_t *spec)
3045 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
3046 struct sfc_mae *mae = &sa->mae;
3047 efx_mport_sel_t mport;
3051 if (conf->id > UINT16_MAX)
3054 port_id = (conf->original != 0) ? sas->port_id : conf->id;
3056 rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
3059 sfc_err(sa, "failed to find MAE switch port SW entry for RTE ethdev port %u: %s",
3060 port_id, strerror(rc));
3064 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3066 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3067 mport.sel, strerror(rc));
3073 static const char * const action_names[] = {
3074 [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
3075 [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
3076 [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN",
3077 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
3078 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
3079 [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
3080 [RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
3081 [RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
3082 [RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
3083 [RTE_FLOW_ACTION_TYPE_PF] = "PF",
3084 [RTE_FLOW_ACTION_TYPE_VF] = "VF",
3085 [RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
3086 [RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
3090 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
3091 const struct rte_flow_action *action,
3092 const struct sfc_mae_outer_rule *outer_rule,
3093 struct sfc_mae_actions_bundle *bundle,
3094 efx_mae_actions_t *spec,
3095 struct rte_flow_error *error)
3097 const uint64_t rx_metadata = sa->negotiated_rx_metadata;
3098 bool custom_error = B_FALSE;
3101 switch (action->type) {
3102 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3103 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
3104 bundle->actions_mask);
3105 if (outer_rule == NULL ||
3106 outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
3109 rc = efx_mae_action_set_populate_decap(spec);
3111 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3112 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
3113 bundle->actions_mask);
3114 rc = efx_mae_action_set_populate_vlan_pop(spec);
3116 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3117 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
3118 bundle->actions_mask);
3119 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
3121 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3122 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
3123 bundle->actions_mask);
3124 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
3126 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3127 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
3128 bundle->actions_mask);
3129 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
3131 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3132 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
3133 bundle->actions_mask);
3134 rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
3137 custom_error = B_TRUE;
3139 case RTE_FLOW_ACTION_TYPE_COUNT:
3140 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
3141 bundle->actions_mask);
3142 rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
3144 case RTE_FLOW_ACTION_TYPE_FLAG:
3145 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
3146 bundle->actions_mask);
3147 if ((rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0) {
3148 rc = efx_mae_action_set_populate_flag(spec);
3150 rc = rte_flow_error_set(error, ENOTSUP,
3151 RTE_FLOW_ERROR_TYPE_ACTION,
3153 "flag delivery has not been negotiated");
3154 custom_error = B_TRUE;
3157 case RTE_FLOW_ACTION_TYPE_MARK:
3158 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
3159 bundle->actions_mask);
3160 if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0) {
3161 rc = sfc_mae_rule_parse_action_mark(sa, action->conf,
3164 rc = rte_flow_error_set(error, ENOTSUP,
3165 RTE_FLOW_ERROR_TYPE_ACTION,
3167 "mark delivery has not been negotiated");
3168 custom_error = B_TRUE;
3171 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3172 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
3173 bundle->actions_mask);
3174 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
3176 case RTE_FLOW_ACTION_TYPE_PF:
3177 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
3178 bundle->actions_mask);
3179 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
3181 case RTE_FLOW_ACTION_TYPE_VF:
3182 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
3183 bundle->actions_mask);
3184 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
3186 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3187 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
3188 bundle->actions_mask);
3189 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
3191 case RTE_FLOW_ACTION_TYPE_DROP:
3192 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
3193 bundle->actions_mask);
3194 rc = efx_mae_action_set_populate_drop(spec);
3197 return rte_flow_error_set(error, ENOTSUP,
3198 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3199 "Unsupported action");
3203 bundle->actions_mask |= (1ULL << action->type);
3204 } else if (!custom_error) {
3205 if (action->type < RTE_DIM(action_names)) {
3206 const char *action_name = action_names[action->type];
3208 if (action_name != NULL) {
3209 sfc_err(sa, "action %s was rejected: %s",
3210 action_name, strerror(rc));
3213 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3214 NULL, "Failed to request the action");
3221 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
3223 bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
3227 sfc_mae_process_encap_header(struct sfc_adapter *sa,
3228 const struct sfc_mae_bounce_eh *bounce_eh,
3229 struct sfc_mae_encap_header **encap_headerp)
3231 if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
3232 *encap_headerp = NULL;
3236 *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
3237 if (*encap_headerp != NULL)
3240 return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
3244 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
3245 const struct rte_flow_action actions[],
3246 struct sfc_flow_spec_mae *spec_mae,
3247 struct rte_flow_error *error)
3249 struct sfc_mae_encap_header *encap_header = NULL;
3250 struct sfc_mae_actions_bundle bundle = {0};
3251 const struct rte_flow_action *action;
3252 struct sfc_mae *mae = &sa->mae;
3253 efx_mae_actions_t *spec;
3254 unsigned int n_count;
3259 if (actions == NULL) {
3260 return rte_flow_error_set(error, EINVAL,
3261 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
3265 rc = efx_mae_action_set_spec_init(sa->nic, &spec);
3267 goto fail_action_set_spec_init;
3269 /* Cleanup after previous encap. header bounce buffer usage. */
3270 sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
3272 for (action = actions;
3273 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
3274 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
3276 goto fail_rule_parse_action;
3278 rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule,
3279 &bundle, spec, error);
3281 goto fail_rule_parse_action;
3284 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
3286 goto fail_rule_parse_action;
3288 rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
3290 goto fail_process_encap_header;
3292 n_count = efx_mae_action_set_get_nb_count(spec);
3295 sfc_err(sa, "too many count actions requested: %u", n_count);
3299 spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
3301 if (spec_mae->action_set != NULL) {
3302 sfc_mae_encap_header_del(sa, encap_header);
3303 efx_mae_action_set_spec_fini(sa->nic, spec);
3307 rc = sfc_mae_action_set_add(sa, actions, spec, encap_header, n_count,
3308 &spec_mae->action_set);
3310 goto fail_action_set_add;
3314 fail_action_set_add:
3316 sfc_mae_encap_header_del(sa, encap_header);
3318 fail_process_encap_header:
3319 fail_rule_parse_action:
3320 efx_mae_action_set_spec_fini(sa->nic, spec);
3322 fail_action_set_spec_init:
3323 if (rc > 0 && rte_errno == 0) {
3324 rc = rte_flow_error_set(error, rc,
3325 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3326 NULL, "Failed to process the action");
3332 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
3333 const efx_mae_match_spec_t *left,
3334 const efx_mae_match_spec_t *right)
3336 bool have_same_class;
3339 rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
3342 return (rc == 0) ? have_same_class : false;
3346 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
3347 struct sfc_mae_outer_rule *rule)
3349 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
3350 struct sfc_mae_outer_rule *entry;
3351 struct sfc_mae *mae = &sa->mae;
3353 if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
3354 /* An active rule is reused. Its class is known to be valid. */
3358 TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
3359 sfc_mae_outer_rules, entries) {
3360 const efx_mae_match_spec_t *left = entry->match_spec;
3361 const efx_mae_match_spec_t *right = rule->match_spec;
3366 if (sfc_mae_rules_class_cmp(sa, left, right))
3370 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3371 "support for outer frame pattern items is not guaranteed; "
3372 "other than that, the items are valid from SW standpoint");
3377 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
3378 struct sfc_flow_spec_mae *spec)
3380 const struct rte_flow *entry;
3382 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
3383 const struct sfc_flow_spec *entry_spec = &entry->spec;
3384 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
3385 const efx_mae_match_spec_t *left = es_mae->match_spec;
3386 const efx_mae_match_spec_t *right = spec->match_spec;
3388 switch (entry_spec->type) {
3389 case SFC_FLOW_SPEC_FILTER:
3390 /* Ignore VNIC-level flows */
3392 case SFC_FLOW_SPEC_MAE:
3393 if (sfc_mae_rules_class_cmp(sa, left, right))
3401 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3402 "support for inner frame pattern items is not guaranteed; "
3403 "other than that, the items are valid from SW standpoint");
3408 * Confirm that a given flow can be accepted by the FW.
3411 * Software adapter context
3413 * Flow to be verified
3415 * Zero on success and non-zero in the case of error.
3416 * A special value of EAGAIN indicates that the adapter is
3417 * not in the started state. This requirement exists because
3418 * it only makes sense to compare the rule class of the flow
3419 * being validated with the classes of the active rules, and
3420 * such classes are known to be supported by the FW.
3423 sfc_mae_flow_verify(struct sfc_adapter *sa,
3424 struct rte_flow *flow)
3426 struct sfc_flow_spec *spec = &flow->spec;
3427 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3428 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3431 SFC_ASSERT(sfc_adapter_is_locked(sa));
3433 if (sa->state != SFC_ETHDEV_STARTED)
3436 if (outer_rule != NULL) {
3437 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
3442 return sfc_mae_action_rule_class_verify(sa, spec_mae);
3446 sfc_mae_flow_insert(struct sfc_adapter *sa,
3447 struct rte_flow *flow)
3449 struct sfc_flow_spec *spec = &flow->spec;
3450 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3451 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3452 struct sfc_mae_action_set *action_set = spec_mae->action_set;
3453 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
3456 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
3457 SFC_ASSERT(action_set != NULL);
3459 if (outer_rule != NULL) {
3460 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
3461 spec_mae->match_spec);
3463 goto fail_outer_rule_enable;
3466 rc = sfc_mae_action_set_enable(sa, action_set);
3468 goto fail_action_set_enable;
3470 if (action_set->n_counters > 0) {
3471 rc = sfc_mae_counter_start(sa);
3473 sfc_err(sa, "failed to start MAE counter support: %s",
3475 goto fail_mae_counter_start;
3479 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
3480 NULL, &fw_rsrc->aset_id,
3481 &spec_mae->rule_id);
3483 goto fail_action_rule_insert;
3485 sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
3486 flow, spec_mae->rule_id.id);
3490 fail_action_rule_insert:
3491 fail_mae_counter_start:
3492 sfc_mae_action_set_disable(sa, action_set);
3494 fail_action_set_enable:
3495 if (outer_rule != NULL)
3496 sfc_mae_outer_rule_disable(sa, outer_rule);
3498 fail_outer_rule_enable:
3503 sfc_mae_flow_remove(struct sfc_adapter *sa,
3504 struct rte_flow *flow)
3506 struct sfc_flow_spec *spec = &flow->spec;
3507 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3508 struct sfc_mae_action_set *action_set = spec_mae->action_set;
3509 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3512 SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
3513 SFC_ASSERT(action_set != NULL);
3515 rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
3517 sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
3518 flow, spec_mae->rule_id.id, strerror(rc));
3520 sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
3521 flow, spec_mae->rule_id.id);
3522 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
3524 sfc_mae_action_set_disable(sa, action_set);
3526 if (outer_rule != NULL)
3527 sfc_mae_outer_rule_disable(sa, outer_rule);
3533 sfc_mae_query_counter(struct sfc_adapter *sa,
3534 struct sfc_flow_spec_mae *spec,
3535 const struct rte_flow_action *action,
3536 struct rte_flow_query_count *data,
3537 struct rte_flow_error *error)
3539 struct sfc_mae_action_set *action_set = spec->action_set;
3540 const struct rte_flow_action_count *conf = action->conf;
3544 if (action_set->n_counters == 0) {
3545 return rte_flow_error_set(error, EINVAL,
3546 RTE_FLOW_ERROR_TYPE_ACTION, action,
3547 "Queried flow rule does not have count actions");
3550 for (i = 0; i < action_set->n_counters; i++) {
3552 * Get the first available counter of the flow rule if
3553 * counter ID is not specified.
3555 if (conf != NULL && action_set->counters[i].rte_id != conf->id)
3558 rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
3559 &action_set->counters[i], data);
3561 return rte_flow_error_set(error, EINVAL,
3562 RTE_FLOW_ERROR_TYPE_ACTION, action,
3563 "Queried flow rule counter action is invalid");
3569 return rte_flow_error_set(error, ENOENT,
3570 RTE_FLOW_ERROR_TYPE_ACTION, action,
3571 "No such flow rule action count ID");
3575 sfc_mae_flow_query(struct rte_eth_dev *dev,
3576 struct rte_flow *flow,
3577 const struct rte_flow_action *action,
3579 struct rte_flow_error *error)
3581 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
3582 struct sfc_flow_spec *spec = &flow->spec;
3583 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3585 switch (action->type) {
3586 case RTE_FLOW_ACTION_TYPE_COUNT:
3587 return sfc_mae_query_counter(sa, spec_mae, action,
3590 return rte_flow_error_set(error, ENOTSUP,
3591 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3592 "Query for action of this type is not supported");
3597 sfc_mae_switchdev_init(struct sfc_adapter *sa)
3599 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3600 struct sfc_mae *mae = &sa->mae;
3602 efx_mport_sel_t phy;
3605 sfc_log_init(sa, "entry");
3607 if (!sa->switchdev) {
3608 sfc_log_init(sa, "switchdev is not enabled - skip");
3612 if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
3614 sfc_err(sa, "failed to init switchdev - no MAE support");
3618 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
3621 sfc_err(sa, "failed to get PF m-port");
3625 rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, &phy);
3627 sfc_err(sa, "failed to get PHY m-port");
3631 rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy,
3632 SFC_MAE_RULE_PRIO_LOWEST,
3633 &mae->switchdev_rule_pf_to_ext);
3635 sfc_err(sa, "failed to add MAE rule to forward from PF to PHY");
3639 rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf,
3640 SFC_MAE_RULE_PRIO_LOWEST,
3641 &mae->switchdev_rule_ext_to_pf);
3643 sfc_err(sa, "failed to add MAE rule to forward from PHY to PF");
3647 sfc_log_init(sa, "done");
3652 sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
3658 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
3663 sfc_mae_switchdev_fini(struct sfc_adapter *sa)
3665 struct sfc_mae *mae = &sa->mae;
3670 sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
3671 sfc_mae_rule_del(sa, mae->switchdev_rule_ext_to_pf);